LoopAccessAnalysis.cpp
1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/LoopAccessAnalysis.h"
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
40#include "llvm/IR/BasicBlock.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DebugLoc.h"
46#include "llvm/IR/Dominators.h"
47#include "llvm/IR/Function.h"
48#include "llvm/IR/InstrTypes.h"
49#include "llvm/IR/Instruction.h"
52#include "llvm/IR/PassManager.h"
53#include "llvm/IR/Type.h"
54#include "llvm/IR/Value.h"
55#include "llvm/IR/ValueHandle.h"
58#include "llvm/Support/Debug.h"
61#include <algorithm>
62#include <cassert>
63#include <cstdint>
64#include <iterator>
65#include <utility>
66#include <variant>
67#include <vector>
68
69using namespace llvm;
70using namespace llvm::SCEVPatternMatch;
71
72#define DEBUG_TYPE "loop-accesses"
73
74static cl::opt<unsigned, true>
75VectorizationFactor("force-vector-width", cl::Hidden,
76 cl::desc("Sets the SIMD width. Zero is autoselect."),
77 cl::location(VectorizerParams::VectorizationFactor));
78unsigned VectorizerParams::VectorizationFactor;
79
80static cl::opt<unsigned, true>
81VectorizationInterleave("force-vector-interleave", cl::Hidden,
82 cl::desc("Sets the vectorization interleave count. "
83 "Zero is autoselect."),
84 cl::location(
85 VectorizerParams::VectorizationInterleave));
86unsigned VectorizerParams::VectorizationInterleave;
87
88static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
89 "runtime-memory-check-threshold", cl::Hidden,
90 cl::desc("When performing memory disambiguation checks at runtime do not "
91 "generate more than this number of comparisons (default = 8)."),
92 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
93unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
94
95/// The maximum iterations used to merge memory checks
96static cl::opt<unsigned> MemoryCheckMergeThreshold(
97 "memory-check-merge-threshold", cl::Hidden,
98 cl::desc("Maximum number of comparisons done when trying to merge "
99 "runtime memory checks. (default = 100)"),
100 cl::init(100));
101
102/// Maximum SIMD width.
103const unsigned VectorizerParams::MaxVectorWidth = 64;
104
105/// We collect dependences up to this threshold.
106static cl::opt<unsigned>
107 MaxDependences("max-dependences", cl::Hidden,
108 cl::desc("Maximum number of dependences collected by "
109 "loop-access analysis (default = 100)"),
110 cl::init(100));
111
112/// This enables versioning on the strides of symbolically striding memory
113/// accesses in code like the following.
114/// for (i = 0; i < N; ++i)
115/// A[i * Stride1] += B[i * Stride2] ...
116///
117/// Will be roughly translated to
118/// if (Stride1 == 1 && Stride2 == 1) {
119/// for (i = 0; i < N; i+=4)
120/// A[i:i+3] += ...
121/// } else
122/// ...
123static cl::opt<bool> EnableMemAccessVersioning(
124 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
125 cl::desc("Enable symbolic stride memory access versioning"));
126
127/// Enable store-to-load forwarding conflict detection. This option can
128/// be disabled for correctness testing.
129static cl::opt<bool> EnableForwardingConflictDetection(
130 "store-to-load-forwarding-conflict-detection", cl::Hidden,
131 cl::desc("Enable conflict detection in loop-access analysis"),
132 cl::init(true));
133
134static cl::opt<unsigned> MaxForkedSCEVDepth(
135 "max-forked-scev-depth", cl::Hidden,
136 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
137 cl::init(5));
138
139static cl::opt<bool> SpeculateUnitStride(
140 "laa-speculate-unit-stride", cl::Hidden,
141 cl::desc("Speculate that non-constant strides are unit in LAA"),
142 cl::init(true));
143
144static cl::opt<bool, true> HoistRuntimeChecks(
145 "hoist-runtime-checks", cl::Hidden,
146 cl::desc(
147 "Hoist inner loop runtime memory checks to outer loop if possible"),
148 cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
149bool VectorizerParams::HoistRuntimeChecks;
150
151bool VectorizerParams::isInterleaveForced() {
152 return ::VectorizationInterleave.getNumOccurrences() > 0;
153}
154
155const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
156 const DenseMap<Value *, const SCEV *> &PtrToStride,
157 Value *Ptr) {
158 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
159
160 // If there is an entry in the map return the SCEV of the pointer with the
161 // symbolic stride replaced by one.
162 const SCEV *StrideSCEV = PtrToStride.lookup(Ptr);
163 if (!StrideSCEV)
164 // For a non-symbolic stride, just return the original expression.
165 return OrigSCEV;
166
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const SCEV *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 const SCEV *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
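// Illustrative example (not part of the original file): for a loop such as
//   for (i = 0; i < n; i++)
//     A[i * Stride] = ...;
// the pointer's SCEV is {%A,+,(4 * %Stride)}<%loop>. If %Stride was collected
// as a symbolic stride, the code above adds the predicate "%Stride == 1" to
// PSE and re-queries the pointer, yielding {%A,+,4}<%loop>, which downstream
// analysis can treat as a unit-stride access guarded by the versioning check.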
182
183RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
184 unsigned Index, const RuntimePointerChecking &RtCheck)
185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
186 AddressSpace(RtCheck.Pointers[Index]
187 .PointerValue->getType()
188 ->getPointerAddressSpace()),
189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
190 Members.push_back(Index);
191}
192
193/// Returns \p A + \p B, if it is guaranteed not to unsigned wrap. Otherwise
194/// return nullptr. \p A and \p B must have the same type.
195static const SCEV *addSCEVNoOverflow(const SCEV *A, const SCEV *B,
196 ScalarEvolution &SE) {
197 if (!SE.willNotOverflow(Instruction::Add, /*IsSigned=*/false, A, B))
198 return nullptr;
199 return SE.getAddExpr(A, B);
200}
201
202/// Returns \p A * \p B, if it is guaranteed not to unsigned wrap. Otherwise
203/// return nullptr. \p A and \p B must have the same type.
204static const SCEV *mulSCEVOverflow(const SCEV *A, const SCEV *B,
205 ScalarEvolution &SE) {
206 if (!SE.willNotOverflow(Instruction::Mul, /*IsSigned=*/false, A, B))
207 return nullptr;
208 return SE.getMulExpr(A, B);
209}
210
211/// Return true, if evaluating \p AR at \p MaxBTC cannot wrap, because \p AR at
212/// \p MaxBTC is guaranteed inbounds of the accessed object.
213static bool evaluatePtrAddRecAtMaxBTCWillNotWrap(
214 const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize,
215 ScalarEvolution &SE, const DataLayout &DL, DominatorTree *DT,
216 AssumptionCache *AC,
217 std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
218 auto *PointerBase = SE.getPointerBase(AR->getStart());
219 auto *StartPtr = dyn_cast<SCEVUnknown>(PointerBase);
220 if (!StartPtr)
221 return false;
222 const Loop *L = AR->getLoop();
223 bool CheckForNonNull, CheckForFreed;
224 Value *StartPtrV = StartPtr->getValue();
225 uint64_t DerefBytes = StartPtrV->getPointerDereferenceableBytes(
226 DL, CheckForNonNull, CheckForFreed);
227
228 if (DerefBytes && (CheckForNonNull || CheckForFreed))
229 return false;
230
231 const SCEV *Step = AR->getStepRecurrence(SE);
232 Type *WiderTy = SE.getWiderType(MaxBTC->getType(), Step->getType());
233 const SCEV *DerefBytesSCEV = SE.getConstant(WiderTy, DerefBytes);
234
235 // Check if we have a suitable dereferencable assumption we can use.
236 if (!StartPtrV->canBeFreed()) {
237 RetainedKnowledge DerefRK = getKnowledgeValidInContext(
238 StartPtrV, {Attribute::Dereferenceable}, *AC,
239 L->getLoopPredecessor()->getTerminator(), DT);
240 if (DerefRK) {
241 DerefBytesSCEV =
242 SE.getUMaxExpr(DerefBytesSCEV, SE.getSCEV(DerefRK.IRArgValue));
243 }
244 }
245
246 if (DerefBytesSCEV->isZero())
247 return false;
248
249 bool IsKnownNonNegative = SE.isKnownNonNegative(Step);
250 if (!IsKnownNonNegative && !SE.isKnownNegative(Step))
251 return false;
252
253 Step = SE.getNoopOrSignExtend(Step, WiderTy);
254 MaxBTC = SE.getNoopOrZeroExtend(MaxBTC, WiderTy);
255
256 // For the computations below, make sure they don't unsigned wrap.
257 if (!SE.isKnownPredicate(CmpInst::ICMP_UGE, AR->getStart(), StartPtr))
258 return false;
259 const SCEV *StartOffset = SE.getNoopOrZeroExtend(
260 SE.getMinusSCEV(AR->getStart(), StartPtr), WiderTy);
261
262 if (!LoopGuards)
263 LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(AR->getLoop(), SE));
264 MaxBTC = SE.applyLoopGuards(MaxBTC, *LoopGuards);
265
266 const SCEV *OffsetAtLastIter =
267 mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
268 if (!OffsetAtLastIter) {
269 // Re-try with constant max backedge-taken count if using the symbolic one
270 // failed.
271 MaxBTC = SE.getConstantMaxBackedgeTakenCount(AR->getLoop());
272 if (isa<SCEVCouldNotCompute>(MaxBTC))
273 return false;
274 MaxBTC = SE.getNoopOrZeroExtend(
275 MaxBTC, WiderTy);
276 OffsetAtLastIter =
277 mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
278 if (!OffsetAtLastIter)
279 return false;
280 }
281
282 const SCEV *OffsetEndBytes = addSCEVNoOverflow(
283 OffsetAtLastIter, SE.getNoopOrZeroExtend(EltSize, WiderTy), SE);
284 if (!OffsetEndBytes)
285 return false;
286
287 if (IsKnownNonNegative) {
288 // For positive steps, check if
289 // (AR->getStart() - StartPtr) + (MaxBTC * Step) + EltSize <= DerefBytes,
290 // while making sure none of the computations unsigned wrap themselves.
291 const SCEV *EndBytes = addSCEVNoOverflow(StartOffset, OffsetEndBytes, SE);
292 if (!EndBytes)
293 return false;
294
295 DerefBytesSCEV = SE.applyLoopGuards(DerefBytesSCEV, *LoopGuards);
296 return SE.isKnownPredicate(CmpInst::ICMP_ULE, EndBytes, DerefBytesSCEV);
297 }
298
299 // For negative steps check if
300 // * StartOffset >= (MaxBTC * Step + EltSize)
301 // * StartOffset <= DerefBytes.
302 assert(SE.isKnownNegative(Step) && "must be known negative");
303 return SE.isKnownPredicate(CmpInst::ICMP_SGE, StartOffset, OffsetEndBytes) &&
304 SE.isKnownPredicate(CmpInst::ICMP_ULE, StartOffset, DerefBytesSCEV);
305}
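// Worked example (illustrative, with assumed values): for an AddRec {%p,+,4}
// with EltSize = 4, MaxBTC = 99, and %p known dereferenceable(512) and never
// freed, the non-negative-step check above becomes
//   StartOffset + MaxBTC * |Step| + EltSize <= DerefBytes
//   0           + 99 * 4          + 4       =  400 <= 512
// so evaluating the AddRec at MaxBTC is guaranteed to stay within the object
// and cannot unsigned-wrap.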
306
307std::pair<const SCEV *, const SCEV *> llvm::getStartAndEndForAccess(
308 const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC,
309 const SCEV *MaxBTC, ScalarEvolution *SE,
310 DenseMap<std::pair<const SCEV *, Type *>,
311 std::pair<const SCEV *, const SCEV *>> *PointerBounds,
312 DominatorTree *DT, AssumptionCache *AC,
313 std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
314 std::pair<const SCEV *, const SCEV *> *PtrBoundsPair;
315 if (PointerBounds) {
316 auto [Iter, Ins] = PointerBounds->insert(
317 {{PtrExpr, AccessTy},
318 {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
319 if (!Ins)
320 return Iter->second;
321 PtrBoundsPair = &Iter->second;
322 }
323
324 const SCEV *ScStart;
325 const SCEV *ScEnd;
326
327 auto &DL = Lp->getHeader()->getDataLayout();
328 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
329 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
330 if (SE->isLoopInvariant(PtrExpr, Lp)) {
331 ScStart = ScEnd = PtrExpr;
332 } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
333 ScStart = AR->getStart();
334 if (!isa<SCEVCouldNotCompute>(BTC))
335 // Evaluating AR at an exact BTC is safe: LAA separately checks that
336 // accesses cannot wrap in the loop. If evaluating AR at BTC wraps, then
337 // the loop either triggers UB when executing a memory access with a
338 // poison pointer or the wrapping/poisoned pointer is not used.
339 ScEnd = AR->evaluateAtIteration(BTC, *SE);
340 else {
341 // Evaluating AR at MaxBTC may wrap and create an expression that is less
342 // than the start of the AddRec due to wrapping (for example consider
343 // MaxBTC = -2). If that's the case, set ScEnd to -(EltSize + 1). ScEnd
344 // will get incremented by EltSize before returning, so this effectively
345 // sets ScEnd to the maximum unsigned value for the type. Note that LAA
346 // separately checks that accesses cannot wrap, so unsigned max
347 // represents an upper bound.
348 if (evaluatePtrAddRecAtMaxBTCWillNotWrap(AR, MaxBTC, EltSizeSCEV, *SE, DL,
349 DT, AC, LoopGuards)) {
350 ScEnd = AR->evaluateAtIteration(MaxBTC, *SE);
351 } else {
352 ScEnd = SE->getAddExpr(
353 SE->getNegativeSCEV(EltSizeSCEV),
354 SE->getSCEV(ConstantExpr::getIntToPtr(
355 ConstantInt::get(EltSizeSCEV->getType(), -1), AR->getType())));
356 }
357 }
358 const SCEV *Step = AR->getStepRecurrence(*SE);
359
360 // For expressions with negative step, the upper bound is ScStart and the
361 // lower bound is ScEnd.
362 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
363 if (CStep->getValue()->isNegative())
364 std::swap(ScStart, ScEnd);
365 } else {
366 // Fallback case: the step is not constant, but we can still
367 // get the upper and lower bounds of the interval by using min/max
368 // expressions.
369 ScStart = SE->getUMinExpr(ScStart, ScEnd);
370 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
371 }
372 } else
373 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
374
375 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
376 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
377
378 // Add the size of the pointed element to ScEnd.
379 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
380
381 std::pair<const SCEV *, const SCEV *> Res = {ScStart, ScEnd};
382 if (PointerBounds)
383 *PtrBoundsPair = Res;
384 return Res;
385}
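// Example (illustrative only): for the pointer SCEV {%a,+,4}<%loop>, an exact
// backedge-taken count of (n - 1) and an i32 access, the function returns
//   ScStart = %a
//   ScEnd   = (%a + (4 * (n - 1))) + 4 = %a + 4 * n
// i.e. a half-open [Start, End) range covering every byte the loop accesses.
// For a negative constant step the two bounds are swapped before EltSize is
// added, so ScStart is always the low address of the range.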
386
387/// Calculate Start and End points of memory access using
388/// getStartAndEndForAccess.
389void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
390 Type *AccessTy, bool WritePtr,
391 unsigned DepSetId, unsigned ASId,
392 PredicatedScalarEvolution &PSE,
393 bool NeedsFreeze) {
394 const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
395 const SCEV *BTC = PSE.getBackedgeTakenCount();
396 const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
397 Lp, PtrExpr, AccessTy, BTC, SymbolicMaxBTC, PSE.getSE(),
398 &DC.getPointerBounds(), DC.getDT(), DC.getAC(), LoopGuards);
399 assert(!isa<SCEVCouldNotCompute>(ScStart) &&
400 !isa<SCEVCouldNotCompute>(ScEnd) &&
401 "must be able to compute both start and end expressions");
402 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
403 NeedsFreeze);
404}
405
406bool RuntimePointerChecking::tryToCreateDiffCheck(
407 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
408 // If either group contains multiple different pointers, bail out.
409 // TODO: Support multiple pointers by using the minimum or maximum pointer,
410 // depending on src & sink.
411 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
412 return false;
413
414 const PointerInfo *Src = &Pointers[CGI.Members[0]];
415 const PointerInfo *Sink = &Pointers[CGJ.Members[0]];
416
417 // If either pointer is read and written, multiple checks may be needed. Bail
418 // out.
419 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
420 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
421 return false;
422
423 ArrayRef<unsigned> AccSrc =
424 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
425 ArrayRef<unsigned> AccSink =
426 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
427 // If either pointer is accessed multiple times, there may not be a clear
428 // src/sink relation. Bail out for now.
429 if (AccSrc.size() != 1 || AccSink.size() != 1)
430 return false;
431
432 // If the sink is accessed before src, swap src/sink.
433 if (AccSink[0] < AccSrc[0])
434 std::swap(Src, Sink);
435
436 const SCEVConstant *Step;
437 const SCEV *SrcStart;
438 const SCEV *SinkStart;
439 const Loop *InnerLoop = DC.getInnermostLoop();
440 if (!match(Src->Expr,
441 m_scev_AffineAddRec(m_SCEV(SrcStart), m_SCEVConstant(Step),
442 m_SpecificLoop(InnerLoop))) ||
443 !match(Sink->Expr,
444 m_scev_AffineAddRec(m_SCEV(SinkStart), m_scev_Specific(Step),
445 m_SpecificLoop(InnerLoop))))
446 return false;
447
448 SmallVector<Instruction *, 4> SrcInsts =
449 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
450 SmallVector<Instruction *, 4> SinkInsts =
451 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
452 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
453 Type *DstTy = getLoadStoreType(SinkInsts[0]);
454 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
455 return false;
456
457 const DataLayout &DL = InnerLoop->getHeader()->getDataLayout();
458 unsigned AllocSize =
459 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
460
461 // Only constant steps matching the AllocSize are supported at the
462 // moment. This simplifies the difference computation. Can be extended in the
463 // future.
464 if (Step->getAPInt().abs() != AllocSize)
465 return false;
466
467 IntegerType *IntTy =
468 IntegerType::get(Src->PointerValue->getContext(),
469 DL.getPointerSizeInBits(CGI.AddressSpace));
470
471 // When counting down, the dependence distance needs to be swapped.
472 if (Step->getValue()->isNegative())
473 std::swap(SinkStart, SrcStart);
474
475 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkStart, IntTy);
476 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcStart, IntTy);
477 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
478 isa<SCEVCouldNotCompute>(SrcStartInt))
479 return false;
480
481 // If the start values for both Src and Sink also vary according to an outer
482 // loop, then it's probably better to avoid creating diff checks because
483 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
484 // do the expanded full range overlap checks, which can be hoisted.
485 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
486 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
487 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
488 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
489 const Loop *StartARLoop = SrcStartAR->getLoop();
490 if (StartARLoop == SinkStartAR->getLoop() &&
491 StartARLoop == InnerLoop->getParentLoop() &&
492 // If the diff check would already be loop invariant (due to the
493 // recurrences being the same), then we prefer to keep the diff checks
494 // because they are cheaper.
495 SrcStartAR->getStepRecurrence(*SE) !=
496 SinkStartAR->getStepRecurrence(*SE)) {
497 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
498 "cannot be hoisted out of the outer loop\n");
499 return false;
500 }
501 }
502
503 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
504 << "SrcStart: " << *SrcStartInt << '\n'
505 << "SinkStartInt: " << *SinkStartInt << '\n');
506 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
507 Src->NeedsFreeze || Sink->NeedsFreeze);
508 return true;
509}
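// Sketch of the resulting check (illustrative; the actual expansion lives in
// the runtime-check utilities, e.g. llvm::addDiffRuntimeChecks): instead of
// the full overlap test
//   (SrcStart < SinkEnd) && (SinkStart < SrcEnd)
// a DiffCheck only records SrcStartInt, SinkStartInt and AllocSize, so the
// expander can emit a single comparison of (SinkStartInt - SrcStartInt)
// against a bound derived from AllocSize and the vectorization parameters,
// freezing the operands first if either pointer may be poison or undef.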
510
511SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
512 SmallVector<RuntimePointerCheck, 4> Checks;
513
514 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
515 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
516 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
517 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
518
519 if (needsChecking(CGI, CGJ)) {
520 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
521 Checks.emplace_back(&CGI, &CGJ);
522 }
523 }
524 }
525 return Checks;
526}
527
528void RuntimePointerChecking::generateChecks(
529 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
530 assert(Checks.empty() && "Checks is not empty");
531 groupChecks(DepCands, UseDependencies);
532 Checks = generateChecks();
533}
534
535bool RuntimePointerChecking::needsChecking(
536 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
537 for (const auto &I : M.Members)
538 for (const auto &J : N.Members)
539 if (needsChecking(I, J))
540 return true;
541 return false;
542}
543
544/// Compare \p I and \p J and return the minimum.
545/// Return nullptr in case we couldn't find an answer.
546static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
547 ScalarEvolution *SE) {
548 std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
549 if (!Diff)
550 return nullptr;
551 return Diff->isNegative() ? J : I;
552}
553
554bool RuntimeCheckingPtrGroup::addPointer(
555 unsigned Index, const RuntimePointerChecking &RtCheck) {
556 return addPointer(
557 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
558 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
559 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
560}
561
562bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
563 const SCEV *End, unsigned AS,
564 bool NeedsFreeze,
565 ScalarEvolution &SE) {
566 assert(AddressSpace == AS &&
567 "all pointers in a checking group must be in the same address space");
568
569 // Compare the starts and ends with the known minimum and maximum
570 // of this set. We need to know how we compare against the min/max
571 // of the set in order to be able to emit memchecks.
572 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
573 if (!Min0)
574 return false;
575
576 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
577 if (!Min1)
578 return false;
579
580 // Update the low bound expression if we've found a new min value.
581 if (Min0 == Start)
582 Low = Start;
583
584 // Update the high bound expression if we've found a new max value.
585 if (Min1 != End)
586 High = End;
587
588 Members.push_back(Index);
589 this->NeedsFreeze |= NeedsFreeze;
590 return true;
591}
592
593void RuntimePointerChecking::groupChecks(
594 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
595 // We build the groups from dependency candidates equivalence classes
596 // because:
597 // - We know that pointers in the same equivalence class share
598 // the same underlying object and therefore there is a chance
599 // that we can compare pointers
600 // - We wouldn't be able to merge two pointers for which we need
601 // to emit a memcheck. The classes in DepCands are already
602 // conveniently built such that no two pointers in the same
603 // class need checking against each other.
604
605 // We use the following (greedy) algorithm to construct the groups
606 // For every pointer in the equivalence class:
607 // For each existing group:
608 // - if the difference between this pointer and the min/max bounds
609 // of the group is a constant, then make the pointer part of the
610 // group and update the min/max bounds of that group as required.
611
612 CheckingGroups.clear();
613
614 // If we need to check two pointers to the same underlying object
615 // with a non-constant difference, we shouldn't perform any pointer
616 // grouping with those pointers. This is because we can easily get
617 // into cases where the resulting check would return false, even when
618 // the accesses are safe.
619 //
620 // The following example shows this:
621 // for (i = 0; i < 1000; ++i)
622 // a[5000 + i * m] = a[i] + a[i + 9000]
623 //
624 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
625 // (0, 10000) which is always false. However, if m is 1, there is no
626 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
627 // us to perform an accurate check in this case.
628 //
629 // In the above case, we have a non-constant distance and an Unknown
630 // dependence between accesses to the same underlying object, and could retry
631 // with runtime checks. Therefore UseDependencies is false. In this case we
632 // will use the fallback path and create separate checking groups for all
633 // pointers.
634
635 // If we don't have the dependency partitions, construct a new
636 // checking pointer group for each pointer. This is also required
637 // for correctness, because in this case we can have checking between
638 // pointers to the same underlying object.
639 if (!UseDependencies) {
640 for (unsigned I = 0; I < Pointers.size(); ++I)
641 CheckingGroups.emplace_back(I, *this);
642 return;
643 }
644
645 unsigned TotalComparisons = 0;
646
647 DenseMap<Value *, SmallVector<unsigned>> PositionMap;
648 for (unsigned Index = 0; Index < Pointers.size(); ++Index)
649 PositionMap[Pointers[Index].PointerValue].push_back(Index);
650
651 // We need to keep track of what pointers we've already seen so we
652 // don't process them twice.
653 SmallSet<unsigned, 2> Seen;
654
655 // Go through all equivalence classes, get the "pointer check groups"
656 // and add them to the overall solution. We use the order in which accesses
657 // appear in 'Pointers' to enforce determinism.
658 for (unsigned I = 0; I < Pointers.size(); ++I) {
659 // We've seen this pointer before, and therefore already processed
660 // its equivalence class.
661 if (Seen.contains(I))
662 continue;
663
664 MemAccessInfo Access(Pointers[I].PointerValue,
665 Pointers[I].IsWritePtr);
666
667 SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
668
669 // Because DepCands is constructed by visiting accesses in the order in
670 // which they appear in alias sets (which is deterministic) and the
671 // iteration order within an equivalence class member is only dependent on
672 // the order in which unions and insertions are performed on the
673 // equivalence class, the iteration order is deterministic.
674 for (auto M : DepCands.members(Access)) {
675 auto PointerI = PositionMap.find(M.getPointer());
676 // If we can't find the pointer in PositionMap that means we can't
677 // generate a memcheck for it.
678 if (PointerI == PositionMap.end())
679 continue;
680 for (unsigned Pointer : PointerI->second) {
681 bool Merged = false;
682 // Mark this pointer as seen.
683 Seen.insert(Pointer);
684
685 // Go through all the existing sets and see if we can find one
686 // which can include this pointer.
687 for (RuntimeCheckingPtrGroup &Group : Groups) {
688 // Don't perform more than a certain amount of comparisons.
689 // This should limit the cost of grouping the pointers to something
690 // reasonable. If we do end up hitting this threshold, the algorithm
691 // will create separate groups for all remaining pointers.
692 if (TotalComparisons > MemoryCheckMergeThreshold)
693 break;
694
695 TotalComparisons++;
696
697 if (Group.addPointer(Pointer, *this)) {
698 Merged = true;
699 break;
700 }
701 }
702
703 if (!Merged)
704 // We couldn't add this pointer to any existing set or the threshold
705 // for the number of comparisons has been reached. Create a new group
706 // to hold the current pointer.
707 Groups.emplace_back(Pointer, *this);
708 }
709 }
710
711 // We've computed the grouped checks for this partition.
712 // Save the results and continue with the next one.
714 }
715}
716
717bool RuntimePointerChecking::arePointersInSamePartition(
718 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
719 unsigned PtrIdx2) {
720 return (PtrToPartition[PtrIdx1] != -1 &&
721 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
722}
723
724bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
725 const PointerInfo &PointerI = Pointers[I];
726 const PointerInfo &PointerJ = Pointers[J];
727
728 // No need to check if two readonly pointers intersect.
729 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
730 return false;
731
732 // Only need to check pointers between two different dependency sets.
733 if (PointerI.DependencySetId == PointerJ.DependencySetId)
734 return false;
735
736 // Only need to check pointers in the same alias set.
737 return PointerI.AliasSetId == PointerJ.AliasSetId;
738}
739
740/// Assign each RuntimeCheckingPtrGroup pointer an index for stable UTC output.
741static DenseMap<const RuntimeCheckingPtrGroup *, unsigned>
742getPtrToIdxMap(ArrayRef<RuntimeCheckingPtrGroup> CheckingGroups) {
743 DenseMap<const RuntimeCheckingPtrGroup *, unsigned> PtrIndices;
744 for (const auto &[Idx, CG] : enumerate(CheckingGroups))
745 PtrIndices[&CG] = Idx;
746 return PtrIndices;
747}
748
749void RuntimePointerChecking::printChecks(
750 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
751 unsigned Depth) const {
752 unsigned N = 0;
753 auto PtrIndices = getPtrToIdxMap(CheckingGroups);
754 for (const auto &[Check1, Check2] : Checks) {
755 const auto &First = Check1->Members, &Second = Check2->Members;
756 OS.indent(Depth) << "Check " << N++ << ":\n";
757 OS.indent(Depth + 2) << "Comparing group GRP" << PtrIndices.at(Check1)
758 << ":\n";
759 for (unsigned K : First)
760 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
761 OS.indent(Depth + 2) << "Against group GRP" << PtrIndices.at(Check2)
762 << ":\n";
763 for (unsigned K : Second)
764 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
765 }
766}
767
769
770 OS.indent(Depth) << "Run-time memory checks:\n";
771 printChecks(OS, Checks, Depth);
772
773 OS.indent(Depth) << "Grouped accesses:\n";
774 auto PtrIndices = getPtrToIdxMap(CheckingGroups);
775 for (const auto &CG : CheckingGroups) {
776 OS.indent(Depth + 2) << "Group GRP" << PtrIndices.at(&CG) << ":\n";
777 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
778 << ")\n";
779 for (unsigned Member : CG.Members) {
780 OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
781 }
782 }
783}
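// The emitted report (illustrative formatting, with hypothetical IR names)
// looks roughly like:
//   Run-time memory checks:
//   Check 0:
//     Comparing group GRP0:
//       %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
//     Against group GRP1:
//       %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
//   Grouped accesses:
//     Group GRP0:
//       (Low: %a High: (400 + %a))
//         Member: {%a,+,4}<%loop>
//     Group GRP1:
//       (Low: %b High: (400 + %b))
//         Member: {%b,+,4}<%loop>
// where the GRP numbers come from getPtrToIdxMap so that the textual output
// stays stable across runs.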
784
785namespace {
786
787/// Analyses memory accesses in a loop.
788///
789/// Checks whether run time pointer checks are needed and builds sets for data
790/// dependence checking.
791class AccessAnalysis {
792public:
793 /// Read or write access location.
794 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
795 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
796
797 AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
798 MemoryDepChecker::DepCandidates &DA,
799 PredicatedScalarEvolution &PSE,
800 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
801 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
802 LoopAliasScopes(LoopAliasScopes) {
803 // We're analyzing dependences across loop iterations.
804 BAA.enableCrossIterationMode();
805 }
806
807 /// Register a load and whether it is only read from.
808 void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
809 Value *Ptr = const_cast<Value *>(Loc.Ptr);
810 AST.add(adjustLoc(Loc));
811 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
812 if (IsReadOnly)
813 ReadOnlyPtr.insert(Ptr);
814 }
815
816 /// Register a store.
817 void addStore(const MemoryLocation &Loc, Type *AccessTy) {
818 Value *Ptr = const_cast<Value *>(Loc.Ptr);
819 AST.add(adjustLoc(Loc));
820 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
821 }
822
823 /// Check if we can emit a run-time no-alias check for \p Access.
824 ///
825 /// Returns true if we can emit a run-time no alias check for \p Access.
826 /// If we can check this access, this also adds it to a dependence set and
827 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
828 /// we will attempt to use additional run-time checks in order to get
829 /// the bounds of the pointer.
830 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
831 MemAccessInfo Access, Type *AccessTy,
832 const DenseMap<Value *, const SCEV *> &Strides,
833 DenseMap<Value *, unsigned> &DepSetId,
834 Loop *TheLoop, unsigned &RunningDepId,
835 unsigned ASId, bool Assume);
836
837 /// Check whether we can check the pointers at runtime for
838 /// non-intersection.
839 ///
840 /// Returns true if we need no check or if we do and we can generate them
841 /// (i.e. the pointers have computable bounds). A return value of false means
842 /// we couldn't analyze and generate runtime checks for all pointers in the
843 /// loop, but if \p AllowPartial is set then we will have checks for those
844 /// pointers we could analyze.
845 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, Loop *TheLoop,
846 const DenseMap<Value *, const SCEV *> &Strides,
847 Value *&UncomputablePtr, bool AllowPartial);
848
849 /// Goes over all memory accesses, checks whether a RT check is needed
850 /// and builds sets of dependent accesses.
851 void buildDependenceSets() {
852 processMemAccesses();
853 }
854
855 /// Initial processing of memory accesses determined that we need to
856 /// perform dependency checking.
857 ///
858 /// Note that this can later be cleared if we retry memcheck analysis without
859 /// dependency checking (i.e. ShouldRetryWithRuntimeChecks).
860 bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }
861
862 /// We decided that no dependence analysis would be used. Reset the state.
863 void resetDepChecks(MemoryDepChecker &DepChecker) {
864 CheckDeps.clear();
865 DepChecker.clearDependences();
866 }
867
868 const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }
869
870private:
871 typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
872
873 /// Adjust the MemoryLocation so that it represents accesses to this
874 /// location across all iterations, rather than a single one.
875 MemoryLocation adjustLoc(MemoryLocation Loc) const {
876 // The accessed location varies within the loop, but remains within the
877 // underlying object.
878 Loc.Size = LocationSize::beforeOrAfterPointer();
879 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
880 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
881 return Loc;
882 }
883
884 /// Drop alias scopes that are only valid within a single loop iteration.
885 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
886 if (!ScopeList)
887 return nullptr;
888
889 // For the sake of simplicity, drop the whole scope list if any scope is
890 // iteration-local.
891 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
892 return LoopAliasScopes.contains(cast<MDNode>(Scope));
893 }))
894 return nullptr;
895
896 return ScopeList;
897 }
898
899 /// Go over all memory access and check whether runtime pointer checks
900 /// are needed and build sets of dependency check candidates.
901 void processMemAccesses();
902
903 /// Map of all accesses. Values are the types used to access memory pointed to
904 /// by the pointer.
905 PtrAccessMap Accesses;
906
907 /// The loop being checked.
908 const Loop *TheLoop;
909
910 /// List of accesses that need a further dependence check.
911 MemAccessInfoList CheckDeps;
912
913 /// Set of pointers that are read only.
914 SmallPtrSet<Value*, 16> ReadOnlyPtr;
915
916 /// Batched alias analysis results.
917 BatchAAResults BAA;
918
919 /// An alias set tracker to partition the access set by underlying object and
920 /// intrinsic property (such as TBAA metadata).
921 AliasSetTracker AST;
922
923 /// The LoopInfo of the loop being checked.
924 const LoopInfo *LI;
925
926 /// Sets of potentially dependent accesses - members of one set share an
927 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
928 /// dependence check.
929 MemoryDepChecker::DepCandidates &DepCands;
930
931 /// Initial processing of memory accesses determined that we may need
932 /// to add memchecks. Perform the analysis to determine the necessary checks.
933 ///
934 /// Note that this is different from isDependencyCheckNeeded. When we retry
935 /// memcheck analysis without dependency checking
936 /// (i.e. ShouldRetryWithRuntimeChecks), isDependencyCheckNeeded is
937 /// cleared while this remains set if we have potentially dependent accesses.
938 bool IsRTCheckAnalysisNeeded = false;
939
940 /// The SCEV predicate containing all the SCEV-related assumptions.
941 PredicatedScalarEvolution &PSE;
942
943 DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;
944
945 /// Alias scopes that are declared inside the loop, and as such not valid
946 /// across iterations.
947 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
948};
949
950} // end anonymous namespace
951
952/// Try to compute a constant stride for \p AR. Used by getPtrStride and
953/// isNoWrap.
954static std::optional<int64_t>
955getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
956 Value *Ptr, PredicatedScalarEvolution &PSE) {
957 if (isa<ScalableVectorType>(AccessTy)) {
958 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
959 << "\n");
960 return std::nullopt;
961 }
962
963 // The access function must stride over the innermost loop.
964 if (Lp != AR->getLoop()) {
965 LLVM_DEBUG({
966 dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
967 if (Ptr)
968 dbgs() << *Ptr << " ";
969
970 dbgs() << "SCEV: " << *AR << "\n";
971 });
972 return std::nullopt;
973 }
974
975 // Check the step is constant.
976 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
977
978 // Calculate the pointer stride and check if it is constant.
979 const APInt *APStepVal;
980 if (!match(Step, m_scev_APInt(APStepVal))) {
981 LLVM_DEBUG({
982 dbgs() << "LAA: Bad stride - Not a constant strided ";
983 if (Ptr)
984 dbgs() << *Ptr << " ";
985 dbgs() << "SCEV: " << *AR << "\n";
986 });
987 return std::nullopt;
988 }
989
990 const auto &DL = Lp->getHeader()->getDataLayout();
991 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
992 int64_t Size = AllocSize.getFixedValue();
993
994 // Huge step value - give up.
995 std::optional<int64_t> StepVal = APStepVal->trySExtValue();
996 if (!StepVal)
997 return std::nullopt;
998
999 // Strided access.
1000 return *StepVal % Size ? std::nullopt : std::make_optional(*StepVal / Size);
1001}
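// Example (illustrative): for a float access (AllocSize = 4) through the
// AddRec {%p,+,8}<%loop>, StepVal is 8 and the returned stride is 2, i.e.
// every other element is touched. A step of 6 would return std::nullopt
// because it is not a multiple of the element size.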
1002
1003/// Check whether \p AR is a non-wrapping AddRec. If \p Ptr is not nullptr, use
1004 /// information from the IR pointer value to determine no-wrap.
1005static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
1006 Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
1007 std::optional<int64_t> Stride = std::nullopt) {
1008 // FIXME: This should probably only return true for NUW.
1009 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1010 return true;
1011
1013 return true;
1014
1015 // An nusw getelementptr that is an AddRec cannot wrap. If it would wrap,
1016 // the distance between the previously accessed location and the wrapped
1017 // location will be larger than half the pointer index type space. In that
1018 // case, the GEP would be poison and any memory access dependent on it would
1019 // be immediate UB when executed.
1020 if (auto *GEP = dyn_cast_if_present<GetElementPtrInst>(Ptr);
1021 GEP && GEP->hasNoUnsignedSignedWrap())
1022 return true;
1023
1024 if (!Stride)
1025 Stride = getStrideFromAddRec(AR, L, AccessTy, Ptr, PSE);
1026 if (Stride) {
1027 // If the null pointer is undefined, then an access sequence which would
1028 // otherwise access it can be assumed not to unsigned wrap. Note that this
1029 // assumes the object in memory is aligned to the natural alignment.
1030 unsigned AddrSpace = AR->getType()->getPointerAddressSpace();
1031 if (!NullPointerIsDefined(L->getHeader()->getParent(), AddrSpace) &&
1032 (Stride == 1 || Stride == -1))
1033 return true;
1034 }
1035
1036 if (Ptr && Assume) {
1037 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1038 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1039 << "LAA: Pointer: " << *Ptr << "\n"
1040 << "LAA: SCEV: " << *AR << "\n"
1041 << "LAA: Added an overflow assumption\n");
1042 return true;
1043 }
1044
1045 return false;
1046}
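// Illustrative IR for the GEP-based reasoning above (not taken from a test):
//   %gep = getelementptr nusw float, ptr %base, i64 %iv
//   store float %v, ptr %gep
// If %gep wrapped, the nusw flag would make it poison, and the store would
// turn that poison into immediate UB, so the AddRec for %gep can be treated
// as non-wrapping without adding an extra SCEV predicate.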
1047
1048static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
1049 function_ref<void(Value *)> AddPointer) {
1050 SmallPtrSet<Value *, 8> Visited;
1051 SmallVector<Value *> WorkList;
1052 WorkList.push_back(StartPtr);
1053
1054 while (!WorkList.empty()) {
1055 Value *Ptr = WorkList.pop_back_val();
1056 if (!Visited.insert(Ptr).second)
1057 continue;
1058 auto *PN = dyn_cast<PHINode>(Ptr);
1059 // SCEV does not look through non-header PHIs inside the loop. Such phis
1060 // can be analyzed by adding separate accesses for each incoming pointer
1061 // value.
1062 if (PN && InnermostLoop.contains(PN->getParent()) &&
1063 PN->getParent() != InnermostLoop.getHeader()) {
1064 llvm::append_range(WorkList, PN->incoming_values());
1065 } else
1066 AddPointer(Ptr);
1067 }
1068}
1069
1070// Walk back through the IR for a pointer, looking for a select like the
1071// following:
1072//
1073// %offset = select i1 %cmp, i64 %a, i64 %b
1074// %addr = getelementptr double, double* %base, i64 %offset
1075// %ld = load double, double* %addr, align 8
1076//
1077// We won't be able to form a single SCEVAddRecExpr from this since the
1078// address for each loop iteration depends on %cmp. We could potentially
1079// produce multiple valid SCEVAddRecExprs, though, and check all of them for
1080// memory safety/aliasing if needed.
1081//
1082// If we encounter some IR we don't yet handle, or something obviously fine
1083// like a constant, then we just add the SCEV for that term to the list passed
1084// in by the caller. If we have a node that may potentially yield a valid
1085// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
1086// ourselves before adding to the list.
1087static void findForkedSCEVs(
1088 ScalarEvolution *SE, const Loop *L, Value *Ptr,
1089 SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
1090 unsigned Depth) {
1091 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
1092 // we've exceeded our limit on recursion, just return whatever we have
1093 // regardless of whether it can be used for a forked pointer or not, along
1094 // with an indication of whether it might be a poison or undef value.
1095 const SCEV *Scev = SE->getSCEV(Ptr);
1096 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
1097 !isa<Instruction>(Ptr) || Depth == 0) {
1098 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1099 return;
1100 }
1101
1102 Depth--;
1103
1104 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
1105 return get<1>(S);
1106 };
1107
1108 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
1109 switch (Opcode) {
1110 case Instruction::Add:
1111 return SE->getAddExpr(L, R);
1112 case Instruction::Sub:
1113 return SE->getMinusSCEV(L, R);
1114 default:
1115 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
1116 }
1117 };
1118
1119 Instruction *I = cast<Instruction>(Ptr);
1120 unsigned Opcode = I->getOpcode();
1121 switch (Opcode) {
1122 case Instruction::GetElementPtr: {
1123 auto *GEP = cast<GetElementPtrInst>(I);
1124 Type *SourceTy = GEP->getSourceElementType();
1125 // We only handle base + single offset GEPs here for now.
1126 // Not dealing with preexisting gathers yet, so no vectors.
1127 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
1128 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
1129 break;
1130 }
1131 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
1132 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
1133 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
1134 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
1135
1136 // See if we need to freeze our fork...
1137 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
1138 any_of(OffsetScevs, UndefPoisonCheck);
1139
1140 // Check that we only have a single fork, on either the base or the offset.
1141 // Copy the SCEV across for the one without a fork in order to generate
1142 // the full SCEV for both sides of the GEP.
1143 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
1144 BaseScevs.push_back(BaseScevs[0]);
1145 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
1146 OffsetScevs.push_back(OffsetScevs[0]);
1147 else {
1148 ScevList.emplace_back(Scev, NeedsFreeze);
1149 break;
1150 }
1151
1152 Type *IntPtrTy = SE->getEffectiveSCEVType(GEP->getPointerOperandType());
1153
1154 // Find the size of the type being pointed to. We only have a single
1155 // index term (guarded above) so we don't need to index into arrays or
1156 // structures, just get the size of the scalar value.
1157 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
1158
1159 for (auto [B, O] : zip(BaseScevs, OffsetScevs)) {
1160 const SCEV *Base = get<0>(B);
1161 const SCEV *Offset = get<0>(O);
1162
1163 // Scale up the offsets by the size of the type, then add to the bases.
1164 const SCEV *Scaled =
1165 SE->getMulExpr(Size, SE->getTruncateOrSignExtend(Offset, IntPtrTy));
1166 ScevList.emplace_back(SE->getAddExpr(Base, Scaled), NeedsFreeze);
1167 }
1168 break;
1169 }
1170 case Instruction::Select: {
1171 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
1172 // A select means we've found a forked pointer, but we currently only
1173 // support a single select per pointer so if there's another behind this
1174 // then we just bail out and return the generic SCEV.
1175 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1176 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
1177 if (ChildScevs.size() == 2)
1178 append_range(ScevList, ChildScevs);
1179 else
1180 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1181 break;
1182 }
1183 case Instruction::PHI: {
1184 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
1185 // A phi means we've found a forked pointer, but we currently only
1186 // support a single phi per pointer so if there's another behind this
1187 // then we just bail out and return the generic SCEV.
1188 if (I->getNumOperands() == 2) {
1189 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
1190 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1191 }
1192 if (ChildScevs.size() == 2)
1193 append_range(ScevList, ChildScevs);
1194 else
1195 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1196 break;
1197 }
1198 case Instruction::Add:
1199 case Instruction::Sub: {
1200 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
1201 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
1202 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1203 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1204
1205 // See if we need to freeze our fork...
1206 bool NeedsFreeze =
1207 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1208
1209 // Check that we only have a single fork, on either the left or right side.
1210 // Copy the SCEV across for the one without a fork in order to generate
1211 // the full SCEV for both sides of the BinOp.
1212 if (LScevs.size() == 2 && RScevs.size() == 1)
1213 RScevs.push_back(RScevs[0]);
1214 else if (RScevs.size() == 2 && LScevs.size() == 1)
1215 LScevs.push_back(LScevs[0]);
1216 else {
1217 ScevList.emplace_back(Scev, NeedsFreeze);
1218 break;
1219 }
1220
1221 for (auto [L, R] : zip(LScevs, RScevs))
1222 ScevList.emplace_back(GetBinOpExpr(Opcode, get<0>(L), get<0>(R)),
1223 NeedsFreeze);
1224 break;
1225 }
1226 default:
1227 // Just return the current SCEV if we haven't handled the instruction yet.
1228 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1229 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1230 break;
1231 }
1232}
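// Example (illustrative): for the select-based IR shown before this function,
//   %offset = select i1 %cmp, i64 %a, i64 %b
//   %addr = getelementptr double, ptr %base, i64 %offset
// the GetElementPtr case produces two SCEVs, roughly
//   (%base + (8 * %a)) and (%base + (8 * %b)),
// and each forked SCEV is later bounds- and wrap-checked separately so that
// runtime checks can cover both possible address streams.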
1233
1234bool AccessAnalysis::createCheckForAccess(
1235 RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
1236 const DenseMap<Value *, const SCEV *> &StridesMap,
1237 DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
1238 unsigned &RunningDepId, unsigned ASId, bool Assume) {
1239 Value *Ptr = Access.getPointer();
1240 ScalarEvolution *SE = PSE.getSE();
1241 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1242
1243 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RTCheckPtrs;
1244 findForkedSCEVs(SE, TheLoop, Ptr, RTCheckPtrs, MaxForkedSCEVDepth);
1245 assert(!RTCheckPtrs.empty() &&
1246 "Must have some runtime-check pointer candidates");
1247
1248 // RTCheckPtrs must have size 2 if there are forked pointers. Otherwise, there
1249 // are no forked pointers; call replaceSymbolicStrideSCEV in this case.
1250 auto IsLoopInvariantOrAR =
1251 [&SE, &TheLoop](const PointerIntPair<const SCEV *, 1, bool> &P) {
1252 return SE->isLoopInvariant(P.getPointer(), TheLoop) ||
1253 isa<SCEVAddRecExpr>(P.getPointer());
1254 };
1255 if (RTCheckPtrs.size() == 2 && all_of(RTCheckPtrs, IsLoopInvariantOrAR)) {
1256 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n";
1257 for (const auto &[Idx, Q] : enumerate(RTCheckPtrs)) dbgs()
1258 << "\t(" << Idx << ") " << *Q.getPointer() << "\n");
1259 } else {
1260 RTCheckPtrs = {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1261 }
1262
1263 /// Check whether all pointers can participate in a runtime bounds check. They
1264 /// must either be invariant or non-wrapping affine AddRecs.
1265 for (auto &P : RTCheckPtrs) {
1266 // The bounds for a loop-invariant pointer are trivial.
1267 if (SE->isLoopInvariant(P.getPointer(), TheLoop))
1268 continue;
1269
1270 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(P.getPointer());
1271 if (!AR && Assume)
1272 AR = PSE.getAsAddRec(Ptr);
1273 if (!AR || !AR->isAffine())
1274 return false;
1275
1276 // If there's only one option for Ptr, look it up after bounds and wrap
1277 // checking, because assumptions might have been added to PSE.
1278 if (RTCheckPtrs.size() == 1) {
1279 AR =
1281 P.setPointer(AR);
1282 }
1283
1284 if (!isNoWrap(PSE, AR, RTCheckPtrs.size() == 1 ? Ptr : nullptr, AccessTy,
1285 TheLoop, Assume))
1286 return false;
1287 }
1288
1289 for (const auto &[PtrExpr, NeedsFreeze] : RTCheckPtrs) {
1290 // The id of the dependence set.
1291 unsigned DepId;
1292
1293 if (isDependencyCheckNeeded()) {
1294 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1295 unsigned &LeaderId = DepSetId[Leader];
1296 if (!LeaderId)
1297 LeaderId = RunningDepId++;
1298 DepId = LeaderId;
1299 } else
1300 // Each access has its own dependence set.
1301 DepId = RunningDepId++;
1302
1303 bool IsWrite = Access.getInt();
1304 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1305 NeedsFreeze);
1306 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1307 }
1308
1309 return true;
1310}
1311
1312bool AccessAnalysis::canCheckPtrAtRT(
1313 RuntimePointerChecking &RtCheck, Loop *TheLoop,
1314 const DenseMap<Value *, const SCEV *> &StridesMap, Value *&UncomputablePtr,
1315 bool AllowPartial) {
1316 // Find pointers with computable bounds. We are going to use this information
1317 // to place a runtime bound check.
1318 bool CanDoRT = true;
1319
1320 bool MayNeedRTCheck = false;
1321 if (!IsRTCheckAnalysisNeeded) return true;
1322
1323 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1324
1325 // We assign a consecutive id to accesses from different alias sets.
1326 // Accesses between different groups don't need to be checked.
1327 unsigned ASId = 0;
1328 for (const auto &AS : AST) {
1329 int NumReadPtrChecks = 0;
1330 int NumWritePtrChecks = 0;
1331 bool CanDoAliasSetRT = true;
1332 ++ASId;
1333 auto ASPointers = AS.getPointers();
1334
1335 // We assign consecutive ids to accesses from different dependence sets.
1336 // Accesses within the same set don't need a runtime check.
1337 unsigned RunningDepId = 1;
1338 DenseMap<Value *, unsigned> DepSetId;
1339
1340 SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;
1341
1342 // First, count how many write and read accesses are in the alias set. Also
1343 // collect MemAccessInfos for later.
1344 SmallVector<MemAccessInfo, 8> AccessInfos;
1345 for (const Value *ConstPtr : ASPointers) {
1346 Value *Ptr = const_cast<Value *>(ConstPtr);
1347 bool IsWrite = Accesses.contains(MemAccessInfo(Ptr, true));
1348 if (IsWrite)
1349 ++NumWritePtrChecks;
1350 else
1351 ++NumReadPtrChecks;
1352 AccessInfos.emplace_back(Ptr, IsWrite);
1353 }
1354
1355 // We do not need runtime checks for this alias set, if there are no writes
1356 // or a single write and no reads.
1357 if (NumWritePtrChecks == 0 ||
1358 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1359 assert((ASPointers.size() <= 1 ||
1360 all_of(ASPointers,
1361 [this](const Value *Ptr) {
1362 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1363 true);
1364 return !DepCands.contains(AccessWrite);
1365 })) &&
1366 "Can only skip updating CanDoRT below, if all entries in AS "
1367 "are reads or there is at most 1 entry");
1368 continue;
1369 }
1370
1371 for (auto &Access : AccessInfos) {
1372 for (const auto &AccessTy : Accesses[Access]) {
1373 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1374 DepSetId, TheLoop, RunningDepId, ASId,
1375 false)) {
1376 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1377 << *Access.getPointer() << '\n');
1378 Retries.emplace_back(Access, AccessTy);
1379 CanDoAliasSetRT = false;
1380 }
1381 }
1382 }
1383
1384 // Note that this function computes CanDoRT and MayNeedRTCheck
1385 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1386 // we have a pointer for which we couldn't find the bounds but we don't
1387 // actually need to emit any checks so it does not matter.
1388 //
1389 // We need runtime checks for this alias set, if there are at least 2
1390 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1391 // any bound checks (because in that case the number of dependence sets is
1392 // incomplete).
1393 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1394
1395 // We need to perform run-time alias checks, but some pointers had bounds
1396 // that couldn't be checked.
1397 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1398 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1399 // We know that we need these checks, so we can now be more aggressive
1400 // and add further checks if required (overflow checks).
1401 CanDoAliasSetRT = true;
1402 for (const auto &[Access, AccessTy] : Retries) {
1403 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1404 DepSetId, TheLoop, RunningDepId, ASId,
1405 /*Assume=*/true)) {
1406 CanDoAliasSetRT = false;
1407 UncomputablePtr = Access.getPointer();
1408 if (!AllowPartial)
1409 break;
1410 }
1411 }
1412 }
1413
1414 CanDoRT &= CanDoAliasSetRT;
1415 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1416 ++ASId;
1417 }
1418
1419 // If the pointers that we would use for the bounds comparison have different
1420 // address spaces, assume the values aren't directly comparable, so we can't
1421 // use them for the runtime check. We also have to assume they could
1422 // overlap. In the future there should be metadata for whether address spaces
1423 // are disjoint.
1424 unsigned NumPointers = RtCheck.Pointers.size();
1425 for (unsigned i = 0; i < NumPointers; ++i) {
1426 for (unsigned j = i + 1; j < NumPointers; ++j) {
1427 // Only need to check pointers between two different dependency sets.
1428 if (RtCheck.Pointers[i].DependencySetId ==
1429 RtCheck.Pointers[j].DependencySetId)
1430 continue;
1431 // Only need to check pointers in the same alias set.
1432 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1433 continue;
1434
1435 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1436 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1437
1438 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1439 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1440 if (ASi != ASj) {
1441 LLVM_DEBUG(
1442 dbgs() << "LAA: Runtime check would require comparison between"
1443 " different address spaces\n");
1444 return false;
1445 }
1446 }
1447 }
1448
1449 if (MayNeedRTCheck && (CanDoRT || AllowPartial))
1450 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1451
1452 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1453 << " pointer comparisons.\n");
1454
1455 // If we can do run-time checks, but there are no checks, no runtime checks
1456 // are needed. This can happen when all pointers point to the same underlying
1457 // object for example.
1458 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1459
1460 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1461 assert(CanDoRTIfNeeded == (CanDoRT || !MayNeedRTCheck) &&
1462 "CanDoRTIfNeeded depends on RtCheck.Need");
1463 if (!CanDoRTIfNeeded && !AllowPartial)
1464 RtCheck.reset();
1465 return CanDoRTIfNeeded;
1466}
1467
1468void AccessAnalysis::processMemAccesses() {
1469 // We process the set twice: first we process read-write pointers, last we
1470 // process read-only pointers. This allows us to skip dependence tests for
1471 // read-only pointers.
1472
1473 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1474 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1475 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1476 LLVM_DEBUG({
1477 for (const auto &[A, _] : Accesses)
1478 dbgs() << "\t" << *A.getPointer() << " ("
1479 << (A.getInt()
1480 ? "write"
1481 : (ReadOnlyPtr.contains(A.getPointer()) ? "read-only"
1482 : "read"))
1483 << ")\n";
1484 });
1485
1486 // The AliasSetTracker has nicely partitioned our pointers by metadata
1487 // compatibility and potential for underlying-object overlap. As a result, we
1488 // only need to check for potential pointer dependencies within each alias
1489 // set.
1490 for (const auto &AS : AST) {
1491 // Note that both the alias-set tracker and the alias sets themselves use
1492 // ordered collections internally and so the iteration order here is
1493 // deterministic.
1494 auto ASPointers = AS.getPointers();
1495
1496 bool SetHasWrite = false;
1497
1498 // Map of (pointer to underlying objects, accessed address space) to last
1499 // access encountered.
1500 typedef DenseMap<std::pair<const Value *, unsigned>, MemAccessInfo>
1501 UnderlyingObjToAccessMap;
1502 UnderlyingObjToAccessMap ObjToLastAccess;
1503
1504 // Set of access to check after all writes have been processed.
1505 PtrAccessMap DeferredAccesses;
1506
1507 // Iterate over each alias set twice, once to process read/write pointers,
1508 // and then to process read-only pointers.
1509 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1510 bool UseDeferred = SetIteration > 0;
1511 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1512
1513 for (const Value *ConstPtr : ASPointers) {
1514 Value *Ptr = const_cast<Value *>(ConstPtr);
1515
1516 // For a single memory access in AliasSetTracker, Accesses may contain
1517 // both read and write, and they both need to be handled for CheckDeps.
1518 for (const auto &[AC, _] : S) {
1519 if (AC.getPointer() != Ptr)
1520 continue;
1521
1522 bool IsWrite = AC.getInt();
1523
1524 // If we're using the deferred access set, then it contains only
1525 // reads.
1526 bool IsReadOnlyPtr = ReadOnlyPtr.contains(Ptr) && !IsWrite;
1527 if (UseDeferred && !IsReadOnlyPtr)
1528 continue;
1529 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1530 // read or a write.
1531 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1532 S.contains(MemAccessInfo(Ptr, false))) &&
1533 "Alias-set pointer not in the access set?");
1534
1535 MemAccessInfo Access(Ptr, IsWrite);
1536 DepCands.insert(Access);
1537
1538 // Memorize read-only pointers for later processing and skip them in
1539 // the first round (they need to be checked after we have seen all
1540 // write pointers). Note: we also mark pointers that are not
1541 // consecutive as "read-only" pointers (so that we check
1542 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1543 if (!UseDeferred && IsReadOnlyPtr) {
1544 // We only use the pointer keys, the types vector values don't
1545 // matter.
1546 DeferredAccesses.insert({Access, {}});
1547 continue;
1548 }
1549
1550 // If this is a write - check other reads and writes for conflicts. If
1551 // this is a read only check other writes for conflicts (but only if
1552 // there is no other write to the ptr - this is an optimization to
1553 // catch "a[i] = a[i] + " without having to do a dependence check).
1554 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1555 CheckDeps.push_back(Access);
1556 IsRTCheckAnalysisNeeded = true;
1557 }
1558
1559 if (IsWrite)
1560 SetHasWrite = true;
1561
1562 // Create sets of pointers connected by a shared alias set and
1563 // underlying object.
1564 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1565 UOs = {};
1566 ::getUnderlyingObjects(Ptr, UOs, LI);
1567 LLVM_DEBUG(dbgs()
1568 << "Underlying objects for pointer " << *Ptr << "\n");
1569 for (const Value *UnderlyingObj : UOs) {
1570 // nullptr never aliases; don't join sets for pointers that have "null"
1571 // in their UnderlyingObjects list.
1572 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1573 !NullPointerIsDefined(
1574 TheLoop->getHeader()->getParent(),
1575 UnderlyingObj->getType()->getPointerAddressSpace()))
1576 continue;
1577
1578 auto [It, Inserted] = ObjToLastAccess.try_emplace(
1579 {UnderlyingObj,
1580 cast<PointerType>(Ptr->getType())->getAddressSpace()},
1581 Access);
1582 if (!Inserted) {
1583 DepCands.unionSets(Access, It->second);
1584 It->second = Access;
1585 }
1586
1587 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1588 }
1589 }
1590 }
1591 }
1592 }
1593}
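// Illustrative example (not from the original source): in a loop that
// accesses A[i], A[i+1] and B[i], where alias analysis can tell A and B
// apart, the A accesses and the B access land in different alias sets and
// are processed independently. The two A accesses share the underlying
// object A, so they are unioned into one dependence set in DepCands, while
// the B access forms its own set; no A/B dependence is ever queried.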
1594
1595/// Check whether the access through \p Ptr has a constant stride.
1596std::optional<int64_t>
1597llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
1598 const Loop *Lp,
1599 const DenseMap<Value *, const SCEV *> &StridesMap,
1600 bool Assume, bool ShouldCheckWrap) {
1601 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1602 if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
1603 return 0;
1604
1605 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
1606
1607 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1608 if (Assume && !AR)
1609 AR = PSE.getAsAddRec(Ptr);
1610
1611 if (!AR) {
1612 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1613 << " SCEV: " << *PtrScev << "\n");
1614 return std::nullopt;
1615 }
1616
1617 std::optional<int64_t> Stride =
1618 getStrideFromAddRec(AR, Lp, AccessTy, Ptr, PSE);
1619 if (!ShouldCheckWrap || !Stride)
1620 return Stride;
1621
1622 if (isNoWrap(PSE, AR, Ptr, AccessTy, Lp, Assume, Stride))
1623 return Stride;
1624
1625 LLVM_DEBUG(
1626 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1627 << *Ptr << " SCEV: " << *AR << "\n");
1628 return std::nullopt;
1629}
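// Illustrative example (not from the original source): for the access
//   for (i = 0; i < n; ++i)
//     A[3 * i] = 0;              // A is an 'int *'
// the pointer SCEV is {A,+,12}, so getPtrStride with AccessTy == i32 returns
// a stride of 3 (measured in elements), provided the AddRec can be shown not
// to wrap; a loop-invariant pointer yields 0, and a pointer such as A[B[i]]
// that is not an affine AddRec yields std::nullopt.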
1630
1631std::optional<int64_t> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1632 Type *ElemTyB, Value *PtrB,
1633 const DataLayout &DL,
1634 ScalarEvolution &SE,
1635 bool StrictCheck, bool CheckType) {
1636 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1637
1638 // Make sure that A and B are different pointers.
1639 if (PtrA == PtrB)
1640 return 0;
1641
1642 // Make sure that the element types are the same if required.
1643 if (CheckType && ElemTyA != ElemTyB)
1644 return std::nullopt;
1645
1646 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1647 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1648
1649 // Check that the address spaces match.
1650 if (ASA != ASB)
1651 return std::nullopt;
1652 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1653
1654 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1655 const Value *PtrA1 = PtrA->stripAndAccumulateConstantOffsets(
1656 DL, OffsetA, /*AllowNonInbounds=*/true);
1657 const Value *PtrB1 = PtrB->stripAndAccumulateConstantOffsets(
1658 DL, OffsetB, /*AllowNonInbounds=*/true);
1659
1660 std::optional<int64_t> Val;
1661 if (PtrA1 == PtrB1) {
1662 // Retrieve the address space again as pointer stripping now tracks through
1663 // `addrspacecast`.
1664 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1665 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1666 // Check that the address spaces match and that the pointers are valid.
1667 if (ASA != ASB)
1668 return std::nullopt;
1669
1670 IdxWidth = DL.getIndexSizeInBits(ASA);
1671 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1672 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1673
1674 OffsetB -= OffsetA;
1675 Val = OffsetB.trySExtValue();
1676 } else {
1677 // Otherwise compute the distance with SCEV between the base pointers.
1678 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1679 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1680 std::optional<APInt> Diff =
1681 SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
1682 if (!Diff)
1683 return std::nullopt;
1684 Val = Diff->trySExtValue();
1685 }
1686
1687 if (!Val)
1688 return std::nullopt;
1689
1690 int64_t Size = DL.getTypeStoreSize(ElemTyA);
1691 int64_t Dist = *Val / Size;
1692
1693 // Ensure that the calculated distance matches the type-based one after
1694 // stripping all the bitcasts from the provided pointers.
1695 if (!StrictCheck || Dist * Size == Val)
1696 return Dist;
1697 return std::nullopt;
1698}
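// Illustrative example (not from the original source): given
//   int *PtrA = Base;
//   int *PtrB = Base + 3;        // 12 bytes past Base
// getPointersDiff(Int32Ty, PtrA, Int32Ty, PtrB, DL, SE) returns 3, the
// distance in units of ElemTyA, and swapping PtrA and PtrB yields -3. With
// StrictCheck, a byte distance that is not a multiple of the element size
// (say 14 bytes) yields std::nullopt instead.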
1699
1700bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1701 const DataLayout &DL, ScalarEvolution &SE,
1702 SmallVectorImpl<unsigned> &SortedIndices) {
1703 assert(llvm::all_of(
1704 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1705 "Expected list of pointer operands.");
1706 // Walk over the pointers, and map each of them to an offset relative to
1707 // the first pointer in the array.
1708 Value *Ptr0 = VL[0];
1709
1710 using DistOrdPair = std::pair<int64_t, unsigned>;
1711 auto Compare = llvm::less_first();
1712 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1713 Offsets.emplace(0, 0);
1714 bool IsConsecutive = true;
1715 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1716 std::optional<int64_t> Diff =
1717 getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1718 /*StrictCheck=*/true);
1719 if (!Diff)
1720 return false;
1721
1722 // Check if the pointer with the same offset is found.
1723 int64_t Offset = *Diff;
1724 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1725 if (!IsInserted)
1726 return false;
1727 // Consecutive order if the inserted element is the last one.
1728 IsConsecutive &= std::next(It) == Offsets.end();
1729 }
1730 SortedIndices.clear();
1731 if (!IsConsecutive) {
1732 // Fill SortedIndices array only if it is non-consecutive.
1733 SortedIndices.resize(VL.size());
1734 for (auto [Idx, Off] : enumerate(Offsets))
1735 SortedIndices[Idx] = Off.second;
1736 }
1737 return true;
1738}
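// Illustrative example (not from the original source): for
//   VL = { A + 2, A, A + 1 }     // element type i32
// the offsets relative to VL[0] are { 0, -2, -1 }, so the pointers are not
// consecutive in the original order and sortPtrAccesses returns true with
// SortedIndices = { 1, 2, 0 }; a repeated offset, or an offset that cannot
// be computed, makes it return false instead.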
1739
1740/// Returns true if the memory operations \p A and \p B are consecutive.
1741bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1742 ScalarEvolution &SE, bool CheckType) {
1743 Value *PtrA = getLoadStorePointerOperand(A);
1744 Value *PtrB = getLoadStorePointerOperand(B);
1745 if (!PtrA || !PtrB)
1746 return false;
1747 Type *ElemTyA = getLoadStoreType(A);
1748 Type *ElemTyB = getLoadStoreType(B);
1749 std::optional<int64_t> Diff =
1750 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1751 /*StrictCheck=*/true, CheckType);
1752 return Diff == 1;
1753}
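// Illustrative example (not from the original source): for two 4-byte loads
// from A[i] and A[i+1], the element distance from the first to the second is
// 1, so isConsecutiveAccess returns true; with the operands swapped the
// distance is -1 and it returns false.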
1754
1755void MemoryDepChecker::addAccess(StoreInst *SI) {
1756 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1757 [this, SI](Value *Ptr) {
1758 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1759 InstMap.push_back(SI);
1760 ++AccessIdx;
1761 });
1762}
1763
1764void MemoryDepChecker::addAccess(LoadInst *LI) {
1765 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1766 [this, LI](Value *Ptr) {
1767 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1768 InstMap.push_back(LI);
1769 ++AccessIdx;
1770 });
1771}
1772
1791
1792bool MemoryDepChecker::Dependence::isBackward() const {
1793 switch (Type) {
1794 case NoDep:
1795 case Forward:
1796 case ForwardButPreventsForwarding:
1797 case Unknown:
1798 case IndirectUnsafe:
1799 return false;
1800
1801 case BackwardVectorizable:
1802 case Backward:
1803 case BackwardVectorizableButPreventsForwarding:
1804 return true;
1805 }
1806 llvm_unreachable("unexpected DepType!");
1807}
1808
1812
1813bool MemoryDepChecker::Dependence::isForward() const {
1814 switch (Type) {
1815 case Forward:
1816 case ForwardButPreventsForwarding:
1817 return true;
1818
1819 case NoDep:
1820 case Unknown:
1821 case BackwardVectorizable:
1822 case Backward:
1823 case BackwardVectorizableButPreventsForwarding:
1824 case IndirectUnsafe:
1825 return false;
1826 }
1827 llvm_unreachable("unexpected DepType!");
1828}
1829
1830bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1831 uint64_t TypeByteSize,
1832 unsigned CommonStride) {
1833 // If loads occur at a distance that is not a multiple of a feasible vector
1834 // factor, store-load forwarding does not take place.
1835 // Positive dependences might cause trouble because vectorizing them might
1836 // prevent store-to-load forwarding, making vectorized code run a lot slower.
1837 // E.g. a[i] = a[i-3] ^ a[i-8];
1838 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
1839 // hence on your typical architecture store-load forwarding does not take
1840 // place. Vectorizing in such cases does not make sense.
1841 // Store-load forwarding distance.
1842
1843 // After this many iterations store-to-load forwarding conflicts should not
1844 // cause any slowdowns.
1845 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1846 // Maximum vector factor.
1847 uint64_t MaxVFWithoutSLForwardIssuesPowerOf2 =
1848 std::min(VectorizerParams::MaxVectorWidth * TypeByteSize,
1849 MaxStoreLoadForwardSafeDistanceInBits);
1850
1851 // Compute the smallest VF at which the store and load would be misaligned.
1852 for (uint64_t VF = 2 * TypeByteSize;
1853 VF <= MaxVFWithoutSLForwardIssuesPowerOf2; VF *= 2) {
1854 // If the number of vector iterations between the store and the load is
1855 // small, we could incur conflicts.
1856 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1857 MaxVFWithoutSLForwardIssuesPowerOf2 = (VF >> 1);
1858 break;
1859 }
1860 }
1861
1862 if (MaxVFWithoutSLForwardIssuesPowerOf2 < 2 * TypeByteSize) {
1863 LLVM_DEBUG(
1864 dbgs() << "LAA: Distance " << Distance
1865 << " that could cause a store-load forwarding conflict\n");
1866 return true;
1867 }
1868
1869 if (CommonStride &&
1870 MaxVFWithoutSLForwardIssuesPowerOf2 <
1871 MaxStoreLoadForwardSafeDistanceInBits &&
1872 MaxVFWithoutSLForwardIssuesPowerOf2 !=
1873 VectorizerParams::MaxVectorWidth * TypeByteSize) {
1874 uint64_t MaxVF =
1875 bit_floor(MaxVFWithoutSLForwardIssuesPowerOf2 / CommonStride);
1876 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1877 MaxStoreLoadForwardSafeDistanceInBits =
1878 std::min(MaxStoreLoadForwardSafeDistanceInBits, MaxVFInBits);
1879 }
1880 return false;
1881}
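// Illustrative worked example (not from the original source): for the
// pattern above with 4-byte elements and Distance = 12 bytes (a[i] = a[i-3]),
// the search starts at VF = 8 bytes: 12 % 8 != 0 and 12 / 8 < 32, so the
// largest conflict-free power-of-2 VF is 4 bytes, which is below the
// two-element minimum, and the function reports a possible store-to-load
// forwarding slowdown.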
1882
1883void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1884 if (Status < S)
1885 Status = S;
1886}
1887
1888/// Given a dependence-distance \p Dist between two memory accesses that have
1889/// strides in the same direction, with the maximum absolute stride given in
1890/// \p MaxStride, in a loop whose maximum backedge taken count is \p MaxBTC,
1891/// check whether it is possible to prove statically that the dependence
1892/// distance is larger than the range that the accesses will travel through
1893/// the execution of the loop. If so, return true; otherwise return false. This is useful
1894/// for example in loops such as the following (PR31098):
1895///
1896/// for (i = 0; i < D; ++i) {
1897/// = out[i];
1898/// out[i+D] =
1899/// }
1900static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1901 const SCEV &MaxBTC, const SCEV &Dist,
1902 uint64_t MaxStride) {
1903
1904 // If we can prove that
1905 // (**) |Dist| > MaxBTC * Step
1906 // where Step is the absolute stride of the memory accesses in bytes,
1907 // then there is no dependence.
1908 //
1909 // Rationale:
1910 // We basically want to check if the absolute distance (|Dist/Step|)
1911 // is >= the loop iteration count (or > MaxBTC).
1912 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1913 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1914 // that the dependence distance is >= VF; This is checked elsewhere.
1915 // But in some cases we can prune dependence distances early, and
1916 // even before selecting the VF, and without a runtime test, by comparing
1917 // the distance against the loop iteration count. Since the vectorized code
1918 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1919 // also guarantees that distance >= VF.
1920 //
1921 const SCEV *Step = SE.getConstant(MaxBTC.getType(), MaxStride);
1922 const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1923
1924 const SCEV *CastedDist = &Dist;
1925 const SCEV *CastedProduct = Product;
1926 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1927 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1928
1929 // The dependence distance can be positive/negative, so we sign extend Dist;
1930 // The multiplication of the absolute stride in bytes and the
1931 // backedgeTakenCount is non-negative, so we zero extend Product.
1932 if (DistTypeSizeBits > ProductTypeSizeBits)
1933 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1934 else
1935 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1936
1937 // Is Dist - (MaxBTC * Step) > 0 ?
1938 // (If so, then we have proven (**) because |Dist| >= Dist)
1939 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1940 if (SE.isKnownPositive(Minus))
1941 return true;
1942
1943 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1944 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1945 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1946 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1947 return SE.isKnownPositive(Minus);
1948}
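// Illustrative worked example (not from the original source): in the loop
// above with D == 100 and 4-byte elements, Dist is 400 bytes, MaxStride is 4
// and MaxBTC is 99, so Dist - MaxBTC * Step == 400 - 396 == 4 > 0 and the
// accesses to out[i] and out[i + D] are proven not to overlap within a single
// execution of the loop.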
1949
1950/// Check the dependence for two accesses with the same stride \p Stride.
1951/// \p Distance is the positive distance in bytes, and \p TypeByteSize is type
1952/// size in bytes.
1953///
1954/// \returns true if they are independent.
1955static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1956 uint64_t TypeByteSize) {
1957 assert(Stride > 1 && "The stride must be greater than 1");
1958 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1959 assert(Distance > 0 && "The distance must be non-zero");
1960
1961 // Skip if the distance is not a multiple of the type byte size.
1962 if (Distance % TypeByteSize)
1963 return false;
1964
1965 // No dependence if the distance is not a multiple of the stride.
1966 // E.g.
1967 // for (i = 0; i < 1024 ; i += 4)
1968 // A[i+2] = A[i] + 1;
1969 //
1970 // Two accesses in memory (distance is 2, stride is 4):
1971 // | A[0] | | | | A[4] | | | |
1972 // | | | A[2] | | | | A[6] | |
1973 //
1974 // E.g.
1975 // for (i = 0; i < 1024 ; i += 3)
1976 // A[i+4] = A[i] + 1;
1977 //
1978 // Two accesses in memory (distance is 4, stride is 3):
1979 // | A[0] | | | A[3] | | | A[6] | | |
1980 // | | | | | A[4] | | | A[7] | |
1981 return Distance % Stride;
1982}
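// Illustrative worked example (not from the original source): for the first
// loop above with 4-byte elements, the caller passes Distance = 8 and
// Stride = 16 (both already scaled to bytes) with TypeByteSize = 4; 8 is a
// multiple of 4 but not of 16, so the two access streams interleave without
// ever touching the same element and the function returns true.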
1983
1984bool MemoryDepChecker::areAccessesCompletelyBeforeOrAfter(const SCEV *Src,
1985 Type *SrcTy,
1986 const SCEV *Sink,
1987 Type *SinkTy) {
1988 const SCEV *BTC = PSE.getBackedgeTakenCount();
1989 const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
1990 ScalarEvolution &SE = *PSE.getSE();
1991 const auto &[SrcStart_, SrcEnd_] =
1992 getStartAndEndForAccess(InnermostLoop, Src, SrcTy, BTC, SymbolicMaxBTC,
1993 &SE, &PointerBounds, DT, AC, LoopGuards);
1994 if (isa<SCEVCouldNotCompute>(SrcStart_) || isa<SCEVCouldNotCompute>(SrcEnd_))
1995 return false;
1996
1997 const auto &[SinkStart_, SinkEnd_] =
1998 getStartAndEndForAccess(InnermostLoop, Sink, SinkTy, BTC, SymbolicMaxBTC,
1999 &SE, &PointerBounds, DT, AC, LoopGuards);
2000 if (isa<SCEVCouldNotCompute>(SinkStart_) ||
2001 isa<SCEVCouldNotCompute>(SinkEnd_))
2002 return false;
2003
2004 if (!LoopGuards)
2005 LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2006
2007 auto SrcEnd = SE.applyLoopGuards(SrcEnd_, *LoopGuards);
2008 auto SinkStart = SE.applyLoopGuards(SinkStart_, *LoopGuards);
2009 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
2010 return true;
2011
2012 auto SinkEnd = SE.applyLoopGuards(SinkEnd_, *LoopGuards);
2013 auto SrcStart = SE.applyLoopGuards(SrcStart_, *LoopGuards);
2014 return SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart);
2015}
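// Illustrative example (not from the original source): for
//   for (i = 0; i < n; ++i) {
//     A[i] = ...;        // Src accesses [A, A + n)
//     ... = A[i + n];    // Sink accesses [A + n, A + 2n)
//   }
// with 1-byte elements, SrcEnd (A + n) is known to be <= SinkStart (A + n),
// so the two accesses are disjoint across the whole loop even though their
// distance is not a compile-time constant (assuming the computed bounds do
// not wrap).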
2016
2017std::variant<MemoryDepChecker::Dependence::DepType,
2018 MemoryDepChecker::DepDistanceStrideAndSizeInfo>
2019MemoryDepChecker::getDependenceDistanceStrideAndSize(
2020 const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
2021 const AccessAnalysis::MemAccessInfo &B, Instruction *BInst) {
2022 const auto &DL = InnermostLoop->getHeader()->getDataLayout();
2023 auto &SE = *PSE.getSE();
2024 const auto &[APtr, AIsWrite] = A;
2025 const auto &[BPtr, BIsWrite] = B;
2026
2027 // Two reads are independent.
2028 if (!AIsWrite && !BIsWrite)
2030
2031 Type *ATy = getLoadStoreType(AInst);
2032 Type *BTy = getLoadStoreType(BInst);
2033
2034 // We cannot check pointers in different address spaces.
2035 if (APtr->getType()->getPointerAddressSpace() !=
2036 BPtr->getType()->getPointerAddressSpace())
2037 return MemoryDepChecker::Dependence::Unknown;
2038
2039 std::optional<int64_t> StrideAPtr =
2040 getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);
2040 getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);
2041 std::optional<int64_t> StrideBPtr =
2042 getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true, true);
2043
2044 const SCEV *Src = PSE.getSCEV(APtr);
2045 const SCEV *Sink = PSE.getSCEV(BPtr);
2046
2047 // If the induction step is negative we have to invert source and sink of the
2048 // dependence when measuring the distance between them. We should not swap
2049 // AIsWrite with BIsWrite, as their uses expect them in program order.
2050 if (StrideAPtr && *StrideAPtr < 0) {
2051 std::swap(Src, Sink);
2052 std::swap(AInst, BInst);
2053 std::swap(ATy, BTy);
2054 std::swap(StrideAPtr, StrideBPtr);
2055 }
2056
2057 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
2058
2059 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
2060 << "\n");
2061 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
2062 << ": " << *Dist << "\n");
2063
2064 // Need accesses with constant strides and the same direction for further
2065 // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
2066 // similar code or pointer arithmetic that could wrap in the address space.
2067
2068 // If either Src or Sink are not strided (i.e. not a non-wrapping AddRec) and
2069 // not loop-invariant (stride will be 0 in that case), we cannot analyze the
2070 // dependence further and also cannot generate runtime checks.
2071 if (!StrideAPtr || !StrideBPtr) {
2072 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
2073 return MemoryDepChecker::Dependence::IndirectUnsafe;
2074 }
2075
2076 int64_t StrideAPtrInt = *StrideAPtr;
2077 int64_t StrideBPtrInt = *StrideBPtr;
2078 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
2079 << " Sink induction step: " << StrideBPtrInt << "\n");
2080 // At least Src or Sink are loop invariant and the other is strided or
2081 // invariant. We can generate a runtime check to disambiguate the accesses.
2082 if (!StrideAPtrInt || !StrideBPtrInt)
2083 return MemoryDepChecker::Dependence::Unknown;
2084
2085 // Both Src and Sink have a constant stride, check if they are in the same
2086 // direction.
2087 if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {
2088 LLVM_DEBUG(
2089 dbgs() << "Pointer access with strides in different directions\n");
2090 return MemoryDepChecker::Dependence::Unknown;
2091 }
2092
2093 TypeSize AStoreSz = DL.getTypeStoreSize(ATy);
2094 TypeSize BStoreSz = DL.getTypeStoreSize(BTy);
2095
2096 // If store sizes are not the same, set TypeByteSize to zero, so we can check
2097 // it in the caller isDependent.
2098 uint64_t ASz = DL.getTypeAllocSize(ATy);
2099 uint64_t BSz = DL.getTypeAllocSize(BTy);
2100 uint64_t TypeByteSize = (AStoreSz == BStoreSz) ? BSz : 0;
2101
2102 uint64_t StrideAScaled = std::abs(StrideAPtrInt) * ASz;
2103 uint64_t StrideBScaled = std::abs(StrideBPtrInt) * BSz;
2104
2105 uint64_t MaxStride = std::max(StrideAScaled, StrideBScaled);
2106
2107 std::optional<uint64_t> CommonStride;
2108 if (StrideAScaled == StrideBScaled)
2109 CommonStride = StrideAScaled;
2110
2111 // TODO: Historically, we didn't retry with runtime checks when (unscaled)
2112 // strides were different but there is no inherent reason to.
2113 if (!isa<SCEVConstant>(Dist))
2114 ShouldRetryWithRuntimeChecks |= StrideAPtrInt == StrideBPtrInt;
2115
2116 // If distance is a SCEVCouldNotCompute, return Unknown immediately.
2117 if (isa<SCEVCouldNotCompute>(Dist)) {
2118 LLVM_DEBUG(dbgs() << "LAA: Uncomputable distance.\n");
2119 return Dependence::Unknown;
2120 }
2121
2122 return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,
2123 TypeByteSize, AIsWrite, BIsWrite);
2124}
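// Illustrative example (not from the original source): in
//   for (i = n; i > 0; --i)
//     A[i - 1] = A[i];
// both pointers have stride -1, so Src and Sink (together with the types and
// strides) are swapped before computing Dist = Sink - Src. The scaled strides
// are equal, so CommonStride is set and isDependent can reason about the
// resulting constant distance.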
2125
2126MemoryDepChecker::Dependence::DepType
2127MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
2128 const MemAccessInfo &B, unsigned BIdx) {
2129 assert(AIdx < BIdx && "Must pass arguments in program order");
2130
2131 // Check if we can prove that Sink only accesses memory after Src's end or
2132 // vice versa. The helper is used to perform the checks only on the exit paths
2133 // where it helps to improve the analysis result.
2134 auto CheckCompletelyBeforeOrAfter = [&]() {
2135 auto *APtr = A.getPointer();
2136 auto *BPtr = B.getPointer();
2137 Type *ATy = getLoadStoreType(InstMap[AIdx]);
2138 Type *BTy = getLoadStoreType(InstMap[BIdx]);
2139 const SCEV *Src = PSE.getSCEV(APtr);
2140 const SCEV *Sink = PSE.getSCEV(BPtr);
2141 return areAccessesCompletelyBeforeOrAfter(Src, ATy, Sink, BTy);
2142 };
2143
2144 // Get the dependence distance, stride, type size and what access writes for
2145 // the dependence between A and B.
2146 auto Res =
2147 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
2148 if (std::holds_alternative<Dependence::DepType>(Res)) {
2149 if (std::get<Dependence::DepType>(Res) == Dependence::Unknown &&
2150 CheckCompletelyBeforeOrAfter())
2151 return Dependence::NoDep;
2152 return std::get<Dependence::DepType>(Res);
2153 }
2154
2155 auto &[Dist, MaxStride, CommonStride, TypeByteSize, AIsWrite, BIsWrite] =
2156 std::get<DepDistanceStrideAndSizeInfo>(Res);
2157 bool HasSameSize = TypeByteSize > 0;
2158
2159 ScalarEvolution &SE = *PSE.getSE();
2160 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2161
2162 // If the distance between the accesses is larger than their maximum absolute
2163 // stride multiplied by the symbolic maximum backedge taken count (which is an
2164 // upper bound of the number of iterations), the accesses are independent, i.e.
2165 // they are far enough apart that they won't access the same location
2166 // across all loop iterations.
2167 if (HasSameSize &&
2168 isSafeDependenceDistance(
2169 DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()), *Dist, MaxStride))
2170 return Dependence::NoDep;
2171
2172 // The rest of this function relies on ConstDist being at most 64-bits, which
2173 // is checked earlier. Will assert if the calling code changes.
2174 const APInt *APDist = nullptr;
2175 uint64_t ConstDist =
2176 match(Dist, m_scev_APInt(APDist)) ? APDist->abs().getZExtValue() : 0;
2177
2178 // Attempt to prove strided accesses independent.
2179 if (APDist) {
2180 // If the distance between accesses and their strides are known constants,
2181 // check whether the accesses interlace each other.
2182 if (ConstDist > 0 && CommonStride && CommonStride > 1 && HasSameSize &&
2183 areStridedAccessesIndependent(ConstDist, *CommonStride, TypeByteSize)) {
2184 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2185 return Dependence::NoDep;
2186 }
2187 } else {
2188 if (!LoopGuards)
2189 LoopGuards.emplace(
2190 ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2191 Dist = SE.applyLoopGuards(Dist, *LoopGuards);
2192 }
2193
2194 // Negative distances are not plausible dependencies.
2195 if (SE.isKnownNonPositive(Dist)) {
2196 if (SE.isKnownNonNegative(Dist)) {
2197 if (HasSameSize) {
2198 // Write to the same location with the same size.
2199 return Dependence::Forward;
2200 }
2201 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2202 "different type sizes\n");
2203 return Dependence::Unknown;
2204 }
2205
2206 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2207 // Check if the first access writes to a location that is read in a later
2208 // iteration, where the distance between them is not a multiple of a vector
2209 // factor and relatively small.
2210 //
2211 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2212 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2213 // forward dependency will allow vectorization using any width.
2214
2215 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2216 if (!ConstDist) {
2217 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2218 : Dependence::Unknown;
2219 }
2220 if (!HasSameSize ||
2221 couldPreventStoreLoadForward(ConstDist, TypeByteSize)) {
2222 LLVM_DEBUG(
2223 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2224 return Dependence::ForwardButPreventsForwarding;
2225 }
2226 }
2227
2228 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2229 return Dependence::Forward;
2230 }
2231
2232 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2233 // Below we only handle strictly positive distances.
2234 if (MinDistance <= 0) {
2235 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2236 : Dependence::Unknown;
2237 }
2238
2239 if (!HasSameSize) {
2240 if (CheckCompletelyBeforeOrAfter())
2241 return Dependence::NoDep;
2242 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2243 "different type sizes\n");
2244 return Dependence::Unknown;
2245 }
2246 // Bail out early if passed-in parameters make vectorization not feasible.
2247 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2248 VectorizerParams::VectorizationFactor : 1);
2249 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2250 VectorizerParams::VectorizationInterleave : 1);
2251 // The minimum number of iterations for a vectorized/unrolled version.
2252 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2253
2254 // It's not vectorizable if the distance is smaller than the minimum distance
2255 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2256 // front needs MaxStride. Vectorizing the last iteration needs TypeByteSize.
2257 // (No need to add the last gap distance.)
2258 //
2259 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2260 // foo(int *A) {
2261 // int *B = (int *)((char *)A + 14);
2262 // for (i = 0 ; i < 1024 ; i += 2)
2263 // B[i] = A[i] + 1;
2264 // }
2265 //
2266 // Two accesses in memory (stride is 4 * 2):
2267 // | A[0] | | A[2] | | A[4] | | A[6] | |
2268 // | B[0] | | B[2] | | B[4] |
2269 //
2270 // The MinDistance needed for vectorizing all iterations except the last is
2271 // 4 * 2 * (MinNumIter - 1); the last iteration needs another 4 bytes.
2272 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2273 //
2274 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2275 // 12, which is less than the distance (14).
2276 //
2277 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2278 // the minimum distance needed is 28, which is greater than distance. It is
2279 // not safe to do vectorization.
2280 //
2281 // We use MaxStride (maximum of src and sink strides) to get a conservative
2282 // lower bound on the MinDistanceNeeded in case of different strides.
2283
2284 // We know that Dist is positive, but it may not be constant. Use the signed
2285 // minimum for computations below, as this ensures we compute the closest
2286 // possible dependence distance.
2287 uint64_t MinDistanceNeeded = MaxStride * (MinNumIter - 1) + TypeByteSize;
2288 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2289 if (!ConstDist) {
2290 // For non-constant distances, we checked the lower bound of the
2291 // dependence distance and the distance may be larger at runtime (and safe
2292 // for vectorization). Classify it as Unknown, so we re-try with runtime
2293 // checks, unless we can prove both accesses cannot overlap.
2294 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2295 : Dependence::Unknown;
2296 }
2297 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2298 << MinDistance << '\n');
2299 return Dependence::Backward;
2300 }
2301
2302 // Unsafe if the minimum distance needed is greater than the smallest
2303 // dependence distance.
2304 if (MinDistanceNeeded > MinDepDistBytes) {
2305 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2306 << MinDistanceNeeded << " size in bytes\n");
2307 return Dependence::Backward;
2308 }
2309
2310 MinDepDistBytes =
2311 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2312
2313 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2314 if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist &&
2315 couldPreventStoreLoadForward(MinDistance, TypeByteSize, *CommonStride))
2316 return Dependence::BackwardVectorizableButPreventsForwarding;
2317
2318 uint64_t MaxVF = MinDepDistBytes / MaxStride;
2319 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2320 << " with max VF = " << MaxVF << '\n');
2321
2322 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2323 if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {
2324 // For non-constant distances, we checked the lower bound of the dependence
2325 // distance and the distance may be larger at runtime (and safe for
2326 // vectorization). Classify it as Unknown, so we re-try with runtime checks,
2327 // unless we can prove both accesses cannot overlap.
2328 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2329 : Dependence::Unknown;
2330 }
2331
2332 if (CheckCompletelyBeforeOrAfter())
2333 return Dependence::NoDep;
2334
2335 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2336 return Dependence::BackwardVectorizable;
2337}
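// Illustrative worked example (not from the original source): for
//   for (i = 0; i < n; ++i)
//     A[i + 4] = A[i];           // 4-byte ints
// the dependence distance is 16 bytes and MaxStride is 4 bytes, so with the
// default MinNumIter of 2 the distance needed is 4 * 1 + 4 = 8 <= 16;
// MinDepDistBytes becomes 16 and MaxVF = 16 / 4 = 4 elements, i.e. the
// dependence is classified BackwardVectorizable with a maximum safe vector
// width of 128 bits (assuming no VF/interleave count is forced).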
2338
2339bool MemoryDepChecker::areDepsSafe(const DepCandidates &DepCands,
2340 const MemAccessInfoList &CheckDeps) {
2341
2342 MinDepDistBytes = -1;
2343 SmallPtrSet<MemAccessInfo, 8> Visited;
2344 for (MemAccessInfo CurAccess : CheckDeps) {
2345 if (Visited.contains(CurAccess))
2346 continue;
2347
2348 // Check accesses within this set.
2349 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2350 DepCands.findLeader(CurAccess);
2351 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2352 DepCands.member_end();
2353
2354 // Check every access pair.
2355 while (AI != AE) {
2356 Visited.insert(*AI);
2357 bool AIIsWrite = AI->getInt();
2358 // Check loads only against the next equivalence class, but stores also
2359 // against other stores in the same equivalence class - to the same address.
2360 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2361 (AIIsWrite ? AI : std::next(AI));
2362 while (OI != AE) {
2363 // Check every accessing instruction pair in program order.
2364 auto &Acc = Accesses[*AI];
2365 for (std::vector<unsigned>::iterator I1 = Acc.begin(), I1E = Acc.end();
2366 I1 != I1E; ++I1)
2367 // Scan all accesses of another equivalence class, but only the next
2368 // accesses of the same equivalence class.
2369 for (std::vector<unsigned>::iterator
2370 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2371 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2372 I2 != I2E; ++I2) {
2373 auto A = std::make_pair(&*AI, *I1);
2374 auto B = std::make_pair(&*OI, *I2);
2375
2376 assert(*I1 != *I2);
2377 if (*I1 > *I2)
2378 std::swap(A, B);
2379
2380 Dependence::DepType Type =
2381 isDependent(*A.first, A.second, *B.first, B.second);
2382 mergeInStatus(Dependence::isSafeForVectorization(Type));
2383
2384 // Gather dependences unless we accumulated MaxDependences
2385 // dependences. In that case return as soon as we find the first
2386 // unsafe dependence. This puts a limit on this quadratic
2387 // algorithm.
2388 if (RecordDependences) {
2389 if (Type != Dependence::NoDep)
2390 Dependences.emplace_back(A.second, B.second, Type);
2391
2392 if (Dependences.size() >= MaxDependences) {
2393 RecordDependences = false;
2394 Dependences.clear();
2396 << "Too many dependences, stopped recording\n");
2397 }
2398 }
2399 if (!RecordDependences && !isSafeForVectorization())
2400 return false;
2401 }
2402 ++OI;
2403 }
2404 ++AI;
2405 }
2406 }
2407
2408 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2409 return isSafeForVectorization();
2410}
2411
2412SmallVector<Instruction *, 4>
2413MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2414 MemAccessInfo Access(Ptr, IsWrite);
2415 auto I = Accesses.find(Access);
2416 SmallVector<Instruction *, 4> Insts;
2417 if (I != Accesses.end()) {
2418 transform(I->second, std::back_inserter(Insts),
2419 [&](unsigned Idx) { return this->InstMap[Idx]; });
2420 }
2421
2422 return Insts;
2423}
2424
2426 "NoDep",
2427 "Unknown",
2428 "IndirectUnsafe",
2429 "Forward",
2430 "ForwardButPreventsForwarding",
2431 "Backward",
2432 "BackwardVectorizable",
2433 "BackwardVectorizableButPreventsForwarding"};
2434
2435void MemoryDepChecker::Dependence::print(
2436 raw_ostream &OS, unsigned Depth,
2437 const SmallVectorImpl<Instruction *> &Instrs) const {
2438 OS.indent(Depth) << DepName[Type] << ":\n";
2439 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2440 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2441}
2442
2443bool LoopAccessInfo::canAnalyzeLoop() {
2444 // We need to have a loop header.
2445 LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2446 << TheLoop->getHeader()->getParent()->getName() << "' from "
2447 << TheLoop->getLocStr() << "\n");
2448
2449 // We can only analyze innermost loops.
2450 if (!TheLoop->isInnermost()) {
2451 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2452 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2453 return false;
2454 }
2455
2456 // We must have a single backedge.
2457 if (TheLoop->getNumBackEdges() != 1) {
2458 LLVM_DEBUG(
2459 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2460 recordAnalysis("CFGNotUnderstood")
2461 << "loop control flow is not understood by analyzer";
2462 return false;
2463 }
2464
2465 // ScalarEvolution needs to be able to find the symbolic max backedge taken
2466 // count, which is an upper bound on the number of loop iterations. The loop
2467 // may execute fewer iterations, if it exits via an uncountable exit.
2468 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2469 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2470 recordAnalysis("CantComputeNumberOfIterations")
2471 << "could not determine number of loop iterations";
2472 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2473 return false;
2474 }
2475
2476 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2477 << TheLoop->getHeader()->getName() << "\n");
2478 return true;
2479}
2480
2481bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
2482 const TargetLibraryInfo *TLI,
2483 DominatorTree *DT) {
2484 // Holds the Load and Store instructions.
2485 SmallVector<LoadInst *, 16> Loads;
2486 SmallVector<StoreInst *, 16> Stores;
2487 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2488
2489 // Holds all the different accesses in the loop.
2490 unsigned NumReads = 0;
2491 unsigned NumReadWrites = 0;
2492
2493 bool HasComplexMemInst = false;
2494
2495 // A runtime check is only legal to insert if there are no convergent calls.
2496 HasConvergentOp = false;
2497
2498 PtrRtChecking->Pointers.clear();
2499 PtrRtChecking->Need = false;
2500
2501 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2502
2503 const bool EnableMemAccessVersioningOfLoop =
2505 !TheLoop->getHeader()->getParent()->hasOptSize();
2506
2507 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2508 // loop info, as it may be arbitrary.
2509 LoopBlocksRPO RPOT(TheLoop);
2510 RPOT.perform(LI);
2511 for (BasicBlock *BB : RPOT) {
2512 // Scan the BB and collect legal loads and stores. Also detect any
2513 // convergent instructions.
2514 for (Instruction &I : *BB) {
2515 if (auto *Call = dyn_cast<CallBase>(&I)) {
2516 if (Call->isConvergent())
2517 HasConvergentOp = true;
2518 }
2519
2520 // If both a non-vectorizable memory instruction and a convergent operation
2521 // are found in this loop, there is no reason to continue the search.
2522 if (HasComplexMemInst && HasConvergentOp)
2523 return false;
2524
2525 // Avoid hitting recordAnalysis multiple times.
2526 if (HasComplexMemInst)
2527 continue;
2528
2529 // Record alias scopes defined inside the loop.
2530 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2531 for (Metadata *Op : Decl->getScopeList()->operands())
2532 LoopAliasScopes.insert(cast<MDNode>(Op));
2533
2534 // Many math library functions read the rounding mode. We will only
2535 // vectorize a loop if it contains known function calls that don't set
2536 // the flag. Therefore, it is safe to ignore this read from memory.
2537 auto *Call = dyn_cast<CallInst>(&I);
2538 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2539 continue;
2540
2541 // If this is a load, save it. If this instruction can read from memory
2542 // but is not a load, we only allow it if it's a call to a function with a
2543 // vector mapping and no pointer arguments.
2544 if (I.mayReadFromMemory()) {
2545 auto hasPointerArgs = [](CallBase *CB) {
2546 return any_of(CB->args(), [](Value const *Arg) {
2547 return Arg->getType()->isPointerTy();
2548 });
2549 };
2550
2551 // If the function has an explicit vectorized counterpart, and does not
2552 // take output/input pointers, we can safely assume that it can be
2553 // vectorized.
2554 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2555 !hasPointerArgs(Call) && !VFDatabase::getMappings(*Call).empty())
2556 continue;
2557
2558 auto *Ld = dyn_cast<LoadInst>(&I);
2559 if (!Ld) {
2560 recordAnalysis("CantVectorizeInstruction", Ld)
2561 << "instruction cannot be vectorized";
2562 HasComplexMemInst = true;
2563 continue;
2564 }
2565 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2566 recordAnalysis("NonSimpleLoad", Ld)
2567 << "read with atomic ordering or volatile read";
2568 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2569 HasComplexMemInst = true;
2570 continue;
2571 }
2572 NumLoads++;
2573 Loads.push_back(Ld);
2574 DepChecker->addAccess(Ld);
2575 if (EnableMemAccessVersioningOfLoop)
2576 collectStridedAccess(Ld);
2577 continue;
2578 }
2579
2580 // Save 'store' instructions. Abort if other instructions write to memory.
2581 if (I.mayWriteToMemory()) {
2582 auto *St = dyn_cast<StoreInst>(&I);
2583 if (!St) {
2584 recordAnalysis("CantVectorizeInstruction", St)
2585 << "instruction cannot be vectorized";
2586 HasComplexMemInst = true;
2587 continue;
2588 }
2589 if (!St->isSimple() && !IsAnnotatedParallel) {
2590 recordAnalysis("NonSimpleStore", St)
2591 << "write with atomic ordering or volatile write";
2592 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2593 HasComplexMemInst = true;
2594 continue;
2595 }
2596 NumStores++;
2597 Stores.push_back(St);
2598 DepChecker->addAccess(St);
2599 if (EnableMemAccessVersioningOfLoop)
2600 collectStridedAccess(St);
2601 }
2602 } // Next instr.
2603 } // Next block.
2604
2605 if (HasComplexMemInst)
2606 return false;
2607
2608 // Now we have two lists that hold the loads and the stores.
2609 // Next, we find the pointers that they use.
2610
2611 // Check if we see any stores. If there are no stores, then we don't
2612 // care if the pointers are *restrict*.
2613 if (!Stores.size()) {
2614 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2615 return true;
2616 }
2617
2618 MemoryDepChecker::DepCandidates DepCands;
2619 AccessAnalysis Accesses(TheLoop, AA, LI, DepCands, *PSE, LoopAliasScopes);
2620
2621 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2622 // multiple times on the same object. If the ptr is accessed twice, once
2623 // for read and once for write, it will only appear once (on the write
2624 // list). This is okay, since we are going to check for conflicts between
2625 // writes and between reads and writes, but not between reads and reads.
2626 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2627
2628 // Record uniform store addresses to identify if we have multiple stores
2629 // to the same address.
2630 SmallPtrSet<Value *, 16> UniformStores;
2631
2632 for (StoreInst *ST : Stores) {
2633 Value *Ptr = ST->getPointerOperand();
2634
2635 if (isInvariant(Ptr)) {
2636 // Record store instructions to loop invariant addresses
2637 StoresToInvariantAddresses.push_back(ST);
2638 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2639 !UniformStores.insert(Ptr).second;
2640 }
2641
2642 // If we did *not* see this pointer before, insert it to the read-write
2643 // list. At this phase it is only a 'write' list.
2644 Type *AccessTy = getLoadStoreType(ST);
2645 if (Seen.insert({Ptr, AccessTy}).second) {
2646 ++NumReadWrites;
2647
2648 MemoryLocation Loc = MemoryLocation::get(ST);
2649 // The TBAA metadata could have a control dependency on the predication
2650 // condition, so we cannot rely on it when determining whether or not we
2651 // need runtime pointer checks.
2652 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2653 Loc.AATags.TBAA = nullptr;
2654
2655 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2656 [&Accesses, AccessTy, Loc](Value *Ptr) {
2657 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2658 Accesses.addStore(NewLoc, AccessTy);
2659 });
2660 }
2661 }
2662
2663 if (IsAnnotatedParallel) {
2664 LLVM_DEBUG(
2665 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2666 << "checks.\n");
2667 return true;
2668 }
2669
2670 for (LoadInst *LD : Loads) {
2671 Value *Ptr = LD->getPointerOperand();
2672 // If we did *not* see this pointer before, insert it to the
2673 // read list. If we *did* see it before, then it is already in
2674 // the read-write list. This allows us to vectorize expressions
2675 // such as A[i] += x, because the address of A[i] is a read-write
2676 // pointer. This only works if the index of A[i] is consecutive.
2677 // If the address of i is unknown (for example A[B[i]]) then we may
2678 // read a few words, modify, and write a few words, and some of the
2679 // words may be written to the same address.
2680 bool IsReadOnlyPtr = false;
2681 Type *AccessTy = getLoadStoreType(LD);
2682 if (Seen.insert({Ptr, AccessTy}).second ||
2683 !getPtrStride(*PSE, AccessTy, Ptr, TheLoop, SymbolicStrides)) {
2684 ++NumReads;
2685 IsReadOnlyPtr = true;
2686 }
2687
2688 // See if there is an unsafe dependency between a load from a uniform address
2689 // and a store to the same uniform address.
2690 if (UniformStores.contains(Ptr)) {
2691 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2692 "load and uniform store to the same address!\n");
2693 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2694 }
2695
2696 MemoryLocation Loc = MemoryLocation::get(LD);
2697 // The TBAA metadata could have a control dependency on the predication
2698 // condition, so we cannot rely on it when determining whether or not we
2699 // need runtime pointer checks.
2700 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2701 Loc.AATags.TBAA = nullptr;
2702
2703 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2704 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2705 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2706 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2707 });
2708 }
2709
2710 // If we write (or read-write) to a single destination and there are no
2711 // other reads in this loop then it is safe to vectorize.
2712 if (NumReadWrites == 1 && NumReads == 0) {
2713 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2714 return true;
2715 }
2716
2717 // Build dependence sets and check whether we need a runtime pointer bounds
2718 // check.
2719 Accesses.buildDependenceSets();
2720
2721 // Find pointers with computable bounds. We are going to use this information
2722 // to place a runtime bound check.
2723 Value *UncomputablePtr = nullptr;
2724 HasCompletePtrRtChecking = Accesses.canCheckPtrAtRT(
2725 *PtrRtChecking, TheLoop, SymbolicStrides, UncomputablePtr, AllowPartial);
2726 if (!HasCompletePtrRtChecking) {
2727 const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2728 recordAnalysis("CantIdentifyArrayBounds", I)
2729 << "cannot identify array bounds";
2730 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2731 << "the array bounds.\n");
2732 return false;
2733 }
2734
2735 LLVM_DEBUG(
2736 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2737
2738 bool DepsAreSafe = true;
2739 if (Accesses.isDependencyCheckNeeded()) {
2740 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2741 DepsAreSafe =
2742 DepChecker->areDepsSafe(DepCands, Accesses.getDependenciesToCheck());
2743
2744 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeChecks()) {
2745 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2746
2747 // Clear the dependency checks. We assume they are not needed.
2748 Accesses.resetDepChecks(*DepChecker);
2749
2750 PtrRtChecking->reset();
2751 PtrRtChecking->Need = true;
2752
2753 UncomputablePtr = nullptr;
2754 HasCompletePtrRtChecking =
2755 Accesses.canCheckPtrAtRT(*PtrRtChecking, TheLoop, SymbolicStrides,
2756 UncomputablePtr, AllowPartial);
2757
2758 // Check that we found the bounds for the pointer.
2759 if (!HasCompletePtrRtChecking) {
2760 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2761 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2762 << "cannot check memory dependencies at runtime";
2763 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2764 return false;
2765 }
2766 DepsAreSafe = true;
2767 }
2768 }
2769
2770 if (HasConvergentOp) {
2771 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2772 << "cannot add control dependency to convergent operation";
2773 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2774 "would be needed with a convergent operation\n");
2775 return false;
2776 }
2777
2778 if (DepsAreSafe) {
2779 LLVM_DEBUG(
2780 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2781 << (PtrRtChecking->Need ? "" : " don't")
2782 << " need runtime memory checks.\n");
2783 return true;
2784 }
2785
2786 emitUnsafeDependenceRemark();
2787 return false;
2788}
2789
2790void LoopAccessInfo::emitUnsafeDependenceRemark() {
2791 const auto *Deps = getDepChecker().getDependences();
2792 if (!Deps)
2793 return;
2794 const auto *Found =
2795 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2796 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2797 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2798 });
2799 if (Found == Deps->end())
2800 return;
2801 MemoryDepChecker::Dependence Dep = *Found;
2802
2803 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2804
2805 // Emit remark for first unsafe dependence
2806 bool HasForcedDistribution = false;
2807 std::optional<const MDOperand *> Value =
2808 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2809 if (Value) {
2810 const MDOperand *Op = *Value;
2811 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2812 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2813 }
2814
2815 const std::string Info =
2816 HasForcedDistribution
2817 ? "unsafe dependent memory operations in loop."
2818 : "unsafe dependent memory operations in loop. Use "
2819 "#pragma clang loop distribute(enable) to allow loop distribution "
2820 "to attempt to isolate the offending operations into a separate "
2821 "loop";
2822 OptimizationRemarkAnalysis &R =
2823 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2824
2825 switch (Dep.Type) {
2826 case MemoryDepChecker::Dependence::NoDep:
2827 case MemoryDepChecker::Dependence::Forward:
2828 case MemoryDepChecker::Dependence::BackwardVectorizable:
2829 llvm_unreachable("Unexpected dependence");
2830 case MemoryDepChecker::Dependence::Backward:
2831 R << "\nBackward loop carried data dependence.";
2832 break;
2833 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2834 R << "\nForward loop carried data dependence that prevents "
2835 "store-to-load forwarding.";
2836 break;
2837 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2838 R << "\nBackward loop carried data dependence that prevents "
2839 "store-to-load forwarding.";
2840 break;
2841 case MemoryDepChecker::Dependence::IndirectUnsafe:
2842 R << "\nUnsafe indirect dependence.";
2843 break;
2844 case MemoryDepChecker::Dependence::Unknown:
2845 R << "\nUnknown data dependence.";
2846 break;
2847 }
2848
2849 if (Instruction *I = Dep.getSource(getDepChecker())) {
2850 DebugLoc SourceLoc = I->getDebugLoc();
2851 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2852 SourceLoc = DD->getDebugLoc();
2853 if (SourceLoc)
2854 R << " Memory location is the same as accessed at "
2855 << ore::NV("Location", SourceLoc);
2856 }
2857}
2858
2859bool LoopAccessInfo::blockNeedsPredication(const BasicBlock *BB, Loop *TheLoop,
2860 DominatorTree *DT) {
2861 assert(TheLoop->contains(BB) && "Unknown block used");
2862
2863 // Blocks that do not dominate the latch need predication.
2864 const BasicBlock *Latch = TheLoop->getLoopLatch();
2865 return !DT->dominates(BB, Latch);
2866}
2867
2868OptimizationRemarkAnalysis &
2869LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
2870 assert(!Report && "Multiple reports generated");
2871
2872 const BasicBlock *CodeRegion = TheLoop->getHeader();
2873 DebugLoc DL = TheLoop->getStartLoc();
2874
2875 if (I) {
2876 CodeRegion = I->getParent();
2877 // If there is no debug location attached to the instruction, revert back to
2878 // using the loop's.
2879 if (I->getDebugLoc())
2880 DL = I->getDebugLoc();
2881 }
2882
2883 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
2884 DL, CodeRegion);
2885 return *Report;
2886}
2887
2888bool LoopAccessInfo::isInvariant(Value *V) const {
2889 auto *SE = PSE->getSE();
2890 if (TheLoop->isLoopInvariant(V))
2891 return true;
2892 if (!SE->isSCEVable(V->getType()))
2893 return false;
2894 const SCEV *S = SE->getSCEV(V);
2895 return SE->isLoopInvariant(S, TheLoop);
2896}
2897
2898/// If \p Ptr is a GEP that has exactly one loop-variant operand, return that
2899/// operand. Otherwise, return \p Ptr.
2900static Value *getLoopVariantGEPOperand(Value *Ptr, ScalarEvolution *SE,
2901 Loop *Lp) {
2902 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2903 if (!GEP)
2904 return Ptr;
2905
2906 Value *V = Ptr;
2907 for (const Use &U : GEP->operands()) {
2908 if (!SE->isLoopInvariant(SE->getSCEV(U), Lp)) {
2909 if (V == Ptr)
2910 V = U;
2911 else
2912 // There must be exactly one loop-variant operand.
2913 return Ptr;
2914 }
2915 }
2916 return V;
2917}
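// Illustrative example (not from the original source): for
//   %gep = getelementptr i32, ptr %A, i64 %i
// where %A is loop-invariant and %i is the induction variable, the function
// returns %i, the single loop-variant operand; if more than one operand
// varied with the loop it would return the original pointer unchanged.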
2918
2919/// Get the stride of a pointer access in a loop. Looks for symbolic
2920/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2921static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2922 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2923 if (!PtrTy)
2924 return nullptr;
2925
2926 // Try to remove a gep instruction to make the pointer (actually the index at
2927 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2928 // the pointer, otherwise we are analyzing the index.
2929 Value *OrigPtr = Ptr;
2930
2931 Ptr = getLoopVariantGEPOperand(Ptr, SE, Lp);
2932 const SCEV *V = SE->getSCEV(Ptr);
2933
2934 if (Ptr != OrigPtr)
2935 // Strip off casts.
2936 while (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2937 V = C->getOperand();
2938
2940 return nullptr;
2941
2942 // Note that the restrictions after this loop-invariant check are only
2943 // profitability restrictions.
2944 if (!SE->isLoopInvariant(V, Lp))
2945 return nullptr;
2946
2947 // Look for the loop invariant symbolic value.
2948 if (isa<SCEVUnknown>(V))
2949 return V;
2950
2951 if (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2952 if (isa<SCEVUnknown>(C->getOperand()))
2953 return V;
2954
2955 return nullptr;
2956}
2957
2958void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2959 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2960 if (!Ptr)
2961 return;
2962
2963 // Note: getStrideFromPointer is a *profitability* heuristic. We
2964 // could broaden the scope of values returned here - to anything
2965 // which happens to be loop invariant and contributes to the
2966 // computation of an interesting IV - but we chose not to as we
2967 // don't have a cost model here, and broadening the scope exposes
2968 // far too many unprofitable cases.
2969 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2970 if (!StrideExpr)
2971 return;
2972
2973 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2974 "versioning:");
2975 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2976
2977 if (!SpeculateUnitStride) {
2978 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2979 return;
2980 }
2981
2982 // Avoid adding the "Stride == 1" predicate when we know that
2983 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2984 // or zero iteration loop, as Trip-Count <= Stride == 1.
2985 //
2986 // TODO: We are currently not making a very informed decision on when it is
2987 // beneficial to apply stride versioning. It might make more sense that the
2988 // users of this analysis (such as the vectorizer) will trigger it, based on
2989 // their specific cost considerations; For example, in cases where stride
2990 // versioning does not help resolving memory accesses/dependences, the
2991 // vectorizer should evaluate the cost of the runtime test, and the benefit
2992 // of various possible stride specializations, considering the alternatives
2993 // of using gather/scatters (if available).
2994
2995 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
2996
2997 // Match the types so we can compare the stride and the MaxBTC.
2998 // The Stride can be positive/negative, so we sign extend Stride;
2999 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
3000 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
3001 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
3002 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
3003 const SCEV *CastedStride = StrideExpr;
3004 const SCEV *CastedBECount = MaxBTC;
3005 ScalarEvolution *SE = PSE->getSE();
3006 if (BETypeSizeBits >= StrideTypeSizeBits)
3007 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
3008 else
3009 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
3010 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
3011 // Since TripCount == BackEdgeTakenCount + 1, checking:
3012 // "Stride >= TripCount" is equivalent to checking:
3013 // Stride - MaxBTC > 0
3014 if (SE->isKnownPositive(StrideMinusBETaken)) {
3015 LLVM_DEBUG(
3016 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
3017 "Stride==1 predicate will imply that the loop executes "
3018 "at most once.\n");
3019 return;
3020 }
3021 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
3022
3023 // Strip back off the integer cast, and check that our result is a
3024 // SCEVUnknown as we expect.
3025 const SCEV *StrideBase = StrideExpr;
3026 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3027 StrideBase = C->getOperand();
3028 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3029}
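// Illustrative example (not from the original source): if ScalarEvolution
// can prove the symbolic stride exceeds MaxBTC (say the stride is known to
// be at least 100 while the loop runs at most 8 iterations), versioning on
// "Stride == 1" would leave a loop that executes at most once, so the stride
// is deliberately not recorded in SymbolicStrides.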
3030
3031LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
3032 const TargetTransformInfo *TTI,
3033 const TargetLibraryInfo *TLI, AAResults *AA,
3034 DominatorTree *DT, LoopInfo *LI,
3035 AssumptionCache *AC, bool AllowPartial)
3036 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3037 PtrRtChecking(nullptr), TheLoop(L), AllowPartial(AllowPartial) {
3038 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3039 if (TTI && !TTI->enableScalableVectorization())
3040 // Scale the vector width by 2 as a rough estimate to also consider
3041 // interleaving.
3042 MaxTargetVectorWidthInBits =
3043 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) * 2;
3044
3045 DepChecker = std::make_unique<MemoryDepChecker>(
3046 *PSE, AC, DT, L, SymbolicStrides, MaxTargetVectorWidthInBits, LoopGuards);
3047 PtrRtChecking =
3048 std::make_unique<RuntimePointerChecking>(*DepChecker, SE, LoopGuards);
3049 if (canAnalyzeLoop())
3050 CanVecMem = analyzeLoop(AA, LI, TLI, DT);
3051}
3052
3053void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
3054 if (CanVecMem) {
3055 OS.indent(Depth) << "Memory dependences are safe";
3056 const MemoryDepChecker &DC = getDepChecker();
3057 if (!DC.isSafeForAnyVectorWidth())
3058 OS << " with a maximum safe vector width of "
3059 << DC.getMaxSafeVectorWidthInBits() << " bits";
3060 if (!DC.isSafeForAnyStoreLoadForwardDistances()) {
3061 uint64_t SLDist = DC.getStoreLoadForwardSafeDistanceInBits();
3062 OS << ", with a maximum safe store-load forward width of " << SLDist
3063 << " bits";
3064 }
3065 if (PtrRtChecking->Need)
3066 OS << " with run-time checks";
3067 OS << "\n";
3068 }
3069
3070 if (HasConvergentOp)
3071 OS.indent(Depth) << "Has convergent operation in loop\n";
3072
3073 if (Report)
3074 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3075
3076 if (auto *Dependences = DepChecker->getDependences()) {
3077 OS.indent(Depth) << "Dependences:\n";
3078 for (const auto &Dep : *Dependences) {
3079 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3080 OS << "\n";
3081 }
3082 } else
3083 OS.indent(Depth) << "Too many dependences, not recorded\n";
3084
3085 // List the pairs of accesses that need run-time checks to prove independence.
3086 PtrRtChecking->print(OS, Depth);
3087 if (PtrRtChecking->Need && !HasCompletePtrRtChecking)
3088 OS.indent(Depth) << "Generated run-time checks are incomplete\n";
3089 OS << "\n";
3090
3091 OS.indent(Depth)
3092 << "Non vectorizable stores to invariant address were "
3093 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3094 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3095 ? ""
3096 : "not ")
3097 << "found in loop.\n";
3098
3099 OS.indent(Depth) << "SCEV assumptions:\n";
3100 PSE->getPredicate().print(OS, Depth);
3101
3102 OS << "\n";
3103
3104 OS.indent(Depth) << "Expressions re-written:\n";
3105 PSE->print(OS, Depth);
3106}
3107
3108const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L,
3109 bool AllowPartial) {
3110 const auto &[It, Inserted] = LoopAccessInfoMap.try_emplace(&L);
3111
3112 // We need to create the LoopAccessInfo if either we don't already have one,
3113 // or if it was created with a different value of AllowPartial.
3114 if (Inserted || It->second->hasAllowPartial() != AllowPartial)
3115 It->second = std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT,
3116 &LI, AC, AllowPartial);
3117
3118 return *It->second;
3119}
3120void LoopAccessInfoManager::clear() {
3121 // Collect LoopAccessInfo entries that may keep references to IR outside the
3122 // analyzed loop or SCEVs that may have been modified or invalidated. At the
3123 // moment, that is loops requiring memory or SCEV runtime checks, as those cache
3124 // SCEVs, e.g. for pointer expressions.
3125 for (const auto &[L, LAI] : LoopAccessInfoMap) {
3126 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3127 LAI->getPSE().getPredicate().isAlwaysTrue())
3128 continue;
3129 LoopAccessInfoMap.erase(L);
3130 }
3131}
3132
3133bool LoopAccessInfoManager::invalidate(
3134 Function &F, const PreservedAnalyses &PA,
3135 FunctionAnalysisManager::Invalidator &Inv) {
3136 // Check whether our analysis is preserved.
3137 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3138 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3139 // If not, give up now.
3140 return true;
3141
3142 // Check whether the analyses we depend on became invalid for any reason.
3143 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3144 // invalid.
3145 return Inv.invalidate<AAManager>(F, PA) ||
3146 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
3147 Inv.invalidate<LoopAnalysis>(F, PA) ||
3148 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
3149}
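Per the checks above, cached LAA results survive a transform only if the pass preserves LoopAccessAnalysis itself together with the four analyses queried here. A hedged sketch of the PreservedAnalyses such a pass would return (the pass itself is hypothetical):

  // Sketch: returned from a function pass that did not disturb aliasing,
  // SCEV, loop structure, or dominance.
  PreservedAnalyses PA;
  PA.preserve<LoopAccessAnalysis>();
  PA.preserve<AAManager>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;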
3150
3151 LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
3152 FunctionAnalysisManager &FAM) {
3153 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
3154 auto &AA = FAM.getResult<AAManager>(F);
3155 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3156 auto &LI = FAM.getResult<LoopAnalysis>(F);
3157 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
3158 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3159 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
3160 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI, &AC);
3161}
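In a normal pipeline, PassBuilder registers all of the analyses queried in run() automatically. When driving the analysis manager by hand, the wiring looks roughly like the sketch below; registering the remaining dependencies (AAManager, ScalarEvolutionAnalysis, DominatorTreeAnalysis, LoopAnalysis, TargetIRAnalysis, TargetLibraryAnalysis, AssumptionAnalysis) is elided and assumed to follow the same pattern.

  // Sketch, assuming F is a Function and the other required analyses are
  // also registered on FAM.
  FunctionAnalysisManager FAM;
  FAM.registerPass([] { return LoopAccessAnalysis(); });
  LoopAccessInfoManager &LAIs = FAM.getResult<LoopAccessAnalysis>(F);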
3162
3163AnalysisKey LoopAccessAnalysis::Key;