LoopAccessAnalysis.cpp
1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
40#include "llvm/IR/BasicBlock.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DebugLoc.h"
46#include "llvm/IR/Dominators.h"
47#include "llvm/IR/Function.h"
48#include "llvm/IR/InstrTypes.h"
49#include "llvm/IR/Instruction.h"
52#include "llvm/IR/PassManager.h"
53#include "llvm/IR/Type.h"
54#include "llvm/IR/Value.h"
55#include "llvm/IR/ValueHandle.h"
58#include "llvm/Support/Debug.h"
61#include <algorithm>
62#include <cassert>
63#include <cstdint>
64#include <iterator>
65#include <utility>
66#include <variant>
67#include <vector>
68
69using namespace llvm;
70using namespace llvm::SCEVPatternMatch;
71
72#define DEBUG_TYPE "loop-accesses"
73
75VectorizationFactor("force-vector-width", cl::Hidden,
76 cl::desc("Sets the SIMD width. Zero is autoselect."),
79
81VectorizationInterleave("force-vector-interleave", cl::Hidden,
82 cl::desc("Sets the vectorization interleave count. "
83 "Zero is autoselect."),
87
89 "runtime-memory-check-threshold", cl::Hidden,
90 cl::desc("When performing memory disambiguation checks at runtime do not "
91 "generate more than this number of comparisons (default = 8)."),
94
95/// The maximum number of iterations used to merge memory checks.
97 "memory-check-merge-threshold", cl::Hidden,
98 cl::desc("Maximum number of comparisons done when trying to merge "
99 "runtime memory checks. (default = 100)"),
100 cl::init(100));
101
102/// Maximum SIMD width.
103const unsigned VectorizerParams::MaxVectorWidth = 64;
104
105/// We collect dependences up to this threshold.
107 MaxDependences("max-dependences", cl::Hidden,
108 cl::desc("Maximum number of dependences collected by "
109 "loop-access analysis (default = 100)"),
110 cl::init(100));
111
112/// This enables versioning on the strides of symbolically striding memory
113/// accesses in code like the following.
114/// for (i = 0; i < N; ++i)
115/// A[i * Stride1] += B[i * Stride2] ...
116///
117/// Will be roughly translated to
118/// if (Stride1 == 1 && Stride2 == 1) {
119/// for (i = 0; i < N; i+=4)
120/// A[i:i+3] += ...
121/// } else
122/// ...
124 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
125 cl::desc("Enable symbolic stride memory access versioning"));
126
127/// Enable store-to-load forwarding conflict detection. This option can
128/// be disabled for correctness testing.
130 "store-to-load-forwarding-conflict-detection", cl::Hidden,
131 cl::desc("Enable conflict detection in loop-access analysis"),
132 cl::init(true));
133
135 "max-forked-scev-depth", cl::Hidden,
136 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
137 cl::init(5));
138
140 "laa-speculate-unit-stride", cl::Hidden,
141 cl::desc("Speculate that non-constant strides are unit in LAA"),
142 cl::init(true));
143
145 "hoist-runtime-checks", cl::Hidden,
146 cl::desc(
147 "Hoist inner loop runtime memory checks to outer loop if possible"),
150
152 return ::VectorizationInterleave.getNumOccurrences() > 0;
153}
154
156 const DenseMap<Value *, const SCEV *> &PtrToStride,
157 Value *Ptr) {
158 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
159
160 // If there is an entry in the map return the SCEV of the pointer with the
161 // symbolic stride replaced by one.
162 const SCEV *StrideSCEV = PtrToStride.lookup(Ptr);
163 if (!StrideSCEV)
164 // For a non-symbolic stride, just return the original expression.
165 return OrigSCEV;
166
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const SCEV *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 const SCEV *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
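//
// As a usage sketch (the SymbolicStrides map and surrounding setup are
// assumed, not taken from this file), a client that wants to version a
// symbolically strided access might do:
//
//   // Collected earlier, e.g. Stride for A[i * Stride]: Ptr -> SCEV of Stride.
//   DenseMap<Value *, const SCEV *> SymbolicStrides = ...;
//   // Returns the SCEV of Ptr with the stride speculated to 1; the required
//   // "Stride == 1" equality is recorded as a predicate in PSE.
//   const SCEV *PtrSCEV = replaceSymbolicStrideSCEV(PSE, SymbolicStrides, Ptr);
//   if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrSCEV))
//     ; // The access is now a unit-stride recurrence under that predicate.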
182
184 unsigned Index, const RuntimePointerChecking &RtCheck)
185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
186 AddressSpace(RtCheck.Pointers[Index]
187 .PointerValue->getType()
189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
190 Members.push_back(Index);
191}
192
193/// Returns \p A + \p B, if it is guaranteed not to unsigned wrap. Otherwise
194/// return nullptr. \p A and \p B must have the same type.
195static const SCEV *addSCEVNoOverflow(const SCEV *A, const SCEV *B,
196 ScalarEvolution &SE) {
197 if (!SE.willNotOverflow(Instruction::Add, /*IsSigned=*/false, A, B))
198 return nullptr;
199 return SE.getAddExpr(A, B);
200}
201
202/// Returns \p A * \p B, if it is guaranteed not to unsigned wrap. Otherwise
203/// return nullptr. \p A and \p B must have the same type.
204static const SCEV *mulSCEVOverflow(const SCEV *A, const SCEV *B,
205 ScalarEvolution &SE) {
206 if (!SE.willNotOverflow(Instruction::Mul, /*IsSigned=*/false, A, B))
207 return nullptr;
208 return SE.getMulExpr(A, B);
209}
210
211/// Return true, if evaluating \p AR at \p MaxBTC cannot wrap, because \p AR at
212/// \p MaxBTC is guaranteed inbounds of the accessed object.
214 const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize,
216 AssumptionCache *AC,
217 std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
218 auto *PointerBase = SE.getPointerBase(AR->getStart());
219 auto *StartPtr = dyn_cast<SCEVUnknown>(PointerBase);
220 if (!StartPtr)
221 return false;
222 const Loop *L = AR->getLoop();
223 bool CheckForNonNull, CheckForFreed;
224 Value *StartPtrV = StartPtr->getValue();
225 uint64_t DerefBytes = StartPtrV->getPointerDereferenceableBytes(
226 DL, CheckForNonNull, CheckForFreed);
227
228 if (DerefBytes && (CheckForNonNull || CheckForFreed))
229 return false;
230
231 const SCEV *Step = AR->getStepRecurrence(SE);
232 Type *WiderTy = SE.getWiderType(MaxBTC->getType(), Step->getType());
233 const SCEV *DerefBytesSCEV = SE.getConstant(WiderTy, DerefBytes);
234
235 // Check if we have a suitable dereferenceable assumption we can use.
236 Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt();
237 if (BasicBlock *LoopPred = L->getLoopPredecessor()) {
238 if (isa<BranchInst>(LoopPred->getTerminator()))
239 CtxI = LoopPred->getTerminator();
240 }
241 RetainedKnowledge DerefRK;
242 getKnowledgeForValue(StartPtrV, {Attribute::Dereferenceable}, *AC,
243 [&](RetainedKnowledge RK, Instruction *Assume, auto) {
244 if (!isValidAssumeForContext(Assume, CtxI, DT))
245 return false;
246 if (StartPtrV->canBeFreed() &&
247 !willNotFreeBetween(Assume, CtxI))
248 return false;
249 DerefRK = std::max(DerefRK, RK);
250 return true;
251 });
252 if (DerefRK) {
253 DerefBytesSCEV =
254 SE.getUMaxExpr(DerefBytesSCEV, SE.getSCEV(DerefRK.IRArgValue));
255 }
256
257 if (DerefBytesSCEV->isZero())
258 return false;
259
260 bool IsKnownNonNegative = SE.isKnownNonNegative(Step);
261 if (!IsKnownNonNegative && !SE.isKnownNegative(Step))
262 return false;
263
264 Step = SE.getNoopOrSignExtend(Step, WiderTy);
265 MaxBTC = SE.getNoopOrZeroExtend(MaxBTC, WiderTy);
266
267 // For the computations below, make sure they don't unsigned wrap.
268 if (!SE.isKnownPredicate(CmpInst::ICMP_UGE, AR->getStart(), StartPtr))
269 return false;
270 const SCEV *StartOffset = SE.getNoopOrZeroExtend(
271 SE.getMinusSCEV(AR->getStart(), StartPtr), WiderTy);
272
273 if (!LoopGuards)
274 LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(AR->getLoop(), SE));
275 MaxBTC = SE.applyLoopGuards(MaxBTC, *LoopGuards);
276
277 const SCEV *OffsetAtLastIter =
278 mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
279 if (!OffsetAtLastIter) {
280 // Re-try with constant max backedge-taken count if using the symbolic one
281 // failed.
282 MaxBTC = SE.getConstantMaxBackedgeTakenCount(AR->getLoop());
283 if (isa<SCEVCouldNotCompute>(MaxBTC))
284 return false;
285 MaxBTC = SE.getNoopOrZeroExtend(
286 MaxBTC, WiderTy);
287 OffsetAtLastIter =
288 mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
289 if (!OffsetAtLastIter)
290 return false;
291 }
292
293 const SCEV *OffsetEndBytes = addSCEVNoOverflow(
294 OffsetAtLastIter, SE.getNoopOrZeroExtend(EltSize, WiderTy), SE);
295 if (!OffsetEndBytes)
296 return false;
297
298 if (IsKnownNonNegative) {
299 // For positive steps, check if
300 // (AR->getStart() - StartPtr) + (MaxBTC * Step) + EltSize <= DerefBytes,
301 // while making sure none of the computations unsigned wrap themselves.
302 const SCEV *EndBytes = addSCEVNoOverflow(StartOffset, OffsetEndBytes, SE);
303 if (!EndBytes)
304 return false;
305
306 DerefBytesSCEV = SE.applyLoopGuards(DerefBytesSCEV, *LoopGuards);
307 return SE.isKnownPredicate(CmpInst::ICMP_ULE, EndBytes, DerefBytesSCEV);
308 }
309
310 // For negative steps check if
311 // * StartOffset >= (MaxBTC * Step + EltSize)
312 // * StartOffset <= DerefBytes.
313 assert(SE.isKnownNegative(Step) && "must be known negative");
314 return SE.isKnownPredicate(CmpInst::ICMP_SGE, StartOffset, OffsetEndBytes) &&
315 SE.isKnownPredicate(CmpInst::ICMP_ULE, StartOffset, DerefBytesSCEV);
316}
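//
// A small worked example of the positive-step case, with assumed numbers: for
// an access A[i] with EltSize = 4, StartOffset = 0, dereferenceable(4096)
// known for A, Step = 4 and MaxBTC = 1022, the bound that is checked is
// StartOffset + MaxBTC * |Step| + EltSize = 0 + 1022 * 4 + 4 = 4092 <= 4096,
// so evaluating the AddRec at MaxBTC stays within the object and cannot wrap.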
317
318std::pair<const SCEV *, const SCEV *> llvm::getStartAndEndForAccess(
319 const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC,
320 const SCEV *MaxBTC, ScalarEvolution *SE,
321 DenseMap<std::pair<const SCEV *, Type *>,
322 std::pair<const SCEV *, const SCEV *>> *PointerBounds,
324 std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
325 std::pair<const SCEV *, const SCEV *> *PtrBoundsPair;
326 if (PointerBounds) {
327 auto [Iter, Ins] = PointerBounds->insert(
328 {{PtrExpr, AccessTy},
329 {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
330 if (!Ins)
331 return Iter->second;
332 PtrBoundsPair = &Iter->second;
333 }
334
335 const SCEV *ScStart;
336 const SCEV *ScEnd;
337
338 auto &DL = Lp->getHeader()->getDataLayout();
339 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
340 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
341 if (SE->isLoopInvariant(PtrExpr, Lp)) {
342 ScStart = ScEnd = PtrExpr;
343 } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
344 ScStart = AR->getStart();
345 if (!isa<SCEVCouldNotCompute>(BTC))
346 // Evaluating AR at an exact BTC is safe: LAA separately checks that
347 // accesses cannot wrap in the loop. If evaluating AR at BTC wraps, then
348 // the loop either triggers UB when executing a memory access with a
349 // poison pointer or the wrapping/poisoned pointer is not used.
350 ScEnd = AR->evaluateAtIteration(BTC, *SE);
351 else {
352 // Evaluating AR at MaxBTC may wrap and create an expression that is less
353 // than the start of the AddRec due to wrapping (for example consider
354 // MaxBTC = -2). If that's the case, set ScEnd to -(EltSize + 1). ScEnd
355 // will get incremented by EltSize before returning, so this effectively
356 // sets ScEnd to the maximum unsigned value for the type. Note that LAA
357 // separately checks that accesses cannot wrap, so unsigned max
358 // represents an upper bound.
359 if (evaluatePtrAddRecAtMaxBTCWillNotWrap(AR, MaxBTC, EltSizeSCEV, *SE, DL,
360 DT, AC, LoopGuards)) {
361 ScEnd = AR->evaluateAtIteration(MaxBTC, *SE);
362 } else {
363 ScEnd = SE->getAddExpr(
364 SE->getNegativeSCEV(EltSizeSCEV),
367 AR->getType())));
368 }
369 }
370 const SCEV *Step = AR->getStepRecurrence(*SE);
371
372 // For expressions with negative step, the upper bound is ScStart and the
373 // lower bound is ScEnd.
374 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
375 if (CStep->getValue()->isNegative())
376 std::swap(ScStart, ScEnd);
377 } else {
378 // Fallback case: the step is not constant, but we can still
379 // get the upper and lower bounds of the interval by using min/max
380 // expressions.
381 ScStart = SE->getUMinExpr(ScStart, ScEnd);
382 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
383 }
384 } else
385 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
386
387 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
388 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
389
390 // Add the size of the pointed element to ScEnd.
391 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
392
393 std::pair<const SCEV *, const SCEV *> Res = {ScStart, ScEnd};
394 if (PointerBounds)
395 *PtrBoundsPair = Res;
396 return Res;
397}
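//
// For illustration with assumed values: for a pointer SCEV {%A,+,4}<%loop>
// accessing i32 elements with an exact backedge-taken count of N - 1, the
// code above computes ScStart = %A and ScEnd = (%A + 4 * (N - 1)) + 4 =
// %A + 4 * N, i.e. the half-open byte range [%A, %A + 4 * N) touched by the
// loop, which is what the runtime checks below compare against.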
398
399/// Calculate Start and End points of memory access using
400/// getStartAndEndForAccess.
401void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
402 Type *AccessTy, bool WritePtr,
403 unsigned DepSetId, unsigned ASId,
405 bool NeedsFreeze) {
406 const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
407 const SCEV *BTC = PSE.getBackedgeTakenCount();
408 const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
409 Lp, PtrExpr, AccessTy, BTC, SymbolicMaxBTC, PSE.getSE(),
410 &DC.getPointerBounds(), DC.getDT(), DC.getAC(), LoopGuards);
412 !isa<SCEVCouldNotCompute>(ScEnd) &&
413 "must be able to compute both start and end expressions");
414 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
415 NeedsFreeze);
416}
417
418bool RuntimePointerChecking::tryToCreateDiffCheck(
419 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
420 // If either group contains multiple different pointers, bail out.
421 // TODO: Support multiple pointers by using the minimum or maximum pointer,
422 // depending on src & sink.
423 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
424 return false;
425
426 const PointerInfo *Src = &Pointers[CGI.Members[0]];
427 const PointerInfo *Sink = &Pointers[CGJ.Members[0]];
428
429 // If either pointer is read and written, multiple checks may be needed. Bail
430 // out.
431 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
432 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
433 return false;
434
435 ArrayRef<unsigned> AccSrc =
436 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
437 ArrayRef<unsigned> AccSink =
438 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
439 // If either pointer is accessed multiple times, there may not be a clear
440 // src/sink relation. Bail out for now.
441 if (AccSrc.size() != 1 || AccSink.size() != 1)
442 return false;
443
444 // If the sink is accessed before src, swap src/sink.
445 if (AccSink[0] < AccSrc[0])
446 std::swap(Src, Sink);
447
448 const SCEVConstant *Step;
449 const SCEV *SrcStart;
450 const SCEV *SinkStart;
451 const Loop *InnerLoop = DC.getInnermostLoop();
452 if (!match(Src->Expr,
454 m_SpecificLoop(InnerLoop))) ||
455 !match(Sink->Expr,
457 m_SpecificLoop(InnerLoop))))
458 return false;
459
461 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
463 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
464 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
465 Type *DstTy = getLoadStoreType(SinkInsts[0]);
467 return false;
468
469 const DataLayout &DL = InnerLoop->getHeader()->getDataLayout();
470 unsigned AllocSize =
471 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
472
473 // Only constant steps matching the AllocSize are supported at the
474 // moment. This simplifies the difference computation. Can be extended in the
475 // future.
476 if (Step->getAPInt().abs() != AllocSize)
477 return false;
478
479 IntegerType *IntTy =
480 IntegerType::get(Src->PointerValue->getContext(),
481 DL.getPointerSizeInBits(CGI.AddressSpace));
482
483 // When counting down, the dependence distance needs to be swapped.
484 if (Step->getValue()->isNegative())
485 std::swap(SinkStart, SrcStart);
486
487 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkStart, IntTy);
488 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcStart, IntTy);
489 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
490 isa<SCEVCouldNotCompute>(SrcStartInt))
491 return false;
492
493 // If the start values for both Src and Sink also vary according to an outer
494 // loop, then it's probably better to avoid creating diff checks because
495 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
496 // do the expanded full range overlap checks, which can be hoisted.
497 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
498 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
499 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
500 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
501 const Loop *StartARLoop = SrcStartAR->getLoop();
502 if (StartARLoop == SinkStartAR->getLoop() &&
503 StartARLoop == InnerLoop->getParentLoop() &&
504 // If the diff check would already be loop invariant (due to the
505 // recurrences being the same), then we prefer to keep the diff checks
506 // because they are cheaper.
507 SrcStartAR->getStepRecurrence(*SE) !=
508 SinkStartAR->getStepRecurrence(*SE)) {
509 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
510 "cannot be hoisted out of the outer loop\n");
511 return false;
512 }
513 }
514
515 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
516 << "SrcStart: " << *SrcStartInt << '\n'
517 << "SinkStartInt: " << *SinkStartInt << '\n');
518 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
519 Src->NeedsFreeze || Sink->NeedsFreeze);
520 return true;
521}
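//
// Conceptually, the diff check recorded above replaces the two-sided overlap
// test on [SrcStart, SrcEnd) and [SinkStart, SinkEnd) with a single
// comparison of the distance between the starts. A rough sketch of what the
// later expansion (llvm::addDiffRuntimeChecks) amounts to, with assumed
// IR-level names, is:
//
//   // SinkStartVal/SrcStartVal are the expanded start values; a conflict is
//   // assumed whenever fewer than VF * IC * AllocSize bytes separate them.
//   Value *Diff = Builder.CreateSub(SinkStartVal, SrcStartVal);
//   Value *IsConflict =
//       Builder.CreateICmpULT(Diff, ConstantInt::get(Ty, VF * IC * AllocSize));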
522
524 SmallVector<RuntimePointerCheck, 4> Checks;
525
526 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
527 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
530
531 if (needsChecking(CGI, CGJ)) {
532 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
533 Checks.emplace_back(&CGI, &CGJ);
534 }
535 }
536 }
537 return Checks;
538}
539
541 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
542 assert(Checks.empty() && "Checks is not empty");
543 groupChecks(DepCands, UseDependencies);
544 Checks = generateChecks();
545}
546
548 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
549 for (const auto &I : M.Members)
550 for (const auto &J : N.Members)
551 if (needsChecking(I, J))
552 return true;
553 return false;
554}
555
556/// Compare \p I and \p J and return the minimum.
557/// Return nullptr in case we couldn't find an answer.
558static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
559 ScalarEvolution *SE) {
560 std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
561 if (!Diff)
562 return nullptr;
563 return Diff->isNegative() ? J : I;
564}
565
567 unsigned Index, const RuntimePointerChecking &RtCheck) {
568 return addPointer(
569 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
570 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
571 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
572}
573
574bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
575 const SCEV *End, unsigned AS,
576 bool NeedsFreeze,
577 ScalarEvolution &SE) {
578 assert(AddressSpace == AS &&
579 "all pointers in a checking group must be in the same address space");
580
581 // Compare the starts and ends with the known minimum and maximum
582 // of this set. We need to know how we compare against the min/max
583 // of the set in order to be able to emit memchecks.
584 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
585 if (!Min0)
586 return false;
587
588 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
589 if (!Min1)
590 return false;
591
592 // Update the low bound expression if we've found a new min value.
593 if (Min0 == Start)
594 Low = Start;
595
596 // Update the high bound expression if we've found a new max value.
597 if (Min1 != End)
598 High = End;
599
600 Members.push_back(Index);
601 this->NeedsFreeze |= NeedsFreeze;
602 return true;
603}
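//
// For illustration with assumed expressions: if a group currently has
// Low = %a + 4 and High = %a + 44, adding a pointer with Start = %a and
// End = %a + 12 succeeds because both differences are constant; Low becomes
// %a and High stays %a + 44. A pointer whose start differs from Low by a
// non-constant (e.g. loop-variant) amount cannot be compared and is rejected.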
604
605void RuntimePointerChecking::groupChecks(
606 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
607 // We build the groups from dependency candidates equivalence classes
608 // because:
609 // - We know that pointers in the same equivalence class share
610 // the same underlying object and therefore there is a chance
611 // that we can compare pointers
612 // - We wouldn't be able to merge two pointers for which we need
613 // to emit a memcheck. The classes in DepCands are already
614 // conveniently built such that no two pointers in the same
615 // class need checking against each other.
616
617 // We use the following (greedy) algorithm to construct the groups
618 // For every pointer in the equivalence class:
619 // For each existing group:
620 // - if the difference between this pointer and the min/max bounds
621 // of the group is a constant, then make the pointer part of the
622 // group and update the min/max bounds of that group as required.
623
624 CheckingGroups.clear();
625
626 // If we need to check two pointers to the same underlying object
627 // with a non-constant difference, we shouldn't perform any pointer
628 // grouping with those pointers. This is because we can easily get
629 // into cases where the resulting check would return false, even when
630 // the accesses are safe.
631 //
632 // The following example shows this:
633 // for (i = 0; i < 1000; ++i)
634 // a[5000 + i * m] = a[i] + a[i + 9000]
635 //
636 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
637 // (0, 10000) which is always false. However, if m is 1, there is no
638 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
639 // us to perform an accurate check in this case.
640 //
641 // In the above case, we have a non-constant distance and an Unknown
642 // dependence between accesses to the same underlying object, and could retry
643 // with runtime checks. Therefore UseDependencies is false. In this case we
644 // will use the fallback path and create separate checking groups for all
645 // pointers.
646
647 // If we don't have the dependency partitions, construct a new
648 // checking pointer group for each pointer. This is also required
649 // for correctness, because in this case we can have checking between
650 // pointers to the same underlying object.
651 if (!UseDependencies) {
652 for (unsigned I = 0; I < Pointers.size(); ++I)
653 CheckingGroups.emplace_back(I, *this);
654 return;
655 }
656
657 unsigned TotalComparisons = 0;
658
660 for (unsigned Index = 0; Index < Pointers.size(); ++Index)
661 PositionMap[Pointers[Index].PointerValue].push_back(Index);
662
663 // We need to keep track of what pointers we've already seen so we
664 // don't process them twice.
666
667 // Go through all equivalence classes, get the "pointer check groups"
668 // and add them to the overall solution. We use the order in which accesses
669 // appear in 'Pointers' to enforce determinism.
670 for (unsigned I = 0; I < Pointers.size(); ++I) {
671 // We've seen this pointer before, and therefore already processed
672 // its equivalence class.
673 if (Seen.contains(I))
674 continue;
675
677 Pointers[I].IsWritePtr);
678
680
681 // Because DepCands is constructed by visiting accesses in the order in
682 // which they appear in alias sets (which is deterministic) and the
683 // iteration order within an equivalence class member is only dependent on
684 // the order in which unions and insertions are performed on the
685 // equivalence class, the iteration order is deterministic.
686 for (auto M : DepCands.members(Access)) {
687 auto PointerI = PositionMap.find(M.getPointer());
688 // If we can't find the pointer in PositionMap that means we can't
689 // generate a memcheck for it.
690 if (PointerI == PositionMap.end())
691 continue;
692 for (unsigned Pointer : PointerI->second) {
693 bool Merged = false;
694 // Mark this pointer as seen.
695 Seen.insert(Pointer);
696
697 // Go through all the existing sets and see if we can find one
698 // which can include this pointer.
699 for (RuntimeCheckingPtrGroup &Group : Groups) {
700 // Don't perform more than a certain amount of comparisons.
701 // This should limit the cost of grouping the pointers to something
702 // reasonable. If we do end up hitting this threshold, the algorithm
703 // will create separate groups for all remaining pointers.
704 if (TotalComparisons > MemoryCheckMergeThreshold)
705 break;
706
707 TotalComparisons++;
708
709 if (Group.addPointer(Pointer, *this)) {
710 Merged = true;
711 break;
712 }
713 }
714
715 if (!Merged)
716 // We couldn't add this pointer to any existing set or the threshold
717 // for the number of comparisons has been reached. Create a new group
718 // to hold the current pointer.
719 Groups.emplace_back(Pointer, *this);
720 }
721 }
722
723 // We've computed the grouped checks for this partition.
724 // Save the results and continue with the next one.
726 }
727}
728
730 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
731 unsigned PtrIdx2) {
732 return (PtrToPartition[PtrIdx1] != -1 &&
733 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
734}
735
736bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
737 const PointerInfo &PointerI = Pointers[I];
738 const PointerInfo &PointerJ = Pointers[J];
739
740 // No need to check if two readonly pointers intersect.
741 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
742 return false;
743
744 // Only need to check pointers between two different dependency sets.
745 if (PointerI.DependencySetId == PointerJ.DependencySetId)
746 return false;
747
748 // Only need to check pointers in the same alias set.
749 return PointerI.AliasSetId == PointerJ.AliasSetId;
750}
751
752/// Assign each RuntimeCheckingPtrGroup pointer an index for stable UTC output.
756 for (const auto &[Idx, CG] : enumerate(CheckingGroups))
757 PtrIndices[&CG] = Idx;
758 return PtrIndices;
759}
760
763 unsigned Depth) const {
764 unsigned N = 0;
765 auto PtrIndices = getPtrToIdxMap(CheckingGroups);
766 for (const auto &[Check1, Check2] : Checks) {
767 const auto &First = Check1->Members, &Second = Check2->Members;
768 OS.indent(Depth) << "Check " << N++ << ":\n";
769 OS.indent(Depth + 2) << "Comparing group GRP" << PtrIndices.at(Check1)
770 << ":\n";
771 for (unsigned K : First)
772 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
773 OS.indent(Depth + 2) << "Against group GRP" << PtrIndices.at(Check2)
774 << ":\n";
775 for (unsigned K : Second)
776 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
777 }
778}
779
781
782 OS.indent(Depth) << "Run-time memory checks:\n";
783 printChecks(OS, Checks, Depth);
784
785 OS.indent(Depth) << "Grouped accesses:\n";
786 auto PtrIndices = getPtrToIdxMap(CheckingGroups);
787 for (const auto &CG : CheckingGroups) {
788 OS.indent(Depth + 2) << "Group GRP" << PtrIndices.at(&CG) << ":\n";
789 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
790 << ")\n";
791 for (unsigned Member : CG.Members) {
792 OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
793 }
794 }
795}
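//
// For a hypothetical loop over two arrays, the printed form looks roughly
// like this (exact SCEVs and indentation depend on the input):
//
//   Run-time memory checks:
//   Check 0:
//     Comparing group GRP0:
//       %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
//     Against group GRP1:
//       %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 %iv
//   Grouped accesses:
//     Group GRP0:
//       (Low: %a High: (4000 + %a))
//         Member: {%a,+,4}<%loop>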
796
797namespace {
798
799/// Analyses memory accesses in a loop.
800///
801/// Checks whether run time pointer checks are needed and builds sets for data
802/// dependence checking.
803class AccessAnalysis {
804public:
805 /// Read or write access location.
806 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
807 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
808
809 AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
812 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
813 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DT(DT), DepCands(DA),
814 PSE(PSE), LoopAliasScopes(LoopAliasScopes) {
815 // We're analyzing dependences across loop iterations.
816 BAA.enableCrossIterationMode();
817 }
818
819 /// Register a load and whether it is only read from.
820 void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
821 Value *Ptr = const_cast<Value *>(Loc.Ptr);
822 AST.add(adjustLoc(Loc));
823 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
824 if (IsReadOnly)
825 ReadOnlyPtr.insert(Ptr);
826 }
827
828 /// Register a store.
829 void addStore(const MemoryLocation &Loc, Type *AccessTy) {
830 Value *Ptr = const_cast<Value *>(Loc.Ptr);
831 AST.add(adjustLoc(Loc));
832 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
833 }
834
835 /// Check if we can emit a run-time no-alias check for \p Access.
836 ///
837 /// Returns true if we can emit a run-time no alias check for \p Access.
838 /// If we can check this access, this also adds it to a dependence set and
839 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
840 /// we will attempt to use additional run-time checks in order to get
841 /// the bounds of the pointer.
842 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
843 MemAccessInfo Access, Type *AccessTy,
844 const DenseMap<Value *, const SCEV *> &Strides,
845 DenseMap<Value *, unsigned> &DepSetId,
846 Loop *TheLoop, unsigned &RunningDepId,
847 unsigned ASId, bool Assume);
848
849 /// Check whether we can check the pointers at runtime for
850 /// non-intersection.
851 ///
852 /// Returns true if we need no check or if we do and we can generate them
853 /// (i.e. the pointers have computable bounds). A return value of false means
854 /// we couldn't analyze and generate runtime checks for all pointers in the
855 /// loop, but if \p AllowPartial is set then we will have checks for those
856 /// pointers we could analyze.
857 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, Loop *TheLoop,
858 const DenseMap<Value *, const SCEV *> &Strides,
859 Value *&UncomputablePtr, bool AllowPartial);
860
861 /// Goes over all memory accesses, checks whether a RT check is needed
862 /// and builds sets of dependent accesses.
863 void buildDependenceSets() {
864 processMemAccesses();
865 }
866
867 /// Initial processing of memory accesses determined that we need to
868 /// perform dependency checking.
869 ///
870 /// Note that this can later be cleared if we retry memcheck analysis without
871 /// dependency checking (i.e. ShouldRetryWithRuntimeChecks).
872 bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }
873
874 /// We decided that no dependence analysis would be used. Reset the state.
875 void resetDepChecks(MemoryDepChecker &DepChecker) {
876 CheckDeps.clear();
877 DepChecker.clearDependences();
878 }
879
880 const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }
881
882private:
883 typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
884
885 /// Adjust the MemoryLocation so that it represents accesses to this
886 /// location across all iterations, rather than a single one.
887 MemoryLocation adjustLoc(MemoryLocation Loc) const {
888 // The accessed location varies within the loop, but remains within the
889 // underlying object.
891 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
892 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
893 return Loc;
894 }
895
896 /// Drop alias scopes that are only valid within a single loop iteration.
897 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
898 if (!ScopeList)
899 return nullptr;
900
901 // For the sake of simplicity, drop the whole scope list if any scope is
902 // iteration-local.
903 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
904 return LoopAliasScopes.contains(cast<MDNode>(Scope));
905 }))
906 return nullptr;
907
908 return ScopeList;
909 }
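//
// For example (hypothetical IR): if the loop body contains
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
// then scope !2 only disambiguates accesses within a single iteration.
// Because this analysis reasons about accesses across iterations, any scope
// list containing !2 is dropped above instead of being used for aliasing.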
910
911 /// Go over all memory accesses and check whether runtime pointer checks
912 /// are needed and build sets of dependency check candidates.
913 void processMemAccesses();
914
915 /// Map of all accesses. Values are the types used to access memory pointed to
916 /// by the pointer.
917 PtrAccessMap Accesses;
918
919 /// The loop being checked.
920 const Loop *TheLoop;
921
922 /// List of accesses that need a further dependence check.
923 MemAccessInfoList CheckDeps;
924
925 /// Set of pointers that are read only.
926 SmallPtrSet<Value*, 16> ReadOnlyPtr;
927
928 /// Batched alias analysis results.
929 BatchAAResults BAA;
930
931 /// An alias set tracker to partition the access set by underlying object and
932 /// intrinsic property (such as TBAA metadata).
933 AliasSetTracker AST;
934
935 /// The LoopInfo of the loop being checked.
936 const LoopInfo *LI;
937
938 /// The dominator tree of the function.
939 DominatorTree &DT;
940
941 /// Sets of potentially dependent accesses - members of one set share an
942 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
943 /// dependence check.
945
946 /// Initial processing of memory accesses determined that we may need
947 /// to add memchecks. Perform the analysis to determine the necessary checks.
948 ///
949 /// Note that this is different from isDependencyCheckNeeded. When we retry
950 /// memcheck analysis without dependency checking
951 /// (i.e. ShouldRetryWithRuntimeChecks), isDependencyCheckNeeded is
952 /// cleared while this remains set if we have potentially dependent accesses.
953 bool IsRTCheckAnalysisNeeded = false;
954
955 /// The SCEV predicate containing all the SCEV-related assumptions.
956 PredicatedScalarEvolution &PSE;
957
958 DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;
959
960 /// Alias scopes that are declared inside the loop, and as such not valid
961 /// across iterations.
962 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
963};
964
965} // end anonymous namespace
966
967/// Try to compute a constant stride for \p AR. Used by getPtrStride and
968/// isNoWrap.
969static std::optional<int64_t>
970getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
971 Value *Ptr, PredicatedScalarEvolution &PSE) {
972 if (isa<ScalableVectorType>(AccessTy)) {
973 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
974 << "\n");
975 return std::nullopt;
976 }
977
978 // The access function must stride over the innermost loop.
979 if (Lp != AR->getLoop()) {
980 LLVM_DEBUG({
981 dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
982 if (Ptr)
983 dbgs() << *Ptr << " ";
984
985 dbgs() << "SCEV: " << *AR << "\n";
986 });
987 return std::nullopt;
988 }
989
990 // Check the step is constant.
991 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
992
993 // Calculate the pointer stride and check if it is constant.
994 const APInt *APStepVal;
995 if (!match(Step, m_scev_APInt(APStepVal))) {
996 LLVM_DEBUG({
997 dbgs() << "LAA: Bad stride - Not a constant strided ";
998 if (Ptr)
999 dbgs() << *Ptr << " ";
1000 dbgs() << "SCEV: " << *AR << "\n";
1001 });
1002 return std::nullopt;
1003 }
1004
1005 const auto &DL = Lp->getHeader()->getDataLayout();
1006 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1007 int64_t Size = AllocSize.getFixedValue();
1008
1009 // Huge step value - give up.
1010 std::optional<int64_t> StepVal = APStepVal->trySExtValue();
1011 if (!StepVal)
1012 return std::nullopt;
1013
1014 // Strided access.
1015 return *StepVal % Size ? std::nullopt : std::make_optional(*StepVal / Size);
1016}
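//
// Worked examples with assumed SCEVs: for an i32 access whose pointer is
// {%A,+,4}<%loop>, StepVal = 4 and AllocSize = 4, so the returned stride is 1
// (consecutive elements). For {%A,+,-8}<%loop> the stride is -2, and for
// {%A,+,6}<%loop> the remainder 6 % 4 != 0 makes the result std::nullopt.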
1017
1018/// Check whether \p AR is a non-wrapping AddRec. If \p Ptr is not nullptr, use
1019/// information from the IR pointer value to determine no-wrap.
1021 Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
1022 const DominatorTree &DT,
1023 std::optional<int64_t> Stride = std::nullopt) {
1024 // FIXME: This should probably only return true for NUW.
1026 return true;
1027
1029 return true;
1030
1031 // An nusw getelementptr that is an AddRec cannot wrap. If it would wrap,
1032 // the distance between the previously accessed location and the wrapped
1033 // location will be larger than half the pointer index type space. In that
1034 // case, the GEP would be poison and any memory access dependent on it would
1035 // be immediate UB when executed.
1037 GEP && GEP->hasNoUnsignedSignedWrap()) {
1038 // For the above reasoning to apply, the pointer must be dereferenced in
1039 // every iteration.
1040 if (L->getHeader() == L->getLoopLatch() ||
1041 any_of(GEP->users(), [L, &DT, GEP](User *U) {
1042 if (getLoadStorePointerOperand(U) != GEP)
1043 return false;
1044 BasicBlock *UserBB = cast<Instruction>(U)->getParent();
1045 if (!L->contains(UserBB))
1046 return false;
1047 return !LoopAccessInfo::blockNeedsPredication(UserBB, L, &DT);
1048 }))
1049 return true;
1050 }
1051
1052 if (!Stride)
1053 Stride = getStrideFromAddRec(AR, L, AccessTy, Ptr, PSE);
1054 if (Stride) {
1055 // If the null pointer is undefined, then an access sequence which would
1056 // otherwise access it can be assumed not to unsigned wrap. Note that this
1057 // assumes the object in memory is aligned to the natural alignment.
1058 unsigned AddrSpace = AR->getType()->getPointerAddressSpace();
1059 if (!NullPointerIsDefined(L->getHeader()->getParent(), AddrSpace) &&
1060 (Stride == 1 || Stride == -1))
1061 return true;
1062 }
1063
1064 if (Ptr && Assume) {
1066 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1067 << "LAA: Pointer: " << *Ptr << "\n"
1068 << "LAA: SCEV: " << *AR << "\n"
1069 << "LAA: Added an overflow assumption\n");
1070 return true;
1071 }
1072
1073 return false;
1074}
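//
// As an illustration of the GEP-based argument above (hypothetical IR):
//   %gep = getelementptr nusw i32, ptr %p, i64 %iv
//   %v = load i32, ptr %gep
// If the load executes on every iteration, a wrapping %gep would be poison
// and the dependent load immediate UB, so the AddRec for %gep can be treated
// as non-wrapping without adding a runtime predicate.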
1075
1076static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
1077 function_ref<void(Value *)> AddPointer) {
1079 SmallVector<Value *> WorkList;
1080 WorkList.push_back(StartPtr);
1081
1082 while (!WorkList.empty()) {
1083 Value *Ptr = WorkList.pop_back_val();
1084 if (!Visited.insert(Ptr).second)
1085 continue;
1086 auto *PN = dyn_cast<PHINode>(Ptr);
1087 // SCEV does not look through non-header PHIs inside the loop. Such phis
1088 // can be analyzed by adding separate accesses for each incoming pointer
1089 // value.
1090 if (PN && InnermostLoop.contains(PN->getParent()) &&
1091 PN->getParent() != InnermostLoop.getHeader()) {
1092 llvm::append_range(WorkList, PN->incoming_values());
1093 } else
1094 AddPointer(Ptr);
1095 }
1096}
1097
1098// Walk back through the IR for a pointer, looking for a select like the
1099// following:
1100//
1101// %offset = select i1 %cmp, i64 %a, i64 %b
1102// %addr = getelementptr double, double* %base, i64 %offset
1103// %ld = load double, double* %addr, align 8
1104//
1105// We won't be able to form a single SCEVAddRecExpr from this since the
1106// address for each loop iteration depends on %cmp. We could potentially
1107// produce multiple valid SCEVAddRecExprs, though, and check all of them for
1108// memory safety/aliasing if needed.
1109//
1110// If we encounter some IR we don't yet handle, or something obviously fine
1111// like a constant, then we just add the SCEV for that term to the list passed
1112// in by the caller. If we have a node that may potentially yield a valid
1113// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
1114// ourselves before adding to the list.
1116 ScalarEvolution *SE, const Loop *L, Value *Ptr,
1118 unsigned Depth) {
1119 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
1120 // we've exceeded our limit on recursion, just return whatever we have
1121 // regardless of whether it can be used for a forked pointer or not, along
1122 // with an indication of whether it might be a poison or undef value.
1123 const SCEV *Scev = SE->getSCEV(Ptr);
1124 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
1125 !isa<Instruction>(Ptr) || Depth == 0) {
1126 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1127 return;
1128 }
1129
1130 Depth--;
1131
1132 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
1133 return get<1>(S);
1134 };
1135
1136 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
1137 switch (Opcode) {
1138 case Instruction::Add:
1139 return SE->getAddExpr(L, R);
1140 case Instruction::Sub:
1141 return SE->getMinusSCEV(L, R);
1142 default:
1143 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
1144 }
1145 };
1146
1148 unsigned Opcode = I->getOpcode();
1149 switch (Opcode) {
1150 case Instruction::GetElementPtr: {
1151 auto *GEP = cast<GetElementPtrInst>(I);
1152 Type *SourceTy = GEP->getSourceElementType();
1153 // We only handle base + single offset GEPs here for now.
1154 // Not dealing with preexisting gathers yet, so no vectors.
1155 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
1156 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
1157 break;
1158 }
1161 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
1162 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
1163
1164 // See if we need to freeze our fork...
1165 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
1166 any_of(OffsetScevs, UndefPoisonCheck);
1167
1168 // Check that we only have a single fork, on either the base or the offset.
1169 // Copy the SCEV across for the one without a fork in order to generate
1170 // the full SCEV for both sides of the GEP.
1171 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
1172 BaseScevs.push_back(BaseScevs[0]);
1173 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
1174 OffsetScevs.push_back(OffsetScevs[0]);
1175 else {
1176 ScevList.emplace_back(Scev, NeedsFreeze);
1177 break;
1178 }
1179
1180 Type *IntPtrTy = SE->getEffectiveSCEVType(GEP->getPointerOperandType());
1181
1182 // Find the size of the type being pointed to. We only have a single
1183 // index term (guarded above) so we don't need to index into arrays or
1184 // structures, just get the size of the scalar value.
1185 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
1186
1187 for (auto [B, O] : zip(BaseScevs, OffsetScevs)) {
1188 const SCEV *Base = get<0>(B);
1189 const SCEV *Offset = get<0>(O);
1190
1191 // Scale up the offsets by the size of the type, then add to the bases.
1192 const SCEV *Scaled =
1193 SE->getMulExpr(Size, SE->getTruncateOrSignExtend(Offset, IntPtrTy));
1194 ScevList.emplace_back(SE->getAddExpr(Base, Scaled), NeedsFreeze);
1195 }
1196 break;
1197 }
1198 case Instruction::Select: {
1200 // A select means we've found a forked pointer, but we currently only
1201 // support a single select per pointer so if there's another behind this
1202 // then we just bail out and return the generic SCEV.
1203 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1204 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
1205 if (ChildScevs.size() == 2)
1206 append_range(ScevList, ChildScevs);
1207 else
1208 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1209 break;
1210 }
1211 case Instruction::PHI: {
1213 // A phi means we've found a forked pointer, but we currently only
1214 // support a single phi per pointer so if there's another behind this
1215 // then we just bail out and return the generic SCEV.
1216 if (I->getNumOperands() == 2) {
1217 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
1218 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1219 }
1220 if (ChildScevs.size() == 2)
1221 append_range(ScevList, ChildScevs);
1222 else
1223 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1224 break;
1225 }
1226 case Instruction::Add:
1227 case Instruction::Sub: {
1230 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1231 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1232
1233 // See if we need to freeze our fork...
1234 bool NeedsFreeze =
1235 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1236
1237 // Check that we only have a single fork, on either the left or right side.
1238 // Copy the SCEV across for the one without a fork in order to generate
1239 // the full SCEV for both sides of the BinOp.
1240 if (LScevs.size() == 2 && RScevs.size() == 1)
1241 RScevs.push_back(RScevs[0]);
1242 else if (RScevs.size() == 2 && LScevs.size() == 1)
1243 LScevs.push_back(LScevs[0]);
1244 else {
1245 ScevList.emplace_back(Scev, NeedsFreeze);
1246 break;
1247 }
1248
1249 for (auto [L, R] : zip(LScevs, RScevs))
1250 ScevList.emplace_back(GetBinOpExpr(Opcode, get<0>(L), get<0>(R)),
1251 NeedsFreeze);
1252 break;
1253 }
1254 default:
1255 // Just return the current SCEV if we haven't handled the instruction yet.
1256 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1257 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1258 break;
1259 }
1260}
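//
// For the select example in the comment above findForkedSCEVs, this would
// (within the depth limit) produce two candidate SCEVs for %addr, roughly
// %base + 8 * %a and %base + 8 * %b, each of which can then be bounds-checked
// separately; the freeze bit is set if %a or %b may be undef or poison.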
1261
1262bool AccessAnalysis::createCheckForAccess(
1263 RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
1264 const DenseMap<Value *, const SCEV *> &StridesMap,
1265 DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
1266 unsigned &RunningDepId, unsigned ASId, bool Assume) {
1267 Value *Ptr = Access.getPointer();
1268 ScalarEvolution *SE = PSE.getSE();
1269 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1270
1272 findForkedSCEVs(SE, TheLoop, Ptr, RTCheckPtrs, MaxForkedSCEVDepth);
1273 assert(!RTCheckPtrs.empty() &&
1274 "Must have some runtime-check pointer candidates");
1275
1276 // RTCheckPtrs must have size 2 if there are forked pointers. Otherwise, there
1277 // are no forked pointers; use replaceSymbolicStrideSCEV in this case.
1278 auto IsLoopInvariantOrAR =
1279 [&SE, &TheLoop](const PointerIntPair<const SCEV *, 1, bool> &P) {
1280 return SE->isLoopInvariant(P.getPointer(), TheLoop) ||
1281 isa<SCEVAddRecExpr>(P.getPointer());
1282 };
1283 if (RTCheckPtrs.size() == 2 && all_of(RTCheckPtrs, IsLoopInvariantOrAR)) {
1284 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n";
1285 for (const auto &[Idx, Q] : enumerate(RTCheckPtrs)) dbgs()
1286 << "\t(" << Idx << ") " << *Q.getPointer() << "\n");
1287 } else {
1288 RTCheckPtrs = {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1289 }
1290
1291 /// Check whether all pointers can participate in a runtime bounds check. They
1292 /// must either be invariant or non-wrapping affine AddRecs.
1293 for (auto &P : RTCheckPtrs) {
1294 // The bounds for a loop-invariant pointer are trivial.
1295 if (SE->isLoopInvariant(P.getPointer(), TheLoop))
1296 continue;
1297
1298 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(P.getPointer());
1299 if (!AR && Assume)
1300 AR = PSE.getAsAddRec(Ptr);
1301 if (!AR || !AR->isAffine())
1302 return false;
1303
1304 // If there's only one option for Ptr, look it up after bounds and wrap
1305 // checking, because assumptions might have been added to PSE.
1306 if (RTCheckPtrs.size() == 1) {
1307 AR =
1308 cast<SCEVAddRecExpr>(replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr));
1309 P.setPointer(AR);
1310 }
1311
1312 if (!isNoWrap(PSE, AR, RTCheckPtrs.size() == 1 ? Ptr : nullptr, AccessTy,
1313 TheLoop, Assume, DT))
1314 return false;
1315 }
1316
1317 for (const auto &[PtrExpr, NeedsFreeze] : RTCheckPtrs) {
1318 // The id of the dependence set.
1319 unsigned DepId;
1320
1321 if (isDependencyCheckNeeded()) {
1322 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1323 unsigned &LeaderId = DepSetId[Leader];
1324 if (!LeaderId)
1325 LeaderId = RunningDepId++;
1326 DepId = LeaderId;
1327 } else
1328 // Each access has its own dependence set.
1329 DepId = RunningDepId++;
1330
1331 bool IsWrite = Access.getInt();
1332 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1333 NeedsFreeze);
1334 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1335 }
1336
1337 return true;
1338}
1339
1340bool AccessAnalysis::canCheckPtrAtRT(
1341 RuntimePointerChecking &RtCheck, Loop *TheLoop,
1342 const DenseMap<Value *, const SCEV *> &StridesMap, Value *&UncomputablePtr,
1343 bool AllowPartial) {
1344 // Find pointers with computable bounds. We are going to use this information
1345 // to place a runtime bound check.
1346 bool CanDoRT = true;
1347
1348 bool MayNeedRTCheck = false;
1349 if (!IsRTCheckAnalysisNeeded) return true;
1350
1351 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1352
1353 // We assign a consecutive id to accesses from different alias sets.
1354 // Accesses between different groups don't need to be checked.
1355 unsigned ASId = 0;
1356 for (const auto &AS : AST) {
1357 int NumReadPtrChecks = 0;
1358 int NumWritePtrChecks = 0;
1359 bool CanDoAliasSetRT = true;
1360 ++ASId;
1361 auto ASPointers = AS.getPointers();
1362
1363 // We assign consecutive ids to accesses from different dependence sets.
1364 // Accesses within the same set don't need a runtime check.
1365 unsigned RunningDepId = 1;
1367
1369
1370 // First, count how many write and read accesses are in the alias set. Also
1371 // collect MemAccessInfos for later.
1373 for (const Value *ConstPtr : ASPointers) {
1374 Value *Ptr = const_cast<Value *>(ConstPtr);
1375 bool IsWrite = Accesses.contains(MemAccessInfo(Ptr, true));
1376 if (IsWrite)
1377 ++NumWritePtrChecks;
1378 else
1379 ++NumReadPtrChecks;
1380 AccessInfos.emplace_back(Ptr, IsWrite);
1381 }
1382
1383 // We do not need runtime checks for this alias set if there are no writes
1384 // or a single write and no reads.
1385 if (NumWritePtrChecks == 0 ||
1386 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1387 assert((ASPointers.size() <= 1 ||
1388 all_of(ASPointers,
1389 [this](const Value *Ptr) {
1390 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1391 true);
1392 return !DepCands.contains(AccessWrite);
1393 })) &&
1394 "Can only skip updating CanDoRT below, if all entries in AS "
1395 "are reads or there is at most 1 entry");
1396 continue;
1397 }
1398
1399 for (auto &Access : AccessInfos) {
1400 for (const auto &AccessTy : Accesses[Access]) {
1401 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1402 DepSetId, TheLoop, RunningDepId, ASId,
1403 false)) {
1404 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1405 << *Access.getPointer() << '\n');
1406 Retries.emplace_back(Access, AccessTy);
1407 CanDoAliasSetRT = false;
1408 }
1409 }
1410 }
1411
1412 // Note that this function computes CanDoRT and MayNeedRTCheck
1413 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1414 // we have a pointer for which we couldn't find the bounds but we don't
1415 // actually need to emit any checks so it does not matter.
1416 //
1417 // We need runtime checks for this alias set, if there are at least 2
1418 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1419 // any bound checks (because in that case the number of dependence sets is
1420 // incomplete).
1421 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1422
1423 // We need to perform run-time alias checks, but some pointers had bounds
1424 // that couldn't be checked.
1425 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1426 // Reset the CanDoAliasSetRT flag and retry all accesses that have failed.
1427 // We know that we need these checks, so we can now be more aggressive
1428 // and add further checks if required (overflow checks).
1429 CanDoAliasSetRT = true;
1430 for (const auto &[Access, AccessTy] : Retries) {
1431 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1432 DepSetId, TheLoop, RunningDepId, ASId,
1433 /*Assume=*/true)) {
1434 CanDoAliasSetRT = false;
1435 UncomputablePtr = Access.getPointer();
1436 if (!AllowPartial)
1437 break;
1438 }
1439 }
1440 }
1441
1442 CanDoRT &= CanDoAliasSetRT;
1443 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1444 ++ASId;
1445 }
1446
1447 // If the pointers that we would use for the bounds comparison have different
1448 // address spaces, assume the values aren't directly comparable, so we can't
1449 // use them for the runtime check. We also have to assume they could
1450 // overlap. In the future there should be metadata for whether address spaces
1451 // are disjoint.
1452 unsigned NumPointers = RtCheck.Pointers.size();
1453 for (unsigned i = 0; i < NumPointers; ++i) {
1454 for (unsigned j = i + 1; j < NumPointers; ++j) {
1455 // Only need to check pointers between two different dependency sets.
1456 if (RtCheck.Pointers[i].DependencySetId ==
1457 RtCheck.Pointers[j].DependencySetId)
1458 continue;
1459 // Only need to check pointers in the same alias set.
1460 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1461 continue;
1462
1463 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1464 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1465
1466 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1467 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1468 if (ASi != ASj) {
1469 LLVM_DEBUG(
1470 dbgs() << "LAA: Runtime check would require comparison between"
1471 " different address spaces\n");
1472 return false;
1473 }
1474 }
1475 }
1476
1477 if (MayNeedRTCheck && (CanDoRT || AllowPartial))
1478 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1479
1480 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1481 << " pointer comparisons.\n");
1482
1483 // If we can do run-time checks, but there are no checks, no runtime checks
1484 // are needed. This can happen when all pointers point to the same underlying
1485 // object for example.
1486 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1487
1488 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1489 assert(CanDoRTIfNeeded == (CanDoRT || !MayNeedRTCheck) &&
1490 "CanDoRTIfNeeded depends on RtCheck.Need");
1491 if (!CanDoRTIfNeeded && !AllowPartial)
1492 RtCheck.reset();
1493 return CanDoRTIfNeeded;
1494}
1495
1496void AccessAnalysis::processMemAccesses() {
1497 // We process the set twice: first we process read-write pointers, last we
1498 // process read-only pointers. This allows us to skip dependence tests for
1499 // read-only pointers.
1500
1501 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1502 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1503 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1504 LLVM_DEBUG({
1505 for (const auto &[A, _] : Accesses)
1506 dbgs() << "\t" << *A.getPointer() << " ("
1507 << (A.getInt()
1508 ? "write"
1509 : (ReadOnlyPtr.contains(A.getPointer()) ? "read-only"
1510 : "read"))
1511 << ")\n";
1512 });
1513
1514 // The AliasSetTracker has nicely partitioned our pointers by metadata
1515 // compatibility and potential for underlying-object overlap. As a result, we
1516 // only need to check for potential pointer dependencies within each alias
1517 // set.
1518 for (const auto &AS : AST) {
1519 // Note that both the alias-set tracker and the alias sets themselves use
1520 // ordered collections internally and so the iteration order here is
1521 // deterministic.
1522 auto ASPointers = AS.getPointers();
1523
1524 bool SetHasWrite = false;
1525
1526 // Map of (pointer to underlying objects, accessed address space) to last
1527 // access encountered.
1528 typedef DenseMap<std::pair<const Value *, unsigned>, MemAccessInfo>
1529 UnderlyingObjToAccessMap;
1530 UnderlyingObjToAccessMap ObjToLastAccess;
1531
1532 // Set of accesses to check after all writes have been processed.
1533 PtrAccessMap DeferredAccesses;
1534
1535 // Iterate over each alias set twice, once to process read/write pointers,
1536 // and then to process read-only pointers.
1537 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1538 bool UseDeferred = SetIteration > 0;
1539 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1540
1541 for (const Value *ConstPtr : ASPointers) {
1542 Value *Ptr = const_cast<Value *>(ConstPtr);
1543
1544 // For a single memory access in AliasSetTracker, Accesses may contain
1545 // both read and write, and they both need to be handled for CheckDeps.
1546 for (const auto &[AC, _] : S) {
1547 if (AC.getPointer() != Ptr)
1548 continue;
1549
1550 bool IsWrite = AC.getInt();
1551
1552 // If we're using the deferred access set, then it contains only
1553 // reads.
1554 bool IsReadOnlyPtr = ReadOnlyPtr.contains(Ptr) && !IsWrite;
1555 if (UseDeferred && !IsReadOnlyPtr)
1556 continue;
1557 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1558 // read or a write.
1559 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1560 S.contains(MemAccessInfo(Ptr, false))) &&
1561 "Alias-set pointer not in the access set?");
1562
1563 MemAccessInfo Access(Ptr, IsWrite);
1564 DepCands.insert(Access);
1565
1566 // Memorize read-only pointers for later processing and skip them in
1567 // the first round (they need to be checked after we have seen all
1568 // write pointers). Note: we also mark pointers that are not
1569 // consecutive as "read-only" pointers (so that we check
1570 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1571 if (!UseDeferred && IsReadOnlyPtr) {
1572 // We only use the pointer keys; the vector-of-types values don't
1573 // matter.
1574 DeferredAccesses.insert({Access, {}});
1575 continue;
1576 }
1577
1578 // If this is a write - check other reads and writes for conflicts. If
1579 // this is a read only check other writes for conflicts (but only if
1580 // there is no other write to the ptr - this is an optimization to
1581 // catch "a[i] = a[i] + " without having to do a dependence check).
1582 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1583 CheckDeps.push_back(Access);
1584 IsRTCheckAnalysisNeeded = true;
1585 }
1586
1587 if (IsWrite)
1588 SetHasWrite = true;
1589
1590 // Create sets of pointers connected by a shared alias set and
1591 // underlying object.
1592 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1593 UOs = {};
1594 ::getUnderlyingObjects(Ptr, UOs, LI);
1595 LLVM_DEBUG(dbgs()
1596 << "Underlying objects for pointer " << *Ptr << "\n");
1597 for (const Value *UnderlyingObj : UOs) {
1598 // nullptr never aliases; don't join sets for pointers that have "null"
1599 // in their UnderlyingObjects list.
1600 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1601 !NullPointerIsDefined(
1602 TheLoop->getHeader()->getParent(),
1603 UnderlyingObj->getType()->getPointerAddressSpace()))
1604 continue;
1605
1606 auto [It, Inserted] = ObjToLastAccess.try_emplace(
1607 {UnderlyingObj,
1608 cast<PointerType>(Ptr->getType())->getAddressSpace()},
1609 Access);
1610 if (!Inserted) {
1611 DepCands.unionSets(Access, It->second);
1612 It->second = Access;
1613 }
1614
1615 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1616 }
1617 }
1618 }
1619 }
1620 }
1621}
1622
1623/// Check whether the access through \p Ptr has a constant stride.
1624std::optional<int64_t>
1625 llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
1626 const Loop *Lp, const DominatorTree &DT,
1627 const DenseMap<Value *, const SCEV *> &StridesMap,
1628 bool Assume, bool ShouldCheckWrap) {
1629 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1630 if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
1631 return 0;
1632
1633 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
1634
1635 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1636 if (Assume && !AR)
1637 AR = PSE.getAsAddRec(Ptr);
1638
1639 if (!AR) {
1640 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1641 << " SCEV: " << *PtrScev << "\n");
1642 return std::nullopt;
1643 }
1644
1645 std::optional<int64_t> Stride =
1646 getStrideFromAddRec(AR, Lp, AccessTy, Ptr, PSE);
1647 if (!ShouldCheckWrap || !Stride)
1648 return Stride;
1649
1650 if (isNoWrap(PSE, AR, Ptr, AccessTy, Lp, Assume, DT, Stride))
1651 return Stride;
1652
1653 LLVM_DEBUG(
1654 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1655 << *Ptr << " SCEV: " << *AR << "\n");
1656 return std::nullopt;
1657}
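// Editor's note (illustrative, not part of the original source): the stride
// returned above is in units of AccessTy. For "int A[...]" accessed as
// A[2 * i] the pointer SCEV is {A,+,8}, so getPtrStride returns 2; a
// loop-invariant pointer yields 0; a non-affine pointer such as A[B[i]]
// yields std::nullopt (or, with Assume, may become analyzable by adding
// predicates).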
1658
1659std::optional<int64_t> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1660 Type *ElemTyB, Value *PtrB,
1661 const DataLayout &DL,
1662 ScalarEvolution &SE,
1663 bool StrictCheck, bool CheckType) {
1664 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1665
1666 // Make sure that A and B are different pointers.
1667 if (PtrA == PtrB)
1668 return 0;
1669
1670 // Make sure that the element types are the same if required.
1671 if (CheckType && ElemTyA != ElemTyB)
1672 return std::nullopt;
1673
1674 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1675 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1676
1677 // Check that the address spaces match.
1678 if (ASA != ASB)
1679 return std::nullopt;
1680 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1681
1682 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1683 const Value *PtrA1 = PtrA->stripAndAccumulateConstantOffsets(
1684 DL, OffsetA, /*AllowNonInbounds=*/true);
1685 const Value *PtrB1 = PtrB->stripAndAccumulateConstantOffsets(
1686 DL, OffsetB, /*AllowNonInbounds=*/true);
1687
1688 std::optional<int64_t> Val;
1689 if (PtrA1 == PtrB1) {
1690 // Retrieve the address space again as pointer stripping now tracks through
1691 // `addrspacecast`.
1692 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1693 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1694 // Check that the address spaces match and that the pointers are valid.
1695 if (ASA != ASB)
1696 return std::nullopt;
1697
1698 IdxWidth = DL.getIndexSizeInBits(ASA);
1699 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1700 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1701
1702 OffsetB -= OffsetA;
1703 Val = OffsetB.trySExtValue();
1704 } else {
1705 // Otherwise compute the distance with SCEV between the base pointers.
1706 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1707 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1708 std::optional<APInt> Diff =
1709 SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
1710 if (!Diff)
1711 return std::nullopt;
1712 Val = Diff->trySExtValue();
1713 }
1714
1715 if (!Val)
1716 return std::nullopt;
1717
1718 int64_t Size = DL.getTypeStoreSize(ElemTyA);
1719 int64_t Dist = *Val / Size;
1720
1721 // Ensure that the calculated distance matches the type-based one after all
1722 // the bitcasts removal in the provided pointers.
1723 if (!StrictCheck || Dist * Size == Val)
1724 return Dist;
1725 return std::nullopt;
1726}
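// Worked example (editor's addition, assuming i32 elements): PtrA = &A[1] and
// PtrB = &A[3] strip to the same base with byte offsets 4 and 12, so Val = 8
// and the returned element distance is 8 / 4 = 2. With StrictCheck, a byte
// distance of 6 would be rejected because 6 / 4 = 1 and 1 * 4 != 6.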
1727
1728 bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1729 const DataLayout &DL, ScalarEvolution &SE,
1730 SmallVectorImpl<unsigned> &SortedIndices) {
1731 assert(llvm::all_of(
1732 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1733 "Expected list of pointer operands.");
1734 // Walk over the pointers, and map each of them to an offset relative to
1735 // first pointer in the array.
1736 Value *Ptr0 = VL[0];
1737
1738 using DistOrdPair = std::pair<int64_t, unsigned>;
1739 auto Compare = llvm::less_first();
1740 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1741 Offsets.emplace(0, 0);
1742 bool IsConsecutive = true;
1743 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1744 std::optional<int64_t> Diff =
1745 getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1746 /*StrictCheck=*/true);
1747 if (!Diff)
1748 return false;
1749
1750 // Check if the pointer with the same offset is found.
1751 int64_t Offset = *Diff;
1752 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1753 if (!IsInserted)
1754 return false;
1755 // Consecutive order if the inserted element is the last one.
1756 IsConsecutive &= std::next(It) == Offsets.end();
1757 }
1758 SortedIndices.clear();
1759 if (!IsConsecutive) {
1760 // Fill SortedIndices array only if it is non-consecutive.
1761 SortedIndices.resize(VL.size());
1762 for (auto [Idx, Off] : enumerate(Offsets))
1763 SortedIndices[Idx] = Off.second;
1764 }
1765 return true;
1766}
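// Worked example (editor's addition, assuming ElemTy = i32): for
// VL = { &A[0], &A[2], &A[1] } the element offsets relative to VL[0] are
// {0, 2, 1}, the ordered set becomes {(0,0), (1,2), (2,1)}, IsConsecutive is
// false because the last insertion lands in the middle, and SortedIndices is
// set to {0, 2, 1}, i.e. the sorted order is VL[0], VL[2], VL[1].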
1767
1768/// Returns true if the memory operations \p A and \p B are consecutive.
1769 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1770 ScalarEvolution &SE, bool CheckType) {
1771 Value *PtrA = getLoadStorePointerOperand(A);
1772 Value *PtrB = getLoadStorePointerOperand(B);
1773 if (!PtrA || !PtrB)
1774 return false;
1775 Type *ElemTyA = getLoadStoreType(A);
1776 Type *ElemTyB = getLoadStoreType(B);
1777 std::optional<int64_t> Diff =
1778 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1779 /*StrictCheck=*/true, CheckType);
1780 return Diff == 1;
1781}
1782
1783 void MemoryDepChecker::addAccess(StoreInst *SI) {
1784 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1785 [this, SI](Value *Ptr) {
1786 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1787 InstMap.push_back(SI);
1788 ++AccessIdx;
1789 });
1790}
1791
1792 void MemoryDepChecker::addAccess(LoadInst *LI) {
1793 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1794 [this, LI](Value *Ptr) {
1795 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1796 InstMap.push_back(LI);
1797 ++AccessIdx;
1798 });
1799}
1800
1819 
1820 bool MemoryDepChecker::Dependence::isBackward() const {
1821 switch (Type) {
1822 case NoDep:
1823 case Forward:
1824 case ForwardButPreventsForwarding:
1825 case Unknown:
1826 case IndirectUnsafe:
1827 return false;
1828 
1829 case BackwardVectorizable:
1830 case Backward:
1831 case BackwardVectorizableButPreventsForwarding:
1832 return true;
1833 }
1834 llvm_unreachable("unexpected DepType!");
1835}
1836
1840 
1841 bool MemoryDepChecker::Dependence::isForward() const {
1842 switch (Type) {
1843 case Forward:
1844 case ForwardButPreventsForwarding:
1845 return true;
1846 
1847 case NoDep:
1848 case Unknown:
1849 case BackwardVectorizable:
1850 case Backward:
1851 case BackwardVectorizableButPreventsForwarding:
1852 case IndirectUnsafe:
1853 return false;
1854 }
1855 llvm_unreachable("unexpected DepType!");
1856}
1857
1858bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1859 uint64_t TypeByteSize,
1860 unsigned CommonStride) {
1861 // If loads occur at a distance that is not a multiple of a feasible vector
1862 // factor, store-load forwarding does not take place.
1863 // Positive dependences might cause trouble because vectorizing them might
1864 // prevent store-load forwarding, making vectorized code run a lot slower.
1865 // a[i] = a[i-3] ^ a[i-8];
1866 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1867 // hence on your typical architecture store-load forwarding does not take
1868 // place. Vectorizing in such cases does not make sense.
1869 // Store-load forwarding distance.
1870
1871 // After this many iterations store-to-load forwarding conflicts should not
1872 // cause any slowdowns.
1873 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1874 // Maximum vector factor.
1875 uint64_t MaxVFWithoutSLForwardIssuesPowerOf2 =
1876 std::min(VectorizerParams::MaxVectorWidth * TypeByteSize,
1877 MaxStoreLoadForwardSafeDistanceInBits);
1878
1879 // Compute the smallest VF at which the store and load would be misaligned.
1880 for (uint64_t VF = 2 * TypeByteSize;
1881 VF <= MaxVFWithoutSLForwardIssuesPowerOf2; VF *= 2) {
1882 // If the number of vector iterations between the store and the load is
1883 // small, we could incur conflicts.
1884 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1885 MaxVFWithoutSLForwardIssuesPowerOf2 = (VF >> 1);
1886 break;
1887 }
1888 }
1889
1890 if (MaxVFWithoutSLForwardIssuesPowerOf2 < 2 * TypeByteSize) {
1891 LLVM_DEBUG(
1892 dbgs() << "LAA: Distance " << Distance
1893 << " that could cause a store-load forwarding conflict\n");
1894 return true;
1895 }
1896
1897 if (CommonStride &&
1898 MaxVFWithoutSLForwardIssuesPowerOf2 <
1899 MaxStoreLoadForwardSafeDistanceInBits &&
1900 MaxVFWithoutSLForwardIssuesPowerOf2 !=
1901 VectorizerParams::MaxVectorWidth * TypeByteSize) {
1902 uint64_t MaxVF =
1903 bit_floor(MaxVFWithoutSLForwardIssuesPowerOf2 / CommonStride);
1904 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1905 MaxStoreLoadForwardSafeDistanceInBits =
1906 std::min(MaxStoreLoadForwardSafeDistanceInBits, MaxVFInBits);
1907 }
1908 return false;
1909}
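// Worked example (editor's addition): for the a[i] = a[i-3] ^ a[i-8] loop in
// the comment above with TypeByteSize = 4, the 12-byte store-load distance is
// tested against candidate VFs starting at 8 bytes: 12 % 8 != 0 and
// 12 / 8 = 1 is far below the 8 * 4 = 32 iteration threshold, so the largest
// conflict-free power-of-2 VF drops below 2 * TypeByteSize and the function
// reports a potential store-to-load forwarding conflict.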
1910
1911void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1912 if (Status < S)
1913 Status = S;
1914}
1915
1916 /// Given a dependence-distance \p Dist between two memory accesses that have
1917 /// strides in the same direction, whose maximum absolute stride is given in
1918 /// \p MaxStride, in a loop whose maximum backedge taken count is \p
1919/// MaxBTC, check if it is possible to prove statically that the dependence
1920/// distance is larger than the range that the accesses will travel through the
1921/// execution of the loop. If so, return true; false otherwise. This is useful
1922/// for example in loops such as the following (PR31098):
1923///
1924/// for (i = 0; i < D; ++i) {
1925/// = out[i];
1926/// out[i+D] =
1927/// }
1928 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1929 const SCEV &MaxBTC, const SCEV &Dist,
1930 uint64_t MaxStride) {
1931
1932 // If we can prove that
1933 // (**) |Dist| > MaxBTC * Step
1934 // where Step is the absolute stride of the memory accesses in bytes,
1935 // then there is no dependence.
1936 //
1937 // Rationale:
1938 // We basically want to check if the absolute distance (|Dist/Step|)
1939 // is >= the loop iteration count (or > MaxBTC).
1940 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1941 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1942 // that the dependence distance is >= VF; This is checked elsewhere.
1943 // But in some cases we can prune dependence distances early, and
1944 // even before selecting the VF, and without a runtime test, by comparing
1945 // the distance against the loop iteration count. Since the vectorized code
1946 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1947 // also guarantees that distance >= VF.
1948 //
1949 const SCEV *Step = SE.getConstant(MaxBTC.getType(), MaxStride);
1950 const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1951
1952 const SCEV *CastedDist = &Dist;
1953 const SCEV *CastedProduct = Product;
1954 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1955 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1956
1957 // The dependence distance can be positive/negative, so we sign extend Dist;
1958 // The multiplication of the absolute stride in bytes and the
1959 // backedgeTakenCount is non-negative, so we zero extend Product.
1960 if (DistTypeSizeBits > ProductTypeSizeBits)
1961 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1962 else
1963 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1964
1965 // Is Dist - (MaxBTC * Step) > 0 ?
1966 // (If so, then we have proven (**) because |Dist| >= Dist)
1967 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1968 if (SE.isKnownPositive(Minus))
1969 return true;
1970
1971 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1972 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1973 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1974 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1975 return SE.isKnownPositive(Minus);
1976}
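// Worked example (editor's addition, assuming i32 elements): in the PR31098
// loop above, Dist is 4 * D bytes, MaxStride is 4 and MaxBTC is D - 1, so
// Dist - MaxBTC * Step = 4 * D - 4 * (D - 1) = 4 is known positive and the
// accesses to out[i] and out[i + D] are proven independent without a runtime
// check.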
1977
1978/// Check the dependence for two accesses with the same stride \p Stride.
1979/// \p Distance is the positive distance in bytes, and \p TypeByteSize is type
1980/// size in bytes.
1981///
1982/// \returns true if they are independent.
1983 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1984 uint64_t TypeByteSize) {
1985 assert(Stride > 1 && "The stride must be greater than 1");
1986 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1987 assert(Distance > 0 && "The distance must be non-zero");
1988
1990 // Skip if the distance is not a multiple of the type byte size.
1990 if (Distance % TypeByteSize)
1991 return false;
1992
1994 // No dependence if the distance is not a multiple of the stride.
1994 // E.g.
1995 // for (i = 0; i < 1024 ; i += 4)
1996 // A[i+2] = A[i] + 1;
1997 //
1998 // Two accesses in memory (distance is 2, stride is 4):
1999 // | A[0] | | | | A[4] | | | |
2000 // | | | A[2] | | | | A[6] | |
2001 //
2002 // E.g.
2003 // for (i = 0; i < 1024 ; i += 3)
2004 // A[i+4] = A[i] + 1;
2005 //
2006 // Two accesses in memory (distance is 4, stride is 3):
2007 // | A[0] | | | A[3] | | | A[6] | | |
2008 // | | | | | A[4] | | | A[7] | |
2009 return Distance % Stride;
2010}
2011
2012bool MemoryDepChecker::areAccessesCompletelyBeforeOrAfter(const SCEV *Src,
2013 Type *SrcTy,
2014 const SCEV *Sink,
2015 Type *SinkTy) {
2016 const SCEV *BTC = PSE.getBackedgeTakenCount();
2017 const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
2018 ScalarEvolution &SE = *PSE.getSE();
2019 const auto &[SrcStart_, SrcEnd_] =
2020 getStartAndEndForAccess(InnermostLoop, Src, SrcTy, BTC, SymbolicMaxBTC,
2021 &SE, &PointerBounds, DT, AC, LoopGuards);
2022 if (isa<SCEVCouldNotCompute>(SrcStart_) || isa<SCEVCouldNotCompute>(SrcEnd_))
2023 return false;
2024
2025 const auto &[SinkStart_, SinkEnd_] =
2026 getStartAndEndForAccess(InnermostLoop, Sink, SinkTy, BTC, SymbolicMaxBTC,
2027 &SE, &PointerBounds, DT, AC, LoopGuards);
2028 if (isa<SCEVCouldNotCompute>(SinkStart_) ||
2029 isa<SCEVCouldNotCompute>(SinkEnd_))
2030 return false;
2031
2032 if (!LoopGuards)
2033 LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2034
2035 auto SrcEnd = SE.applyLoopGuards(SrcEnd_, *LoopGuards);
2036 auto SinkStart = SE.applyLoopGuards(SinkStart_, *LoopGuards);
2037 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
2038 return true;
2039
2040 auto SinkEnd = SE.applyLoopGuards(SinkEnd_, *LoopGuards);
2041 auto SrcStart = SE.applyLoopGuards(SrcStart_, *LoopGuards);
2042 return SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart);
2043}
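// Editor's note (illustrative): if the source accesses A[0 .. N-1] and the
// sink accesses A[N .. 2N-1] over the whole loop, then SrcEnd and SinkStart
// are both &A[N], SrcEnd u<= SinkStart holds after applying loop guards, and
// the two accesses are treated as completely disjoint without a runtime
// check.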
2044
2045 std::variant<MemoryDepChecker::Dependence::DepType,
2046 MemoryDepChecker::DepDistanceStrideAndSizeInfo>
2047 MemoryDepChecker::getDependenceDistanceStrideAndSize(
2048 const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
2049 const AccessAnalysis::MemAccessInfo &B, Instruction *BInst) {
2050 const auto &DL = InnermostLoop->getHeader()->getDataLayout();
2051 auto &SE = *PSE.getSE();
2052 const auto &[APtr, AIsWrite] = A;
2053 const auto &[BPtr, BIsWrite] = B;
2054
2055 // Two reads are independent.
2056 if (!AIsWrite && !BIsWrite)
2057 return MemoryDepChecker::Dependence::NoDep;
2058 
2059 Type *ATy = getLoadStoreType(AInst);
2060 Type *BTy = getLoadStoreType(BInst);
2061
2062 // We cannot check pointers in different address spaces.
2063 if (APtr->getType()->getPointerAddressSpace() !=
2064 BPtr->getType()->getPointerAddressSpace())
2065 return MemoryDepChecker::Dependence::Unknown;
2066 
2067 std::optional<int64_t> StrideAPtr = getPtrStride(
2068 PSE, ATy, APtr, InnermostLoop, *DT, SymbolicStrides, true, true);
2069 std::optional<int64_t> StrideBPtr = getPtrStride(
2070 PSE, BTy, BPtr, InnermostLoop, *DT, SymbolicStrides, true, true);
2071
2072 const SCEV *Src = PSE.getSCEV(APtr);
2073 const SCEV *Sink = PSE.getSCEV(BPtr);
2074
2075 // If the induction step is negative we have to invert source and sink of the
2076 // dependence when measuring the distance between them. We should not swap
2077 // AIsWrite with BIsWrite, as their uses expect them in program order.
2078 if (StrideAPtr && *StrideAPtr < 0) {
2079 std::swap(Src, Sink);
2080 std::swap(AInst, BInst);
2081 std::swap(ATy, BTy);
2082 std::swap(StrideAPtr, StrideBPtr);
2083 }
2084
2085 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
2086
2087 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
2088 << "\n");
2089 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
2090 << ": " << *Dist << "\n");
2091
2092 // Need accesses with constant strides and the same direction for further
2093 // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
2094 // similar code or pointer arithmetic that could wrap in the address space.
2095
2096 // If either Src or Sink are not strided (i.e. not a non-wrapping AddRec) and
2097 // not loop-invariant (stride will be 0 in that case), we cannot analyze the
2098 // dependence further and also cannot generate runtime checks.
2099 if (!StrideAPtr || !StrideBPtr) {
2100 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
2101 return MemoryDepChecker::Dependence::IndirectUnsafe;
2102 }
2103
2104 int64_t StrideAPtrInt = *StrideAPtr;
2105 int64_t StrideBPtrInt = *StrideBPtr;
2106 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
2107 << " Sink induction step: " << StrideBPtrInt << "\n");
2108 // At least Src or Sink are loop invariant and the other is strided or
2109 // invariant. We can generate a runtime check to disambiguate the accesses.
2110 if (!StrideAPtrInt || !StrideBPtrInt)
2111 return MemoryDepChecker::Dependence::Unknown;
2112 
2113 // Both Src and Sink have a constant stride, check if they are in the same
2114 // direction.
2115 if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {
2116 LLVM_DEBUG(
2117 dbgs() << "Pointer access with strides in different directions\n");
2118 return MemoryDepChecker::Dependence::Unknown;
2119 }
2120
2121 TypeSize AStoreSz = DL.getTypeStoreSize(ATy);
2122 TypeSize BStoreSz = DL.getTypeStoreSize(BTy);
2123
2124 // If store sizes are not the same, set TypeByteSize to zero, so we can check
2125 // it in the caller isDependent.
2126 uint64_t ASz = DL.getTypeAllocSize(ATy);
2127 uint64_t BSz = DL.getTypeAllocSize(BTy);
2128 uint64_t TypeByteSize = (AStoreSz == BStoreSz) ? BSz : 0;
2129
2130 uint64_t StrideAScaled = std::abs(StrideAPtrInt) * ASz;
2131 uint64_t StrideBScaled = std::abs(StrideBPtrInt) * BSz;
2132
2133 uint64_t MaxStride = std::max(StrideAScaled, StrideBScaled);
2134
2135 std::optional<uint64_t> CommonStride;
2136 if (StrideAScaled == StrideBScaled)
2137 CommonStride = StrideAScaled;
2138
2139 // TODO: Historically, we didn't retry with runtime checks when (unscaled)
2140 // strides were different but there is no inherent reason to.
2141 if (!isa<SCEVConstant>(Dist))
2142 ShouldRetryWithRuntimeChecks |= StrideAPtrInt == StrideBPtrInt;
2143
2144 // If distance is a SCEVCouldNotCompute, return Unknown immediately.
2145 if (isa<SCEVCouldNotCompute>(Dist)) {
2146 LLVM_DEBUG(dbgs() << "LAA: Uncomputable distance.\n");
2147 return Dependence::Unknown;
2148 }
2149
2150 return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,
2151 TypeByteSize, AIsWrite, BIsWrite);
2152}
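// Worked example (editor's addition, assuming i32 elements): for a write to
// A[i] and a read of A[i + 4] the SCEVs are {A,+,4} and {A+16,+,4}, so
// Dist = 16, StrideAScaled = StrideBScaled = 4, CommonStride = MaxStride = 4
// and TypeByteSize = 4; isDependent then compares 16 against the minimum
// distance required by the candidate vectorization factor.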
2153
2154 MemoryDepChecker::Dependence::DepType
2155 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
2156 const MemAccessInfo &B, unsigned BIdx) {
2157 assert(AIdx < BIdx && "Must pass arguments in program order");
2158
2159 // Check if we can prove that Sink only accesses memory after Src's end or
2160 // vice versa. The helper is used to perform the checks only on the exit paths
2161 // where it helps to improve the analysis result.
2162 auto CheckCompletelyBeforeOrAfter = [&]() {
2163 auto *APtr = A.getPointer();
2164 auto *BPtr = B.getPointer();
2165 Type *ATy = getLoadStoreType(InstMap[AIdx]);
2166 Type *BTy = getLoadStoreType(InstMap[BIdx]);
2167 const SCEV *Src = PSE.getSCEV(APtr);
2168 const SCEV *Sink = PSE.getSCEV(BPtr);
2169 return areAccessesCompletelyBeforeOrAfter(Src, ATy, Sink, BTy);
2170 };
2171
2172 // Get the dependence distance, stride, type size and what access writes for
2173 // the dependence between A and B.
2174 auto Res =
2175 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
2176 if (std::holds_alternative<Dependence::DepType>(Res)) {
2177 if (std::get<Dependence::DepType>(Res) == Dependence::Unknown &&
2178 CheckCompletelyBeforeOrAfter())
2179 return Dependence::NoDep;
2180 return std::get<Dependence::DepType>(Res);
2181 }
2182
2183 auto &[Dist, MaxStride, CommonStride, TypeByteSize, AIsWrite, BIsWrite] =
2184 std::get<DepDistanceStrideAndSizeInfo>(Res);
2185 bool HasSameSize = TypeByteSize > 0;
2186
2187 ScalarEvolution &SE = *PSE.getSE();
2188 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2189
2190 // If the distance between the accesses is larger than their maximum absolute
2191 // stride multiplied by the symbolic maximum backedge taken count (which is an
2192 // upper bound of the number of iterations), the accesses are independent, i.e.
2193 // they are far enough apart that they won't access the same location
2194 // across all loop iterations.
2195 if (HasSameSize &&
2196 isSafeDependenceDistance(
2197 DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()), *Dist, MaxStride))
2198 return Dependence::NoDep;
2199
2200 // The rest of this function relies on ConstDist being at most 64-bits, which
2201 // is checked earlier. Will assert if the calling code changes.
2202 const APInt *APDist = nullptr;
2203 uint64_t ConstDist =
2204 match(Dist, m_scev_APInt(APDist)) ? APDist->abs().getZExtValue() : 0;
2205
2206 // Attempt to prove strided accesses independent.
2207 if (APDist) {
2208 // If the distance between accesses and their strides are known constants,
2209 // check whether the accesses interlace each other.
2210 if (ConstDist > 0 && CommonStride && CommonStride > 1 && HasSameSize &&
2211 areStridedAccessesIndependent(ConstDist, *CommonStride, TypeByteSize)) {
2212 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2213 return Dependence::NoDep;
2214 }
2215 } else {
2216 if (!LoopGuards)
2217 LoopGuards.emplace(
2218 ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2219 Dist = SE.applyLoopGuards(Dist, *LoopGuards);
2220 }
2221
2222 // Negative distances are not plausible dependencies.
2223 if (SE.isKnownNonPositive(Dist)) {
2224 if (SE.isKnownNonNegative(Dist)) {
2225 if (HasSameSize) {
2226 // Write to the same location with the same size.
2227 return Dependence::Forward;
2228 }
2229 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2230 "different type sizes\n");
2231 return Dependence::Unknown;
2232 }
2233
2234 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2235 // Check if the first access writes to a location that is read in a later
2236 // iteration, where the distance between them is not a multiple of a vector
2237 // factor and relatively small.
2238 //
2239 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2240 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2241 // forward dependency will allow vectorization using any width.
2242
2243 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2244 if (!ConstDist) {
2245 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2246 : Dependence::Unknown;
2247 }
2248 if (!HasSameSize ||
2249 couldPreventStoreLoadForward(ConstDist, TypeByteSize)) {
2250 LLVM_DEBUG(
2251 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2252 return Dependence::ForwardButPreventsForwarding;
2253 }
2254 }
2255
2256 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2257 return Dependence::Forward;
2258 }
2259
2260 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2261 // Below we only handle strictly positive distances.
2262 if (MinDistance <= 0) {
2263 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2264 : Dependence::Unknown;
2265 }
2266
2267 if (!HasSameSize) {
2268 if (CheckCompletelyBeforeOrAfter())
2269 return Dependence::NoDep;
2270 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2271 "different type sizes\n");
2272 return Dependence::Unknown;
2273 }
2274 // Bail out early if passed-in parameters make vectorization not feasible.
2275 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2276 VectorizerParams::VectorizationFactor : 1);
2277 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2278 VectorizerParams::VectorizationInterleave : 1);
2279 // The minimum number of iterations for a vectorized/unrolled version.
2280 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2281
2282 // It's not vectorizable if the distance is smaller than the minimum distance
2283 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2284 // front needs MaxStride. Vectorizing the last iteration needs TypeByteSize.
2285 // (No need to add the last gap distance.)
2286 //
2287 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2288 // foo(int *A) {
2289 // int *B = (int *)((char *)A + 14);
2290 // for (i = 0 ; i < 1024 ; i += 2)
2291 // B[i] = A[i] + 1;
2292 // }
2293 //
2294 // Two accesses in memory (stride is 4 * 2):
2295 // | A[0] | | A[2] | | A[4] | | A[6] | |
2296 // | B[0] | | B[2] | | B[4] |
2297 //
2298 // The minimum distance needed for vectorizing all iterations except the last
2299 // is 4 * 2 * (MinNumIter - 1); the last iteration needs 4.
2300 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2301 //
2302 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2303 // 12, which is less than distance.
2304 //
2305 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2306 // the minimum distance needed is 28, which is greater than distance. It is
2307 // not safe to do vectorization.
2308 //
2309 // We use MaxStride (maximum of src and sink strides) to get a conservative
2310 // lower bound on the MinDistanceNeeded in case of different strides.
2311
2312 // We know that Dist is positive, but it may not be constant. Use the signed
2313 // minimum for computations below, as this ensures we compute the closest
2314 // possible dependence distance.
2315 uint64_t MinDistanceNeeded = MaxStride * (MinNumIter - 1) + TypeByteSize;
2316 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2317 if (!ConstDist) {
2318 // For non-constant distances, we checked the lower bound of the
2319 // dependence distance and the distance may be larger at runtime (and safe
2320 // for vectorization). Classify it as Unknown, so we re-try with runtime
2321 // checks, unless we can prove both accesses cannot overlap.
2322 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2323 : Dependence::Unknown;
2324 }
2325 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2326 << MinDistance << '\n');
2327 return Dependence::Backward;
2328 }
2329
2330 // Unsafe if the minimum distance needed is greater than the smallest
2331 // dependence distance.
2332 if (MinDistanceNeeded > MinDepDistBytes) {
2333 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2334 << MinDistanceNeeded << " size in bytes\n");
2335 return Dependence::Backward;
2336 }
2337
2338 MinDepDistBytes =
2339 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2340
2341 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2342 if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist &&
2343 couldPreventStoreLoadForward(MinDistance, TypeByteSize, *CommonStride))
2344 return Dependence::BackwardVectorizableButPreventsForwarding;
2345 
2346 uint64_t MaxVF = MinDepDistBytes / MaxStride;
2347 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2348 << " with max VF = " << MaxVF << '\n');
2349
2350 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2351 if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {
2352 // For non-constant distances, we checked the lower bound of the dependence
2353 // distance and the distance may be larger at runtime (and safe for
2354 // vectorization). Classify it as Unknown, so we re-try with runtime checks,
2355 // unless we can prove both accesses cannot overlap.
2356 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2357 : Dependence::Unknown;
2358 }
2359
2360 if (CheckCompletelyBeforeOrAfter())
2361 return Dependence::NoDep;
2362
2363 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2364 return Dependence::BackwardVectorizable;
2365}
2366
2367 bool MemoryDepChecker::areDepsSafe(const DepCandidates &DepCands,
2368 const MemAccessInfoList &CheckDeps) {
2369
2370 MinDepDistBytes = -1;
2371 SmallPtrSet<MemAccessInfo, 8> Visited;
2372 for (MemAccessInfo CurAccess : CheckDeps) {
2373 if (Visited.contains(CurAccess))
2374 continue;
2375
2376 // Check accesses within this set.
2377 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2378 DepCands.findLeader(CurAccess);
2379 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2380 DepCands.member_end();
2381
2382 // Check every access pair.
2383 while (AI != AE) {
2384 Visited.insert(*AI);
2385 bool AIIsWrite = AI->getInt();
2386 // Check loads only against the next equivalence class, but stores also
2387 // against other stores in the same equivalence class - to the same address.
2388 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2389 (AIIsWrite ? AI : std::next(AI));
2390 while (OI != AE) {
2391 // Check every accessing instruction pair in program order.
2392 auto &Acc = Accesses[*AI];
2393 for (std::vector<unsigned>::iterator I1 = Acc.begin(), I1E = Acc.end();
2394 I1 != I1E; ++I1)
2395 // Scan all accesses of another equivalence class, but only the next
2396 // accesses of the same equivalence class.
2397 for (std::vector<unsigned>::iterator
2398 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2399 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2400 I2 != I2E; ++I2) {
2401 auto A = std::make_pair(&*AI, *I1);
2402 auto B = std::make_pair(&*OI, *I2);
2403
2404 assert(*I1 != *I2);
2405 if (*I1 > *I2)
2406 std::swap(A, B);
2407
2408 Dependence::DepType Type =
2409 isDependent(*A.first, A.second, *B.first, B.second);
2410 mergeInStatus(Dependence::isSafeForVectorization(Type));
2411 
2412 // Gather dependences unless we accumulated MaxDependences
2413 // dependences. In that case return as soon as we find the first
2414 // unsafe dependence. This puts a limit on this quadratic
2415 // algorithm.
2416 if (RecordDependences) {
2417 if (Type != Dependence::NoDep)
2418 Dependences.emplace_back(A.second, B.second, Type);
2419
2420 if (Dependences.size() >= MaxDependences) {
2421 RecordDependences = false;
2422 Dependences.clear();
2423 LLVM_DEBUG(dbgs()
2424 << "Too many dependences, stopped recording\n");
2425 }
2426 }
2427 if (!RecordDependences && !isSafeForVectorization())
2428 return false;
2429 }
2430 ++OI;
2431 }
2432 ++AI;
2433 }
2434 }
2435
2436 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2437 return isSafeForVectorization();
2438}
2439
2440 SmallVector<Instruction *, 4>
2441 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2442 MemAccessInfo Access(Ptr, IsWrite);
2443 auto I = Accesses.find(Access);
2444 SmallVector<Instruction *, 4> Insts;
2445 if (I != Accesses.end()) {
2446 transform(I->second, std::back_inserter(Insts),
2447 [&](unsigned Idx) { return this->InstMap[Idx]; });
2448 }
2449
2450 return Insts;
2451}
2452
2453 const char *MemoryDepChecker::Dependence::DepName[] = {
2454 "NoDep",
2455 "Unknown",
2456 "IndirectUnsafe",
2457 "Forward",
2458 "ForwardButPreventsForwarding",
2459 "Backward",
2460 "BackwardVectorizable",
2461 "BackwardVectorizableButPreventsForwarding"};
2462
2463 void MemoryDepChecker::Dependence::print(
2464 raw_ostream &OS, unsigned Depth,
2465 const SmallVectorImpl<Instruction *> &Instrs) const {
2466 OS.indent(Depth) << DepName[Type] << ":\n";
2467 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2468 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2469}
2470
2471bool LoopAccessInfo::canAnalyzeLoop() {
2472 // We need to have a loop header.
2473 LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2474 << TheLoop->getHeader()->getParent()->getName() << "' from "
2475 << TheLoop->getLocStr() << "\n");
2476
2477 // We can only analyze innermost loops.
2478 if (!TheLoop->isInnermost()) {
2479 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2480 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2481 return false;
2482 }
2483
2484 // We must have a single backedge.
2485 if (TheLoop->getNumBackEdges() != 1) {
2486 LLVM_DEBUG(
2487 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2488 recordAnalysis("CFGNotUnderstood")
2489 << "loop control flow is not understood by analyzer";
2490 return false;
2491 }
2492
2493 // ScalarEvolution needs to be able to find the symbolic max backedge taken
2494 // count, which is an upper bound on the number of loop iterations. The loop
2495 // may execute fewer iterations, if it exits via an uncountable exit.
2496 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2497 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2498 recordAnalysis("CantComputeNumberOfIterations")
2499 << "could not determine number of loop iterations";
2500 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2501 return false;
2502 }
2503
2504 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2505 << TheLoop->getHeader()->getName() << "\n");
2506 return true;
2507}
2508
2509bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
2510 const TargetLibraryInfo *TLI,
2511 DominatorTree *DT) {
2512 // Holds the Load and Store instructions.
2513 SmallVector<LoadInst *, 16> Loads;
2514 SmallVector<StoreInst *, 16> Stores;
2515 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2516
2517 // Holds all the different accesses in the loop.
2518 unsigned NumReads = 0;
2519 unsigned NumReadWrites = 0;
2520
2521 bool HasComplexMemInst = false;
2522
2523 // A runtime check is only legal to insert if there are no convergent calls.
2524 HasConvergentOp = false;
2525
2526 PtrRtChecking->Pointers.clear();
2527 PtrRtChecking->Need = false;
2528
2529 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2530
2531 const bool EnableMemAccessVersioningOfLoop =
2532 EnableMemAccessVersioning &&
2533 !TheLoop->getHeader()->getParent()->hasOptSize();
2534
2535 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2536 // loop info, as it may be arbitrary.
2537 LoopBlocksRPO RPOT(TheLoop);
2538 RPOT.perform(LI);
2539 for (BasicBlock *BB : RPOT) {
2540 // Scan the BB and collect legal loads and stores. Also detect any
2541 // convergent instructions.
2542 for (Instruction &I : *BB) {
2543 if (auto *Call = dyn_cast<CallBase>(&I)) {
2544 if (Call->isConvergent())
2545 HasConvergentOp = true;
2546 }
2547
2548 // With both a non-vectorizable memory instruction and a convergent
2549 // operation found in this loop, there is no reason to continue the search.
2550 if (HasComplexMemInst && HasConvergentOp)
2551 return false;
2552
2553 // Avoid hitting recordAnalysis multiple times.
2554 if (HasComplexMemInst)
2555 continue;
2556
2557 // Record alias scopes defined inside the loop.
2558 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2559 for (Metadata *Op : Decl->getScopeList()->operands())
2560 LoopAliasScopes.insert(cast<MDNode>(Op));
2561
2562 // Many math library functions read the rounding mode. We will only
2563 // vectorize a loop if it contains known function calls that don't set
2564 // the flag. Therefore, it is safe to ignore this read from memory.
2565 auto *Call = dyn_cast<CallInst>(&I);
2566 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2567 continue;
2568
2569 // If this is a load, save it. If this instruction can read from memory
2570 // but is not a load, we only allow it if it's a call to a function with a
2571 // vector mapping and no pointer arguments.
2572 if (I.mayReadFromMemory()) {
2573 auto hasPointerArgs = [](CallBase *CB) {
2574 return any_of(CB->args(), [](Value const *Arg) {
2575 return Arg->getType()->isPointerTy();
2576 });
2577 };
2578
2579 // If the function has an explicit vectorized counterpart, and does not
2580 // take output/input pointers, we can safely assume that it can be
2581 // vectorized.
2582 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2583 !hasPointerArgs(Call) && !VFDatabase::getMappings(*Call).empty())
2584 continue;
2585
2586 auto *Ld = dyn_cast<LoadInst>(&I);
2587 if (!Ld) {
2588 recordAnalysis("CantVectorizeInstruction", Ld)
2589 << "instruction cannot be vectorized";
2590 HasComplexMemInst = true;
2591 continue;
2592 }
2593 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2594 recordAnalysis("NonSimpleLoad", Ld)
2595 << "read with atomic ordering or volatile read";
2596 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2597 HasComplexMemInst = true;
2598 continue;
2599 }
2600 NumLoads++;
2601 Loads.push_back(Ld);
2602 DepChecker->addAccess(Ld);
2603 if (EnableMemAccessVersioningOfLoop)
2604 collectStridedAccess(Ld);
2605 continue;
2606 }
2607
2608 // Save 'store' instructions. Abort if other instructions write to memory.
2609 if (I.mayWriteToMemory()) {
2610 auto *St = dyn_cast<StoreInst>(&I);
2611 if (!St) {
2612 recordAnalysis("CantVectorizeInstruction", St)
2613 << "instruction cannot be vectorized";
2614 HasComplexMemInst = true;
2615 continue;
2616 }
2617 if (!St->isSimple() && !IsAnnotatedParallel) {
2618 recordAnalysis("NonSimpleStore", St)
2619 << "write with atomic ordering or volatile write";
2620 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2621 HasComplexMemInst = true;
2622 continue;
2623 }
2624 NumStores++;
2625 Stores.push_back(St);
2626 DepChecker->addAccess(St);
2627 if (EnableMemAccessVersioningOfLoop)
2628 collectStridedAccess(St);
2629 }
2630 } // Next instr.
2631 } // Next block.
2632
2633 if (HasComplexMemInst)
2634 return false;
2635
2636 // Now we have two lists that hold the loads and the stores.
2637 // Next, we find the pointers that they use.
2638
2639 // Check if we see any stores. If there are no stores, then we don't
2640 // care if the pointers are *restrict*.
2641 if (!Stores.size()) {
2642 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2643 return true;
2644 }
2645
2646 MemoryDepChecker::DepCandidates DepCands;
2647 AccessAnalysis Accesses(TheLoop, AA, LI, *DT, DepCands, *PSE,
2648 LoopAliasScopes);
2649
2650 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2651 // multiple times on the same object. If the ptr is accessed twice, once
2652 // for read and once for write, it will only appear once (on the write
2653 // list). This is okay, since we are going to check for conflicts between
2654 // writes and between reads and writes, but not between reads and reads.
2655 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2656
2657 // Record uniform store addresses to identify if we have multiple stores
2658 // to the same address.
2659 SmallPtrSet<Value *, 16> UniformStores;
2660
2661 for (StoreInst *ST : Stores) {
2662 Value *Ptr = ST->getPointerOperand();
2663
2664 if (isInvariant(Ptr)) {
2665 // Record store instructions to loop invariant addresses
2666 StoresToInvariantAddresses.push_back(ST);
2667 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2668 !UniformStores.insert(Ptr).second;
2669 }
2670
2671 // If we did *not* see this pointer before, insert it to the read-write
2672 // list. At this phase it is only a 'write' list.
2673 Type *AccessTy = getLoadStoreType(ST);
2674 if (Seen.insert({Ptr, AccessTy}).second) {
2675 ++NumReadWrites;
2676
2677 MemoryLocation Loc = MemoryLocation::get(ST);
2678 // The TBAA metadata could have a control dependency on the predication
2679 // condition, so we cannot rely on it when determining whether or not we
2680 // need runtime pointer checks.
2681 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2682 Loc.AATags.TBAA = nullptr;
2683
2684 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2685 [&Accesses, AccessTy, Loc](Value *Ptr) {
2686 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2687 Accesses.addStore(NewLoc, AccessTy);
2688 });
2689 }
2690 }
2691
2692 if (IsAnnotatedParallel) {
2693 LLVM_DEBUG(
2694 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2695 << "checks.\n");
2696 return true;
2697 }
2698
2699 for (LoadInst *LD : Loads) {
2700 Value *Ptr = LD->getPointerOperand();
2701 // If we did *not* see this pointer before, insert it to the
2702 // read list. If we *did* see it before, then it is already in
2703 // the read-write list. This allows us to vectorize expressions
2704 // such as A[i] += x; Because the address of A[i] is a read-write
2705 // pointer. This only works if the index of A[i] is consecutive.
2706 // If the address of i is unknown (for example A[B[i]]) then we may
2707 // read a few words, modify, and write a few words, and some of the
2708 // words may be written to the same address.
2709 bool IsReadOnlyPtr = false;
2710 Type *AccessTy = getLoadStoreType(LD);
2711 if (Seen.insert({Ptr, AccessTy}).second ||
2712 !getPtrStride(*PSE, AccessTy, Ptr, TheLoop, *DT, SymbolicStrides, false,
2713 true)) {
2714 ++NumReads;
2715 IsReadOnlyPtr = true;
2716 }
2717
2718 // See if there is an unsafe dependency between a load from a uniform address
2719 // and a store to the same uniform address.
2720 if (UniformStores.contains(Ptr)) {
2721 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2722 "load and uniform store to the same address!\n");
2723 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2724 }
2725
2726 MemoryLocation Loc = MemoryLocation::get(LD);
2727 // The TBAA metadata could have a control dependency on the predication
2728 // condition, so we cannot rely on it when determining whether or not we
2729 // need runtime pointer checks.
2730 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2731 Loc.AATags.TBAA = nullptr;
2732
2733 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2734 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2735 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2736 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2737 });
2738 }
2739
2740 // If we write (or read-write) to a single destination and there are no
2741 // other reads in this loop then it is safe to vectorize.
2742 if (NumReadWrites == 1 && NumReads == 0) {
2743 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2744 return true;
2745 }
2746
2747 // Build dependence sets and check whether we need a runtime pointer bounds
2748 // check.
2749 Accesses.buildDependenceSets();
2750
2751 // Find pointers with computable bounds. We are going to use this information
2752 // to place a runtime bound check.
2753 Value *UncomputablePtr = nullptr;
2754 HasCompletePtrRtChecking = Accesses.canCheckPtrAtRT(
2755 *PtrRtChecking, TheLoop, SymbolicStrides, UncomputablePtr, AllowPartial);
2756 if (!HasCompletePtrRtChecking) {
2757 const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2758 recordAnalysis("CantIdentifyArrayBounds", I)
2759 << "cannot identify array bounds";
2760 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2761 << "the array bounds.\n");
2762 return false;
2763 }
2764
2765 LLVM_DEBUG(
2766 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2767
2768 bool DepsAreSafe = true;
2769 if (Accesses.isDependencyCheckNeeded()) {
2770 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2771 DepsAreSafe =
2772 DepChecker->areDepsSafe(DepCands, Accesses.getDependenciesToCheck());
2773
2774 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeChecks()) {
2775 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2776
2777 // Clear the dependency checks. We assume they are not needed.
2778 Accesses.resetDepChecks(*DepChecker);
2779
2780 PtrRtChecking->reset();
2781 PtrRtChecking->Need = true;
2782
2783 UncomputablePtr = nullptr;
2784 HasCompletePtrRtChecking =
2785 Accesses.canCheckPtrAtRT(*PtrRtChecking, TheLoop, SymbolicStrides,
2786 UncomputablePtr, AllowPartial);
2787
2788 // Check that we found the bounds for the pointer.
2789 if (!HasCompletePtrRtChecking) {
2790 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2791 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2792 << "cannot check memory dependencies at runtime";
2793 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2794 return false;
2795 }
2796 DepsAreSafe = true;
2797 }
2798 }
2799
2800 if (HasConvergentOp) {
2801 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2802 << "cannot add control dependency to convergent operation";
2803 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2804 "would be needed with a convergent operation\n");
2805 return false;
2806 }
2807
2808 if (DepsAreSafe) {
2809 LLVM_DEBUG(
2810 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2811 << (PtrRtChecking->Need ? "" : " don't")
2812 << " need runtime memory checks.\n");
2813 return true;
2814 }
2815
2816 emitUnsafeDependenceRemark();
2817 return false;
2818}
2819
2820void LoopAccessInfo::emitUnsafeDependenceRemark() {
2821 const auto *Deps = getDepChecker().getDependences();
2822 if (!Deps)
2823 return;
2824 const auto *Found =
2825 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2828 });
2829 if (Found == Deps->end())
2830 return;
2831 MemoryDepChecker::Dependence Dep = *Found;
2832
2833 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2834
2835 // Emit remark for first unsafe dependence
2836 bool HasForcedDistribution = false;
2837 std::optional<const MDOperand *> Value =
2838 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2839 if (Value) {
2840 const MDOperand *Op = *Value;
2841 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2842 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2843 }
2844
2845 const std::string Info =
2846 HasForcedDistribution
2847 ? "unsafe dependent memory operations in loop."
2848 : "unsafe dependent memory operations in loop. Use "
2849 "#pragma clang loop distribute(enable) to allow loop distribution "
2850 "to attempt to isolate the offending operations into a separate "
2851 "loop";
2852 OptimizationRemarkAnalysis &R =
2853 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2854
2855 switch (Dep.Type) {
2856 case MemoryDepChecker::Dependence::NoDep:
2857 case MemoryDepChecker::Dependence::Forward:
2858 case MemoryDepChecker::Dependence::BackwardVectorizable:
2859 llvm_unreachable("Unexpected dependence");
2860 case MemoryDepChecker::Dependence::Backward:
2861 R << "\nBackward loop carried data dependence.";
2862 break;
2863 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2864 R << "\nForward loop carried data dependence that prevents "
2865 "store-to-load forwarding.";
2866 break;
2867 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2868 R << "\nBackward loop carried data dependence that prevents "
2869 "store-to-load forwarding.";
2870 break;
2871 case MemoryDepChecker::Dependence::IndirectUnsafe:
2872 R << "\nUnsafe indirect dependence.";
2873 break;
2874 case MemoryDepChecker::Dependence::Unknown:
2875 R << "\nUnknown data dependence.";
2876 break;
2877 }
2878
2879 if (Instruction *I = Dep.getSource(getDepChecker())) {
2880 DebugLoc SourceLoc = I->getDebugLoc();
2882 SourceLoc = DD->getDebugLoc();
2883 if (SourceLoc)
2884 R << " Memory location is the same as accessed at "
2885 << ore::NV("Location", SourceLoc);
2886 }
2887}
2888
2889 bool llvm::blockNeedsPredication(const BasicBlock *BB,
2890 const Loop *TheLoop,
2891 const DominatorTree *DT) {
2892 assert(TheLoop->contains(BB) && "Unknown block used");
2893
2894 // Blocks that do not dominate the latch need predication.
2895 const BasicBlock *Latch = TheLoop->getLoopLatch();
2896 return !DT->dominates(BB, Latch);
2897}
2898
2899 OptimizationRemarkAnalysis &
2900 LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
2901 assert(!Report && "Multiple reports generated");
2902
2903 const BasicBlock *CodeRegion = TheLoop->getHeader();
2904 DebugLoc DL = TheLoop->getStartLoc();
2905
2906 if (I) {
2907 CodeRegion = I->getParent();
2908 // If there is no debug location attached to the instruction, revert back to
2909 // using the loop's.
2910 if (I->getDebugLoc())
2911 DL = I->getDebugLoc();
2912 }
2913
2914 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
2915 DL, CodeRegion);
2916 return *Report;
2917}
2918
2919 bool LoopAccessInfo::isInvariant(Value *V) const {
2920 auto *SE = PSE->getSE();
2921 if (TheLoop->isLoopInvariant(V))
2922 return true;
2923 if (!SE->isSCEVable(V->getType()))
2924 return false;
2925 const SCEV *S = SE->getSCEV(V);
2926 return SE->isLoopInvariant(S, TheLoop);
2927}
2928
2929/// If \p Ptr is a GEP, which has a loop-variant operand, return that operand.
2930/// Otherwise, return \p Ptr.
2931 static Value *getLoopVariantGEPOperand(Value *Ptr, ScalarEvolution *SE,
2932 Loop *Lp) {
2933 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2934 if (!GEP)
2935 return Ptr;
2936
2937 Value *V = Ptr;
2938 for (const Use &U : GEP->operands()) {
2939 if (!SE->isLoopInvariant(SE->getSCEV(U), Lp)) {
2940 if (V == Ptr)
2941 V = U;
2942 else
2943 // There must be exactly one loop-variant operand.
2944 return Ptr;
2945 }
2946 }
2947 return V;
2948}
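// Editor's note (illustrative): for "getelementptr i32, ptr %A, i64 %i" where
// only %i varies in the loop, the function returns %i so the caller analyzes
// the index expression; a GEP with more than one loop-variant operand is
// returned unchanged.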
2949
2950/// Get the stride of a pointer access in a loop. Looks for symbolic
2951/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2952static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2953 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2954 if (!PtrTy)
2955 return nullptr;
2956
2957 // Try to remove a gep instruction to make the pointer (actually the index at
2958 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2959 // the pointer; otherwise, we are analyzing the index.
2960 Value *OrigPtr = Ptr;
2961
2962 Ptr = getLoopVariantGEPOperand(Ptr, SE, Lp);
2963 const SCEV *V = SE->getSCEV(Ptr);
2964
2965 if (Ptr != OrigPtr)
2966 // Strip off casts.
2967 while (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2968 V = C->getOperand();
2969
2971 return nullptr;
2972
2973 // Note that the restrictions after this loop-invariant check are only
2974 // profitability restrictions.
2975 if (!SE->isLoopInvariant(V, Lp))
2976 return nullptr;
2977
2978 // Look for the loop invariant symbolic value.
2979 if (isa<SCEVUnknown>(V))
2980 return V;
2981
2982 if (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2983 if (isa<SCEVUnknown>(C->getOperand()))
2984 return V;
2985
2986 return nullptr;
2987}
2988
2989void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2990 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2991 if (!Ptr)
2992 return;
2993
2994 // Note: getStrideFromPointer is a *profitability* heuristic. We
2995 // could broaden the scope of values returned here - to anything
2996 // which happens to be loop invariant and contributes to the
2997 // computation of an interesting IV - but we chose not to as we
2998 // don't have a cost model here, and broadening the scope exposes
2999 // far too many unprofitable cases.
3000 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
3001 if (!StrideExpr)
3002 return;
3003
3004 if (match(StrideExpr, m_scev_UndefOrPoison()))
3005 return;
3006
3007 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
3008 "versioning:");
3009 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
3010
3011 if (!SpeculateUnitStride) {
3012 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
3013 return;
3014 }
3015
3016 // Avoid adding the "Stride == 1" predicate when we know that
3017 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
3018 // or zero iteration loop, as Trip-Count <= Stride == 1.
3019 //
3020 // TODO: We are currently not making a very informed decision on when it is
3021 // beneficial to apply stride versioning. It might make more sense that the
3022 // users of this analysis (such as the vectorizer) will trigger it, based on
3023 // their specific cost considerations; For example, in cases where stride
3024 // versioning does not help resolving memory accesses/dependences, the
3025 // vectorizer should evaluate the cost of the runtime test, and the benefit
3026 // of various possible stride specializations, considering the alternatives
3027 // of using gather/scatters (if available).
3028
3029 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
3030
3031 // Match the types so we can compare the stride and the MaxBTC.
3032 // The Stride can be positive/negative, so we sign extend Stride;
3033 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
3034 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
3035 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
3036 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
3037 const SCEV *CastedStride = StrideExpr;
3038 const SCEV *CastedBECount = MaxBTC;
3039 ScalarEvolution *SE = PSE->getSE();
3040 if (BETypeSizeBits >= StrideTypeSizeBits)
3041 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
3042 else
3043 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
3044 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
3045 // Since TripCount == BackEdgeTakenCount + 1, checking
3046 // "Stride >= TripCount" is equivalent to checking
3047 // Stride - MaxBTC > 0.
3048 if (SE->isKnownPositive(StrideMinusBETaken)) {
3049 LLVM_DEBUG(
3050 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
3051 "Stride==1 predicate will imply that the loop executes "
3052 "at most once.\n");
3053 return;
3054 }
3055 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
3056
3057 // Strip back off the integer cast, and check that our result is a
3058 // SCEVUnknown as we expect.
3059 const SCEV *StrideBase = StrideExpr;
3060 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3061 StrideBase = C->getOperand();
3062 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3063}
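// Editor's note (illustrative): for a loop accessing A[i * Stride] with a
// loop-invariant i64 argument Stride, the entry recorded above maps the
// pointer to the SCEVUnknown for Stride; replaceSymbolicStrideSCEV can then
// analyze the access as if Stride were 1, and consumers such as
// LoopVersioning emit the corresponding "Stride == 1" runtime check.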
3064
3065 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
3066 const TargetTransformInfo *TTI,
3067 const TargetLibraryInfo *TLI, AAResults *AA,
3068 DominatorTree *DT, LoopInfo *LI,
3069 AssumptionCache *AC, bool AllowPartial)
3070 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3071 PtrRtChecking(nullptr), TheLoop(L), AllowPartial(AllowPartial) {
3072 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3073 if (TTI && !TTI->enableScalableVectorization())
3074 // Scale the vector width by 2 as rough estimate to also consider
3075 // interleaving.
3076 MaxTargetVectorWidthInBits =
3077 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) * 2;
3078
3079 DepChecker = std::make_unique<MemoryDepChecker>(
3080 *PSE, AC, DT, L, SymbolicStrides, MaxTargetVectorWidthInBits, LoopGuards);
3081 PtrRtChecking =
3082 std::make_unique<RuntimePointerChecking>(*DepChecker, SE, LoopGuards);
3083 if (canAnalyzeLoop())
3084 CanVecMem = analyzeLoop(AA, LI, TLI, DT);
3085}
3086
3087void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
3088 if (CanVecMem) {
3089 OS.indent(Depth) << "Memory dependences are safe";
3090 const MemoryDepChecker &DC = getDepChecker();
3091 if (!DC.isSafeForAnyVectorWidth())
3092 OS << " with a maximum safe vector width of "
3093 << DC.getMaxSafeVectorWidthInBits() << " bits";
3096 OS << ", with a maximum safe store-load forward width of " << SLDist
3097 << " bits";
3098 }
3099 if (PtrRtChecking->Need)
3100 OS << " with run-time checks";
3101 OS << "\n";
3102 }
3103
3104 if (HasConvergentOp)
3105 OS.indent(Depth) << "Has convergent operation in loop\n";
3106
3107 if (Report)
3108 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3109
3110 if (auto *Dependences = DepChecker->getDependences()) {
3111 OS.indent(Depth) << "Dependences:\n";
3112 for (const auto &Dep : *Dependences) {
3113 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3114 OS << "\n";
3115 }
3116 } else
3117 OS.indent(Depth) << "Too many dependences, not recorded\n";
3118
3119 // List the pairs of accesses that need run-time checks to prove independence.
3120 PtrRtChecking->print(OS, Depth);
3121 if (PtrRtChecking->Need && !HasCompletePtrRtChecking)
3122 OS.indent(Depth) << "Generated run-time checks are incomplete\n";
3123 OS << "\n";
3124
3125 OS.indent(Depth)
3126 << "Non vectorizable stores to invariant address were "
3127 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3128 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3129 ? ""
3130 : "not ")
3131 << "found in loop.\n";
3132
3133 OS.indent(Depth) << "SCEV assumptions:\n";
3134 PSE->getPredicate().print(OS, Depth);
3135
3136 OS << "\n";
3137
3138 OS.indent(Depth) << "Expressions re-written:\n";
3139 PSE->print(OS, Depth);
3140}
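// Illustrative shape of the report printed above for a vectorizable loop that
// needs run-time checks (pointer-check and SCEV-predicate details come from
// RuntimePointerChecking::print and PSE->print and are elided as <...>):
//
//   Memory dependences are safe with run-time checks
//   Dependences:
//   <one entry per recorded dependence>
//   <run-time pointer checks>
//
//   Non vectorizable stores to invariant address were not found in loop.
//   SCEV assumptions:
//   <predicates>
//
//   Expressions re-written:
//   <rewrites>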
3141
3142 const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L,
3143 bool AllowPartial) {
3144 const auto &[It, Inserted] = LoopAccessInfoMap.try_emplace(&L);
3145
3146 // We need to create the LoopAccessInfo if either we don't already have one,
3147 // or if it was created with a different value of AllowPartial.
3148 if (Inserted || It->second->hasAllowPartial() != AllowPartial)
3149 It->second = std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT,
3150 &LI, AC, AllowPartial);
3151
3152 return *It->second;
3153}
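// Usage note (illustrative, assuming LAIs is a LoopAccessInfoManager and L an
// already analyzed loop): repeated queries reuse the cached result; only a
// query with a different AllowPartial value recomputes it.
//   const LoopAccessInfo &A = LAIs.getInfo(L);        // computed and cached
//   const LoopAccessInfo &B = LAIs.getInfo(L);        // cache hit, &A == &B
//   const LoopAccessInfo &C = LAIs.getInfo(L, /*AllowPartial=*/true); // recomputed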
3154 void LoopAccessInfoManager::clear() {
3155 // Collect LoopAccessInfo entries that may keep references to IR outside the
3156 // analyzed loop or SCEVs that may have been modified or invalidated. At the
3157 // moment, that is loops requiring memory or SCEV runtime checks, as those cache
3158 // SCEVs, e.g. for pointer expressions.
3159 for (const auto &[L, LAI] : LoopAccessInfoMap) {
3160 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3161 LAI->getPSE().getPredicate().isAlwaysTrue())
3162 continue;
3163 LoopAccessInfoMap.erase(L);
3164 }
3165}
3166
3167 bool LoopAccessInfoManager::invalidate(
3168 Function &F, const PreservedAnalyses &PA,
3169 FunctionAnalysisManager::Invalidator &Inv) {
3170 // Check whether our analysis is preserved.
3171 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3172 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3173 // If not, give up now.
3174 return true;
3175
3176 // Check whether the analyses we depend on became invalid for any reason.
3177 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3178 // invalid.
3179 return Inv.invalidate<AAManager>(F, PA) ||
3180 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
3181 Inv.invalidate<LoopAnalysis>(F, PA) ||
3182 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
3183}
3184
3185 LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
3186 FunctionAnalysisManager &FAM) {
3187 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
3188 auto &AA = FAM.getResult<AAManager>(F);
3189 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3190 auto &LI = FAM.getResult<LoopAnalysis>(F);
3191 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
3192 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3193 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
3194 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI, &AC);
3195}
3196
3197AnalysisKey LoopAccessAnalysis::Key;
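
A minimal sketch of how a function pass might consume this analysis through the
new pass manager, assuming a hypothetical pass named PrintLoopAccessPass; it
relies only on the LoopAccessAnalysis, LoopAnalysis, LoopAccessInfoManager and
LoopAccessInfo APIs defined above:

    #include "llvm/Analysis/LoopAccessAnalysis.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/IR/PassManager.h"
    #include "llvm/Support/Debug.h"

    using namespace llvm;

    // Hypothetical printer pass: queries LoopAccessAnalysis and dumps the
    // report produced by LoopAccessInfo::print for every loop in the function.
    struct PrintLoopAccessPass : PassInfoMixin<PrintLoopAccessPass> {
      PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM) {
        LoopAccessInfoManager &LAIs = FAM.getResult<LoopAccessAnalysis>(F);
        LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
        // Visit all loops in preorder; non-innermost loops are reported as not
        // analyzable rather than rejected with an assertion.
        for (Loop *L : LI.getLoopsInPreorder()) {
          const LoopAccessInfo &LAI = LAIs.getInfo(*L);
          LAI.print(dbgs(), /*Depth=*/2);
        }
        return PreservedAnalyses::all();
      }
    };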