//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <variant>
#include <vector>

using namespace llvm;
using namespace llvm::SCEVPatternMatch;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

cl::opt<bool> llvm::HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::init(true));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

const SCEV *
llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                const DenseMap<Value *, const SCEV *> &PtrToStride,
                                Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  const SCEV *StrideSCEV = PtrToStride.lookup(Ptr);
  if (!StrideSCEV)
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  // Note: This assert is both overly strong and overly weak. The actual
  // invariant here is that StrideSCEV should be loop invariant. The only
  // such invariant strides we happen to speculate right now are unknowns
  // and thus this is a reasonable proxy of the actual invariant.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");

  ScalarEvolution *SE = PSE.getSE();
  const SCEV *CT = SE->getOne(StrideSCEV->getType());
  PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
  const SCEV *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}
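
// For example (a sketch, not output of the actual implementation): for an
// access A[i * Stride] with a 4-byte element type, the original SCEV
//   {%A,+,(4 * %Stride)}<%loop>
// is rewritten under the added predicate "%Stride == 1" to
//   {%A,+,4}<%loop>,
// which the rest of LAA can then treat as a unit-stride access.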

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, const RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}

/// Returns \p A + \p B, if it is guaranteed not to unsigned wrap. Otherwise
/// returns nullptr. \p A and \p B must have the same type.
static const SCEV *addSCEVNoOverflow(const SCEV *A, const SCEV *B,
                                     ScalarEvolution &SE) {
  if (!SE.willNotOverflow(Instruction::Add, /*IsSigned=*/false, A, B))
    return nullptr;
  return SE.getAddExpr(A, B);
}

/// Returns \p A * \p B, if it is guaranteed not to unsigned wrap. Otherwise
/// returns nullptr. \p A and \p B must have the same type.
static const SCEV *mulSCEVNoOverflow(const SCEV *A, const SCEV *B,
                                     ScalarEvolution &SE) {
  if (!SE.willNotOverflow(Instruction::Mul, /*IsSigned=*/false, A, B))
    return nullptr;
  return SE.getMulExpr(A, B);
}

/// Return true, if evaluating \p AR at \p MaxBTC cannot wrap, because \p AR at
/// \p MaxBTC is guaranteed inbounds of the accessed object.
static bool evaluatePtrAddRecAtMaxBTCWillNotWrap(
    const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize,
    ScalarEvolution &SE, const DataLayout &DL, DominatorTree *DT,
    AssumptionCache *AC,
    std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
  auto *PointerBase = SE.getPointerBase(AR->getStart());
  auto *StartPtr = dyn_cast<SCEVUnknown>(PointerBase);
  if (!StartPtr)
    return false;
  const Loop *L = AR->getLoop();
  bool CheckForNonNull, CheckForFreed;
  Value *StartPtrV = StartPtr->getValue();
  uint64_t DerefBytes = StartPtrV->getPointerDereferenceableBytes(
      DL, CheckForNonNull, CheckForFreed);

  if (DerefBytes && (CheckForNonNull || CheckForFreed))
    return false;

  const SCEV *Step = AR->getStepRecurrence(SE);
  Type *WiderTy = SE.getWiderType(MaxBTC->getType(), Step->getType());
  const SCEV *DerefBytesSCEV = SE.getConstant(WiderTy, DerefBytes);

  // Check if we have a suitable dereferenceable assumption we can use.
  Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt();
  if (BasicBlock *LoopPred = L->getLoopPredecessor()) {
    if (isa<UncondBrInst, CondBrInst>(LoopPred->getTerminator()))
      CtxI = LoopPred->getTerminator();
  }
  RetainedKnowledge DerefRK;
  getKnowledgeForValue(StartPtrV, {Attribute::Dereferenceable}, *AC,
                       [&](RetainedKnowledge RK, Instruction *Assume, auto) {
                         if (!isValidAssumeForContext(Assume, CtxI, DT))
                           return false;
                         if (StartPtrV->canBeFreed() &&
                             !willNotFreeBetween(Assume, CtxI))
                           return false;
                         DerefRK = std::max(DerefRK, RK);
                         return true;
                       });
  if (DerefRK) {
    const SCEV *DerefRKSCEV = SE.getSCEV(DerefRK.IRArgValue);
    Type *CommonTy =
        SE.getWiderType(DerefBytesSCEV->getType(), DerefRKSCEV->getType());
    DerefBytesSCEV = SE.getNoopOrZeroExtend(DerefBytesSCEV, CommonTy);
    DerefRKSCEV = SE.getNoopOrZeroExtend(DerefRKSCEV, CommonTy);
    DerefBytesSCEV = SE.getUMaxExpr(DerefBytesSCEV, DerefRKSCEV);
  }

  if (DerefBytesSCEV->isZero())
    return false;

  bool IsKnownNonNegative = SE.isKnownNonNegative(Step);
  if (!IsKnownNonNegative && !SE.isKnownNegative(Step))
    return false;

  Step = SE.getNoopOrSignExtend(Step, WiderTy);
  MaxBTC = SE.getNoopOrZeroExtend(MaxBTC, WiderTy);

  // For the computations below, make sure they don't unsigned wrap.
  if (!SE.isKnownPredicate(CmpInst::ICMP_UGE, AR->getStart(), StartPtr))
    return false;
  const SCEV *StartOffset = SE.getNoopOrZeroExtend(
      SE.getMinusSCEV(AR->getStart(), StartPtr), WiderTy);

  if (!LoopGuards)
    LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(AR->getLoop(), SE));
  MaxBTC = SE.applyLoopGuards(MaxBTC, *LoopGuards);

  const SCEV *OffsetAtLastIter =
      mulSCEVNoOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
  if (!OffsetAtLastIter) {
    // Re-try with constant max backedge-taken count if using the symbolic one
    // failed.
    MaxBTC = SE.getConstantMaxBackedgeTakenCount(AR->getLoop());
    if (isa<SCEVCouldNotCompute>(MaxBTC))
      return false;
    MaxBTC = SE.getNoopOrZeroExtend(MaxBTC, WiderTy);
    OffsetAtLastIter =
        mulSCEVNoOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
    if (!OffsetAtLastIter)
      return false;
  }

  const SCEV *OffsetEndBytes = addSCEVNoOverflow(
      OffsetAtLastIter, SE.getNoopOrZeroExtend(EltSize, WiderTy), SE);
  if (!OffsetEndBytes)
    return false;

  if (IsKnownNonNegative) {
    // For positive steps, check if
    //  (AR->getStart() - StartPtr) + (MaxBTC * Step) + EltSize <= DerefBytes,
    // while making sure none of the computations unsigned wrap themselves.
    const SCEV *EndBytes = addSCEVNoOverflow(StartOffset, OffsetEndBytes, SE);
    if (!EndBytes)
      return false;

    DerefBytesSCEV = SE.applyLoopGuards(DerefBytesSCEV, *LoopGuards);
    return SE.isKnownPredicate(CmpInst::ICMP_ULE, EndBytes, DerefBytesSCEV);
  }

  // For negative steps check if
  //  * StartOffset >= (MaxBTC * Step + EltSize)
  //  * StartOffset <= DerefBytes.
  assert(SE.isKnownNegative(Step) && "must be known negative");
  return SE.isKnownPredicate(CmpInst::ICMP_SGE, StartOffset, OffsetEndBytes) &&
         SE.isKnownPredicate(CmpInst::ICMP_ULE, StartOffset, DerefBytesSCEV);
}
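
// A worked instance of the positive-step case (a sketch with assumed values):
// for a pointer dereferenceable for 4096 bytes, an access {%p,+,4} starting
// at offset 0 with EltSize = 4 and MaxBTC = 1022, the last access ends at
// 1022 * 4 + 4 = 4092 <= 4096 bytes, so evaluating the AddRec at MaxBTC stays
// within the object and therefore cannot wrap.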

std::pair<const SCEV *, const SCEV *> llvm::getStartAndEndForAccess(
    const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC,
    const SCEV *MaxBTC, ScalarEvolution *SE,
    DenseMap<std::pair<const SCEV *, const SCEV *>,
             std::pair<const SCEV *, const SCEV *>> *PointerBounds,
    DominatorTree *DT, AssumptionCache *AC,
    std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
  auto &DL = Lp->getHeader()->getDataLayout();
  Type *IdxTy = DL.getIndexType(PtrExpr->getType());
  const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);

  // Delegate to the SCEV-based overload, passing through the cache.
  return getStartAndEndForAccess(Lp, PtrExpr, EltSizeSCEV, BTC, MaxBTC, SE,
                                 PointerBounds, DT, AC, LoopGuards);
}

std::pair<const SCEV *, const SCEV *> llvm::getStartAndEndForAccess(
    const Loop *Lp, const SCEV *PtrExpr, const SCEV *EltSizeSCEV,
    const SCEV *BTC, const SCEV *MaxBTC, ScalarEvolution *SE,
    DenseMap<std::pair<const SCEV *, const SCEV *>,
             std::pair<const SCEV *, const SCEV *>> *PointerBounds,
    DominatorTree *DT, AssumptionCache *AC,
    std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
  std::pair<const SCEV *, const SCEV *> *PtrBoundsPair;
  if (PointerBounds) {
    auto [Iter, Ins] = PointerBounds->insert(
        {{PtrExpr, EltSizeSCEV},
         {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
    if (!Ins)
      return Iter->second;
    PtrBoundsPair = &Iter->second;
  }

  const SCEV *ScStart;
  const SCEV *ScEnd;

  auto &DL = Lp->getHeader()->getDataLayout();
  if (SE->isLoopInvariant(PtrExpr, Lp)) {
    ScStart = ScEnd = PtrExpr;
  } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
    ScStart = AR->getStart();
    if (!isa<SCEVCouldNotCompute>(BTC))
      // Evaluating AR at an exact BTC is safe: LAA separately checks that
      // accesses cannot wrap in the loop. If evaluating AR at BTC wraps, then
      // the loop either triggers UB when executing a memory access with a
      // poison pointer or the wrapping/poisoned pointer is not used.
      ScEnd = AR->evaluateAtIteration(BTC, *SE);
    else {
      // Evaluating AR at MaxBTC may wrap and create an expression that is less
      // than the start of the AddRec due to wrapping (for example consider
      // MaxBTC = -2). If that's the case, set ScEnd to -(EltSize + 1). ScEnd
      // will get incremented by EltSize before returning, so this effectively
      // sets ScEnd to the maximum unsigned value for the type. Note that LAA
      // separately checks that accesses cannot wrap, so unsigned max
      // represents an upper bound.
      if (evaluatePtrAddRecAtMaxBTCWillNotWrap(AR, MaxBTC, EltSizeSCEV, *SE, DL,
                                               DT, AC, LoopGuards)) {
        ScEnd = AR->evaluateAtIteration(MaxBTC, *SE);
      } else {
        ScEnd = SE->getAddExpr(
            SE->getNegativeSCEV(EltSizeSCEV),
            SE->getSCEV(ConstantExpr::getIntToPtr(
                ConstantInt::get(EltSizeSCEV->getType(), -1),
                AR->getType())));
      }
    }
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  } else
    return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};

  assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
  assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");

  // Add the size of the pointed-to element to ScEnd.
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  std::pair<const SCEV *, const SCEV *> Res = {ScStart, ScEnd};
  if (PointerBounds)
    *PtrBoundsPair = Res;
  return Res;
}
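
// For example (a sketch with assumed values): for the access {%base,+,4} in a
// loop with backedge-taken count %n, the computed range is
//   ScStart = %base
//   ScEnd   = (%base + 4 * %n) + 4
// i.e. [Start, End) covers every byte the access may touch; runtime-check
// generation below compares such ranges for overlap.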

/// Calculate Start and End points of memory access using
/// getStartAndEndForAccess.
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
  const SCEV *BTC = PSE.getBackedgeTakenCount();
  const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
      Lp, PtrExpr, AccessTy, BTC, SymbolicMaxBTC, PSE.getSE(),
      &DC.getPointerBounds(), DC.getDT(), DC.getAC(), LoopGuards);
  assert(!isa<SCEVCouldNotCompute>(ScStart) &&
         !isa<SCEVCouldNotCompute>(ScEnd) &&
         "must be able to compute both start and end expressions");
  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}

bool RuntimePointerChecking::tryToCreateDiffCheck(
    const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
  // If either group contains multiple different pointers, bail out.
  // TODO: Support multiple pointers by using the minimum or maximum pointer,
  // depending on src & sink.
  if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
    return false;

  const PointerInfo *Src = &Pointers[CGI.Members[0]];
  const PointerInfo *Sink = &Pointers[CGJ.Members[0]];

  // If either pointer is read and written, multiple checks may be needed. Bail
  // out.
  if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
      !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
    return false;

  ArrayRef<unsigned> AccSrc =
      DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
  ArrayRef<unsigned> AccSink =
      DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
  // If either pointer is accessed multiple times, there may not be a clear
  // src/sink relation. Bail out for now.
  if (AccSrc.size() != 1 || AccSink.size() != 1)
    return false;

  // If the sink is accessed before src, swap src/sink.
  if (AccSink[0] < AccSrc[0])
    std::swap(Src, Sink);

  const SCEVConstant *Step;
  const SCEV *SrcStart;
  const SCEV *SinkStart;
  const Loop *InnerLoop = DC.getInnermostLoop();
  if (!match(Src->Expr,
             m_scev_AffineAddRec(m_SCEV(SrcStart), m_SCEVConstant(Step),
                                 m_SpecificLoop(InnerLoop))) ||
      !match(Sink->Expr,
             m_scev_AffineAddRec(m_SCEV(SinkStart), m_scev_Specific(Step),
                                 m_SpecificLoop(InnerLoop))))
    return false;

  SmallVector<Instruction *, 4> SrcInsts =
      DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
  SmallVector<Instruction *, 4> SinkInsts =
      DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
  Type *SrcTy = getLoadStoreType(SrcInsts[0]);
  Type *DstTy = getLoadStoreType(SinkInsts[0]);
  if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
    return false;

  const DataLayout &DL = InnerLoop->getHeader()->getDataLayout();
  unsigned AllocSize =
      std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));

  // Only constant steps matching the AllocSize are supported at the moment.
  // This simplifies the difference computation. Can be extended in the future.
  if (Step->getAPInt().abs() != AllocSize)
    return false;

  // When counting down, the dependence distance needs to be swapped.
  if (Step->getValue()->isNegative())
    std::swap(SinkStart, SrcStart);

  const SCEV *SinkStartInt = SE->getPtrToAddrExpr(SinkStart);
  const SCEV *SrcStartInt = SE->getPtrToAddrExpr(SrcStart);
  if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
      isa<SCEVCouldNotCompute>(SrcStartInt))
    return false;

  // If the start values for both Src and Sink also vary according to an outer
  // loop, then it's probably better to avoid creating diff checks because
  // they may not be hoisted. We should instead let llvm::addRuntimeChecks
  // do the expanded full range overlap checks, which can be hoisted.
  if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
      isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
    auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
    auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
    const Loop *StartARLoop = SrcStartAR->getLoop();
    if (StartARLoop == SinkStartAR->getLoop() &&
        StartARLoop == InnerLoop->getParentLoop() &&
        // If the diff check would already be loop invariant (due to the
        // recurrences being the same), then we prefer to keep the diff checks
        // because they are cheaper.
        SrcStartAR->getStepRecurrence(*SE) !=
            SinkStartAR->getStepRecurrence(*SE)) {
      LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
                           "cannot be hoisted out of the outer loop\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
                    << "SrcStart: " << *SrcStartInt << '\n'
                    << "SinkStartInt: " << *SinkStartInt << '\n');
  DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                          Src->NeedsFreeze || Sink->NeedsFreeze);
  return true;
}
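
// The emitted check is conceptually a single unsigned comparison of the form
//   (SinkStart - SrcStart) u>= VectorWidth * AllocSize
// (a sketch; the exact scaling factor is chosen by the consumer when the
// checks are expanded), which is cheaper than testing two full [Start, End)
// ranges for overlap.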

SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ)) {
        CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
        Checks.emplace_back(&CGI, &CGJ);
      }
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (const auto &I : M.Members)
    for (const auto &J : N.Members)
      if (needsChecking(I, J))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
  if (!Diff)
    return nullptr;
  return Diff->isNegative() ? J : I;
}
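
// For example (a sketch): with I = %p and J = (%p + 8), the constant
// difference J - I is 8 (non-negative), so I is returned as the minimum. When
// the difference is not a compile-time constant, nullptr signals that the two
// expressions cannot be ordered and the pointers cannot share a checking
// group.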

bool RuntimeCheckingPtrGroup::addPointer(
    unsigned Index, const RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  this->NeedsFreeze |= NeedsFreeze;
  return true;
}
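
// For example (a sketch): a group with bounds [%a, %a+64) absorbing a pointer
// with bounds [%a+32, %a+96) widens to [%a, %a+96); a single range check
// against the merged bounds then covers both members.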

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // In the above case, we have a non-constant distance and an Unknown
  // dependence between accesses to the same underlying object, and could retry
  // with runtime checks without dependency information being available. In
  // this case we will use the fallback path and create separate checking
  // groups for accesses not present in DepCands.

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue].push_back(Index);

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.contains(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    // If there is no entry in the dependency partition, there are no potential
    // accesses to merge; simply add a new pointer checking group.
    if (!DepCands.contains(Access)) {
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
      continue;
    }

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto M : DepCands.members(Access)) {
      auto PointerI = PositionMap.find(M.getPointer());
      // If we can't find the pointer in PositionMap that means we can't
      // generate a memcheck for it.
      if (PointerI == PositionMap.end())
        continue;
      for (unsigned Pointer : PointerI->second) {
        bool Merged = false;
        // Mark this pointer as seen.
        Seen.insert(Pointer);

        // Go through all the existing sets and see if we can find one
        // which can include this pointer.
        for (RuntimeCheckingPtrGroup &Group : Groups) {
          // Don't perform more than a certain amount of comparisons.
          // This should limit the cost of grouping the pointers to something
          // reasonable. If we do end up hitting this threshold, the algorithm
          // will create separate groups for all remaining pointers.
          if (TotalComparisons > MemoryCheckMergeThreshold)
            break;

          TotalComparisons++;

          if (Group.addPointer(Pointer, *this)) {
            Merged = true;
            break;
          }
        }

        if (!Merged)
          // We couldn't add this pointer to any existing set or the threshold
          // for the number of comparisons has been reached. Create a new group
          // to hold the current pointer.
          Groups.emplace_back(Pointer, *this);
      }
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  return PointerI.AliasSetId == PointerJ.AliasSetId;
}

/// Assign each RuntimeCheckingPtrGroup pointer an index for stable UTC output.
static DenseMap<const RuntimeCheckingPtrGroup *, unsigned>
getPtrToIdxMap(ArrayRef<RuntimeCheckingPtrGroup> CheckingGroups) {
  DenseMap<const RuntimeCheckingPtrGroup *, unsigned> PtrIndices;
  for (const auto &[Idx, CG] : enumerate(CheckingGroups))
    PtrIndices[&CG] = Idx;
  return PtrIndices;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  auto PtrIndices = getPtrToIdxMap(CheckingGroups);
  for (const auto &[Check1, Check2] : Checks) {
    const auto &First = Check1->Members, &Second = Check2->Members;
    OS.indent(Depth) << "Check " << N++ << ":\n";
    OS.indent(Depth + 2) << "Comparing group GRP" << PtrIndices.at(Check1)
                         << ":\n";
    for (unsigned K : First)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
    OS.indent(Depth + 2) << "Against group GRP" << PtrIndices.at(Check2)
                         << ":\n";
    for (unsigned K : Second)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  auto PtrIndices = getPtrToIdxMap(CheckingGroups);
  for (const auto &CG : CheckingGroups) {
    OS.indent(Depth + 2) << "Group GRP" << PtrIndices.at(&CG) << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned Member : CG.Members) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
    }
  }
}

namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  using MemAccessInfo =
      PointerIntPair<Value * /* AccessPtr */, 1, bool /* IsWrite */>;

  AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
                 DominatorTree &DT, MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE,
                 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DT(DT), DepCands(DA),
        PSE(PSE), LoopAliasScopes(LoopAliasScopes) {
    // We're analyzing dependences across loop iterations.
    BAA.enableCrossIterationMode();
  }

  /// Register a load and whether it is only read from.
  void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(const MemoryLocation &Loc, Type *AccessTy) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no-alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access, Type *AccessTy,
                            const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds). A return value of false means
  /// we couldn't analyze and generate runtime checks for all pointers in the
  /// loop, but if \p AllowPartial is set then we will have checks for those
  /// pointers we could analyze. \p DepChecker is used to remove unknown
  /// dependences from DepCands.
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, Loop *TheLoop,
                       const DenseMap<Value *, const SCEV *> &Strides,
                       Value *&UncomputablePtr, bool AllowPartial,
                       const MemoryDepChecker &DepChecker);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets();

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeChecks).
  bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  ArrayRef<MemAccessInfo> getDependenciesToCheck() const { return CheckDeps; }

private:
  using PtrAccessMap = MapVector<MemAccessInfo, SmallSetVector<Type *, 1>>;

  /// Adjust the MemoryLocation so that it represents accesses to this
  /// location across all iterations, rather than a single one.
  MemoryLocation adjustLoc(MemoryLocation Loc) const {
    // The accessed location varies within the loop, but remains within the
    // underlying object.
    Loc.Size = LocationSize::beforeOrAfterPointer();
    Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
    Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
    return Loc;
  }

  /// Drop alias scopes that are only valid within a single loop iteration.
  MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
    if (!ScopeList)
      return nullptr;

    // For the sake of simplicity, drop the whole scope list if any scope is
    // iteration-local.
    if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
          return LoopAliasScopes.contains(cast<MDNode>(Scope));
        }))
      return nullptr;

    return ScopeList;
  }

  /// Map of all accesses. Values are the types used to access memory pointed
  /// to by the pointer.
  PtrAccessMap Accesses;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  SmallVector<MemAccessInfo, 16> CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value *, 16> ReadOnlyPtr;

  /// Batched alias analysis results.
  BatchAAResults BAA;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  /// The LoopInfo of the loop being checked.
  const LoopInfo *LI;

  /// The dominator tree of the function.
  DominatorTree &DT;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that, this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeChecks), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded = false;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;

  DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;

  /// Alias scopes that are declared inside the loop, and as such not valid
  /// across iterations.
  SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
};

} // end anonymous namespace

/// Try to compute a constant stride for \p AR. Used by getPtrStride and
/// isNoWrap.
static std::optional<int64_t>
getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
                    Value *Ptr, PredicatedScalarEvolution &PSE) {
  if (isa<ScalableVectorType>(AccessTy)) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
                      << "\n");
    return std::nullopt;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG({
      dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
      if (Ptr)
        dbgs() << *Ptr << " ";

      dbgs() << "SCEV: " << *AR << "\n";
    });
    return std::nullopt;
  }

  // Check that the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const APInt *APStepVal;
  if (!match(Step, m_scev_APInt(APStepVal))) {
    LLVM_DEBUG({
      dbgs() << "LAA: Bad stride - Not a constant strided ";
      if (Ptr)
        dbgs() << *Ptr << " ";
      dbgs() << "SCEV: " << *AR << "\n";
    });
    return std::nullopt;
  }

  const auto &DL = Lp->getHeader()->getDataLayout();
  TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
  int64_t Size = AllocSize.getFixedValue();

  // Huge step value - give up.
  std::optional<int64_t> StepVal = APStepVal->trySExtValue();
  if (!StepVal)
    return std::nullopt;

  // Strided access.
  return *StepVal % Size ? std::nullopt : std::make_optional(*StepVal / Size);
}
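
// For example (a sketch): an AddRec {%p,+,8} accessing i32 (AllocSize 4)
// yields StepVal = 8, Size = 4 and therefore a stride of 2 elements; a step
// of 6 with Size 4 is rejected (6 % 4 != 0) because it does not advance by a
// whole number of elements per iteration.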

/// Check whether \p AR is a non-wrapping AddRec. If \p Ptr is not nullptr, use
/// information from the IR pointer value to determine no-wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
                     Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
                     const DominatorTree &DT,
                     std::optional<int64_t> Stride = std::nullopt) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  // An nusw getelementptr that is an AddRec cannot wrap. If it would wrap,
  // the distance between the previously accessed location and the wrapped
  // location will be larger than half the pointer index type space. In that
  // case, the GEP would be poison and any memory access dependent on it would
  // be immediate UB when executed.
  if (auto *GEP = dyn_cast_if_present<GetElementPtrInst>(Ptr);
      GEP && GEP->hasNoUnsignedSignedWrap()) {
    // For the above reasoning to apply, the pointer must be dereferenced in
    // every iteration.
    if (L->getHeader() == L->getLoopLatch() ||
        any_of(GEP->users(), [L, &DT, GEP](User *U) {
          if (getLoadStorePointerOperand(U) != GEP)
            return false;
          BasicBlock *UserBB = cast<Instruction>(U)->getParent();
          if (!L->contains(UserBB))
            return false;
          return !LoopAccessInfo::blockNeedsPredication(UserBB, L, &DT);
        }))
      return true;
  }

  if (!Stride)
    Stride = getStrideFromAddRec(AR, L, AccessTy, Ptr, PSE);
  if (Stride) {
    // If the null pointer is undefined, then an access sequence which would
    // otherwise access it can be assumed not to unsigned wrap. Note that this
    // assumes the object in memory is aligned to the natural alignment.
    unsigned AddrSpace = AR->getType()->getPointerAddressSpace();
    if (!NullPointerIsDefined(L->getHeader()->getParent(), AddrSpace) &&
        (Stride == 1 || Stride == -1))
      return true;
  }

  if (Ptr && Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
                      << "LAA: Pointer: " << *Ptr << "\n"
                      << "LAA: SCEV: " << *AR << "\n"
                      << "LAA: Added an overflow assumption\n");
    return true;
  }

  return false;
}
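
// For example (a sketch of the unit-stride case): in address space 0 of a
// function where null is not a valid pointer, a stride-1 access sequence that
// unsigned-wrapped would eventually have to touch the memory around null,
// which cannot happen in a well-defined execution; hence the AddRec can be
// treated as non-wrapping without adding a runtime predicate.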

static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      llvm::append_range(WorkList, PN->incoming_values());
    } else
      AddPointer(Ptr);
  }
}
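
// For example (a sketch in IR): for
//   %p = phi ptr [ %a, %then ], [ %b, %else ]   ; non-header phi in the loop
//   store i32 0, ptr %p
// SCEV cannot summarize %p, so visitPointers registers separate accesses for
// %a and %b instead, each of which may individually be analyzable.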

// Walk back through the IR for a pointer, looking for a select like the
// following:
//
//  %offset = select i1 %cmp, i64 %a, i64 %b
//  %addr = getelementptr double, double* %base, i64 %offset
//  %ld = load double, double* %addr, align 8
//
// We won't be able to form a single SCEVAddRecExpr from this since the
// address for each loop iteration depends on %cmp. We could potentially
// produce multiple valid SCEVAddRecExprs, though, and check all of them for
// memory safety/aliasing if needed.
//
// If we encounter some IR we don't yet handle, or something obviously fine
// like a constant, then we just add the SCEV for that term to the list passed
// in by the caller. If we have a node that may potentially yield a valid
// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
// ourselves before adding to the list.
static void findForkedSCEVs(
    ScalarEvolution *SE, const Loop *L, Value *Ptr,
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
    unsigned Depth) {
  // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
  // we've exceeded our limit on recursion, just return whatever we have
  // regardless of whether it can be used for a forked pointer or not, along
  // with an indication of whether it might be a poison or undef value.
  const SCEV *Scev = SE->getSCEV(Ptr);
  if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
      !isa<Instruction>(Ptr) || Depth == 0) {
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    return;
  }

  Depth--;

  auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
    return get<1>(S);
  };

  auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
    switch (Opcode) {
    case Instruction::Add:
      return SE->getAddExpr(L, R);
    case Instruction::Sub:
      return SE->getMinusSCEV(L, R);
    default:
      llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
    }
  };

  Instruction *I = cast<Instruction>(Ptr);
  unsigned Opcode = I->getOpcode();
  switch (Opcode) {
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GetElementPtrInst>(I);
    Type *SourceTy = GEP->getSourceElementType();
    // We only handle base + single offset GEPs here for now.
    // Not dealing with preexisting gathers yet, so no vectors.
    if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
      break;
    }
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);

    // See if we need to freeze our fork...
    bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
                       any_of(OffsetScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the base or the offset.
    // Copy the SCEV across for the one without a fork in order to generate
    // the full SCEV for both sides of the GEP.
    if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
      BaseScevs.push_back(BaseScevs[0]);
    else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
      OffsetScevs.push_back(OffsetScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    Type *IntPtrTy = SE->getEffectiveSCEVType(GEP->getPointerOperandType());

    // Find the size of the type being pointed to. We only have a single
    // index term (guarded above) so we don't need to index into arrays or
    // structures, just get the size of the scalar value.
    const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);

    for (auto [B, O] : zip(BaseScevs, OffsetScevs)) {
      const SCEV *Base = get<0>(B);
      const SCEV *Offset = get<0>(O);

      // Scale up the offsets by the size of the type, then add to the bases.
      const SCEV *Scaled =
          SE->getMulExpr(Size, SE->getTruncateOrSignExtend(Offset, IntPtrTy));
      ScevList.emplace_back(SE->getAddExpr(Base, Scaled), NeedsFreeze);
    }
    break;
  }
  case Instruction::Select: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A select means we've found a forked pointer, but we currently only
    // support a single select per pointer so if there's another behind this
    // then we just bail out and return the generic SCEV.
    findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
    if (ChildScevs.size() == 2)
      append_range(ScevList, ChildScevs);
    else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::PHI: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A phi means we've found a forked pointer, but we currently only
    // support a single phi per pointer so if there's another behind this
    // then we just bail out and return the generic SCEV.
    if (I->getNumOperands() == 2) {
      findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
      findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
    }
    if (ChildScevs.size() == 2)
      append_range(ScevList, ChildScevs);
    else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);

    // See if we need to freeze our fork...
    bool NeedsFreeze =
        any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the left or right side.
    // Copy the SCEV across for the one without a fork in order to generate
    // the full SCEV for both sides of the BinOp.
    if (LScevs.size() == 2 && RScevs.size() == 1)
      RScevs.push_back(RScevs[0]);
    else if (RScevs.size() == 2 && LScevs.size() == 1)
      LScevs.push_back(LScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    for (auto [L, R] : zip(LScevs, RScevs))
      ScevList.emplace_back(GetBinOpExpr(Opcode, get<0>(L), get<0>(R)),
                            NeedsFreeze);
    break;
  }
  default:
    // Just return the current SCEV if we haven't handled the instruction yet.
    LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
}
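
// For the select example in the comment above, this produces two candidate
// SCEVs (a sketch, assuming %a and %b are loop-invariant):
//   (%base + 8 * %a)  and  (%base + 8 * %b),
// one per arm of the select; createCheckForAccess can then emit runtime
// bounds for both instead of giving up on the pointer entirely.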

bool AccessAnalysis::createCheckForAccess(
    RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
    unsigned &RunningDepId, unsigned ASId, bool Assume) {
  Value *Ptr = Access.getPointer();
  ScalarEvolution *SE = PSE.getSE();
  assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");

  SmallVector<PointerIntPair<const SCEV *, 1, bool>> RTCheckPtrs;
  findForkedSCEVs(SE, TheLoop, Ptr, RTCheckPtrs, MaxForkedSCEVDepth);
  assert(!RTCheckPtrs.empty() &&
         "Must have some runtime-check pointer candidates");

  // RTCheckPtrs must have size 2 if there are forked pointers. Otherwise,
  // there are no forked pointers; use replaceSymbolicStrideSCEV in this case.
  auto IsLoopInvariantOrAR =
      [&SE, &TheLoop](const PointerIntPair<const SCEV *, 1, bool> &P) {
        return SE->isLoopInvariant(P.getPointer(), TheLoop) ||
               isa<SCEVAddRecExpr>(P.getPointer());
      };
  if (RTCheckPtrs.size() == 2 && all_of(RTCheckPtrs, IsLoopInvariantOrAR)) {
    LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n";
               for (const auto &[Idx, Q] : enumerate(RTCheckPtrs)) dbgs()
               << "\t(" << Idx << ") " << *Q.getPointer() << "\n");
  } else {
    RTCheckPtrs = {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
  }

  // Check whether all pointers can participate in a runtime bounds check. They
  // must either be invariant or non-wrapping affine AddRecs.
  for (auto &P : RTCheckPtrs) {
    // The bounds for a loop-invariant pointer are trivial.
    if (SE->isLoopInvariant(P.getPointer(), TheLoop))
      continue;

    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(P.getPointer());
    if (!AR && Assume)
      AR = PSE.getAsAddRec(Ptr);
    if (!AR || !AR->isAffine())
      return false;

    // If there's only one option for Ptr, look it up after bounds and wrap
    // checking, because assumptions might have been added to PSE.
    if (RTCheckPtrs.size() == 1) {
      AR =
          cast<SCEVAddRecExpr>(replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr));
      P.setPointer(AR);
    }

    if (!isNoWrap(PSE, AR, RTCheckPtrs.size() == 1 ? Ptr : nullptr, AccessTy,
                  TheLoop, Assume, DT))
      return false;
  }

  for (const auto &[PtrExpr, NeedsFreeze] : RTCheckPtrs) {
    // The id of the dependence set.
    unsigned DepId;

    if (DepCands.contains(Access)) {
      Value *Leader = DepCands.getLeaderValue(Access).getPointer();
      unsigned &LeaderId = DepSetId[Leader];
      if (!LeaderId)
        LeaderId = RunningDepId++;
      DepId = LeaderId;
    } else
      // Each access has its own dependence set.
      DepId = RunningDepId++;

    bool IsWrite = Access.getInt();
    RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
                   NeedsFreeze);
    LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
  }

  return true;
}

bool AccessAnalysis::canCheckPtrAtRT(
    RuntimePointerChecking &RtCheck, Loop *TheLoop,
    const DenseMap<Value *, const SCEV *> &StridesMap, Value *&UncomputablePtr,
    bool AllowPartial, const MemoryDepChecker &DepChecker) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded)
    return true;

  if (auto *Deps = DepChecker.getDependences()) {
    // If there are unknown dependences, runtime checks are needed to ensure
    // there's no overlap between accesses to the same underlying object.
    // Remove the equivalence classes containing both source and destination
    // accesses from DepCands. This ensures runtime checks will be generated
    // between those accesses and prevents them from being grouped together.
    for (const auto &Dep : *Deps) {
      if (Dep.Type != MemoryDepChecker::Dependence::Unknown) {
        assert(MemoryDepChecker::Dependence::isSafeForVectorization(Dep.Type) ==
                   MemoryDepChecker::VectorizationSafetyStatus::Safe &&
               "Should only skip safe dependences");
        continue;
      }
      Instruction *Src = Dep.getSource(DepChecker);
      Instruction *Dst = Dep.getDestination(DepChecker);
      DepCands.eraseClass({getPointerOperand(Src), Src->mayWriteToMemory()});
      DepCands.eraseClass({getPointerOperand(Dst), Dst->mayWriteToMemory()});
    }
  } else {
    CheckDeps.clear();
    DepCands = {};
  }

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (const auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;
    auto ASPointers = AS.getPointers();

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

    // First, count how many write and read accesses are in the alias set. Also
    // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const Value *ConstPtr : ASPointers) {
      Value *Ptr = const_cast<Value *>(ConstPtr);
      bool IsWrite = Accesses.contains(MemAccessInfo(Ptr, true));
      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set, if there are no writes
    // or a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((ASPointers.size() <= 1 ||
              all_of(ASPointers,
                     [this](const Value *Ptr) {
                       MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
                                                 true);
                       return !DepCands.contains(AccessWrite);
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      for (const auto &AccessTy : Accesses[Access]) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  /*Assume=*/false)) {
          LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                            << *Access.getPointer() << '\n');
          Retries.emplace_back(Access, AccessTy);
          CanDoAliasSetRT = false;
        }
      }
    }

    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently. For example CanDoRT=false, MayNeedRTCheck=false means
    // that we have a pointer for which we couldn't find the bounds but we
    // don't actually need to emit any checks so it does not matter.
    //
    // We need runtime checks for this alias set, if there are at least 2
    // dependence sets (in which case RunningDepId > 2) or if we need to re-try
    // any bound checks (because in that case the number of dependence sets is
    // incomplete).
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (const auto &[Access, AccessTy] : Retries) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          UncomputablePtr = Access.getPointer();
          if (!AllowPartial)
            break;
        }
      }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && (CanDoRT || AllowPartial))
    RtCheck.generateChecks(DepCands);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same underlying
  // object for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  assert(CanDoRTIfNeeded == (CanDoRT || !MayNeedRTCheck) &&
         "CanDoRTIfNeeded depends on RtCheck.Need");
  if (!CanDoRTIfNeeded && !AllowPartial)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}
1522
1523void AccessAnalysis::buildDependenceSets() {
1524 // We process the set twice: first we process read-write pointers, last we
1525 // process read-only pointers. This allows us to skip dependence tests for
1526 // read-only pointers.
1527
1528 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1529 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1530 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1531 LLVM_DEBUG({
1532 for (const auto &[A, _] : Accesses)
1533 dbgs() << "\t" << *A.getPointer() << " ("
1534 << (A.getInt()
1535 ? "write"
1536 : (ReadOnlyPtr.contains(A.getPointer()) ? "read-only"
1537 : "read"))
1538 << ")\n";
1539 });
1540
1541 // The AliasSetTracker has nicely partitioned our pointers by metadata
1542 // compatibility and potential for underlying-object overlap. As a result, we
1543 // only need to check for potential pointer dependencies within each alias
1544 // set.
1545 for (const auto &AS : AST) {
1546 bool AliasSetHasWrite = false;
1547
1548 // Map of (pointer to underlying objects, accessed address space) to last
1549 // access encountered.
1550 using UnderlyingObjToAccessMap =
1552 UnderlyingObjToAccessMap ObjToLastAccess;
1553
1554 // Set of access to check after all writes have been processed.
1555 PtrAccessMap DeferredAccesses;
1556
1557 // Iterate over each alias set twice, once to process read/write pointers,
1558 // and then to process read-only pointers.
1559
1560 auto ProcessAccesses = [&](bool UseDeferred) {
1561 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1562
1563 // Note that both the alias-set tracker and the alias sets themselves use
1564 // ordered collections internally, so the iteration order here is
1565 // deterministic.
1566 for (const Value *ConstPtr : AS.getPointers()) {
1567 Value *Ptr = const_cast<Value *>(ConstPtr);
1568
1569 // For a single memory access in AliasSetTracker, Accesses may contain
1570 // both read and write, and they both need to be handled for CheckDeps.
1571 for (auto [AccessPtr, IsWrite] : S.keys()) {
1572 if (AccessPtr != Ptr)
1573 continue;
1574
1575 // If we're using the deferred access set, then it contains only
1576 // reads.
1577 bool IsReadOnlyPtr = ReadOnlyPtr.contains(Ptr) && !IsWrite;
1578 if (UseDeferred && !IsReadOnlyPtr)
1579 continue;
1580 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1581 // read or a write.
1582 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1583 S.contains(MemAccessInfo(Ptr, false))) &&
1584 "Alias-set pointer not in the access set?");
1585
1586 MemAccessInfo Access(Ptr, IsWrite);
1587 DepCands.insert(Access);
1588
1589 // Memorize read-only pointers for later processing and skip them in
1590 // the first round (they need to be checked after we have seen all
1591 // write pointers). Note: we also mark pointers that are not
1592 // consecutive as "read-only" pointers (so that we check
1593 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1594 if (!UseDeferred && IsReadOnlyPtr) {
1595 // We only use the pointer keys, the types vector values don't
1596 // matter.
1597 DeferredAccesses.insert({Access, {}});
1598 continue;
1599 }
1600
1601 // If this is a write, check other reads and writes for conflicts. If
1602 // this is a read, only check other writes for conflicts (but only if
1603 // there is no other write to the ptr - this is an optimization to
1604 // catch "a[i] = a[i] + ..." without having to do a dependence check).
1605 if ((IsWrite || IsReadOnlyPtr) && AliasSetHasWrite) {
1606 CheckDeps.push_back(Access);
1607 IsRTCheckAnalysisNeeded = true;
1608 }
1609
1610 if (IsWrite)
1611 AliasSetHasWrite = true;
1612
1613 // Create sets of pointers connected by a shared alias set and
1614 // underlying object.
1615 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1616 UOs = {};
1617 ::getUnderlyingObjects(Ptr, UOs, LI);
1619 << "Underlying objects for pointer " << *Ptr << "\n");
1620 for (const Value *UnderlyingObj : UOs) {
1621 // nullptr never aliases anything; don't join sets for pointers that have
1622 // "null" in their UnderlyingObjects list.
1623 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1624 !NullPointerIsDefined(
1625 TheLoop->getHeader()->getParent(),
1626 UnderlyingObj->getType()->getPointerAddressSpace()))
1627 continue;
1628
1629 auto [It, Inserted] = ObjToLastAccess.try_emplace(
1630 {UnderlyingObj,
1631 cast<PointerType>(Ptr->getType())->getAddressSpace()},
1632 Access);
1633 if (!Inserted) {
1634 DepCands.unionSets(Access, It->second);
1635 It->second = Access;
1636 }
1637
1638 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1639 }
1640 }
1641 }
1642 };
1643
1644 ProcessAccesses(false);
1645 ProcessAccesses(true);
1646 }
1647}
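// Illustrative example (editorial sketch, not upstream code): two stores to
// p and p + 1 that share the same underlying alloca land in the same alias
// set and the same underlying object, so unionSets merges them into one
// dependence set, and only pairs within that set are dependence-tested.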
1648
1649/// Check whether the access through \p Ptr has a constant stride.
1650std::optional<int64_t>
1651llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
1652 const Loop *Lp, const DominatorTree &DT,
1653 const DenseMap<Value *, const SCEV *> &StridesMap,
1654 bool Assume, bool ShouldCheckWrap) {
1655 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1656 if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
1657 return 0;
1658
1659 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
1660
1661 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1662 if (Assume && !AR)
1663 AR = PSE.getAsAddRec(Ptr);
1664
1665 if (!AR) {
1666 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1667 << " SCEV: " << *PtrScev << "\n");
1668 return std::nullopt;
1669 }
1670
1671 std::optional<int64_t> Stride =
1672 getStrideFromAddRec(AR, Lp, AccessTy, Ptr, PSE);
1673 if (!ShouldCheckWrap || !Stride)
1674 return Stride;
1675
1676 if (isNoWrap(PSE, AR, Ptr, AccessTy, Lp, Assume, DT, Stride))
1677 return Stride;
1678
1679 LLVM_DEBUG(
1680 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1681 << *Ptr << " SCEV: " << *AR << "\n");
1682 return std::nullopt;
1683}
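// Example (illustrative, assuming 4-byte i32 elements): for
//   for (i = 0; i < n; ++i) A[3 * i] = 0;
// the pointer SCEV is {A,+,12}, so getPtrStride returns 3 (in units of the
// access type); a loop-invariant pointer returns 0, and A[B[i]] is not an
// AddRec, so it returns std::nullopt unless a predicate can be assumed.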
1684
1685std::optional<int64_t> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1686 Type *ElemTyB, Value *PtrB,
1687 const DataLayout &DL,
1688 ScalarEvolution &SE,
1689 bool StrictCheck, bool CheckType) {
1690 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1691
1692 // Make sure that A and B are different pointers.
1693 if (PtrA == PtrB)
1694 return 0;
1695
1696 // Make sure that the element types are the same if required.
1697 if (CheckType && ElemTyA != ElemTyB)
1698 return std::nullopt;
1699
1700 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1701 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1702
1703 // Check that the address spaces match.
1704 if (ASA != ASB)
1705 return std::nullopt;
1706 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1707
1708 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1709 const Value *PtrA1 = PtrA->stripAndAccumulateConstantOffsets(
1710 DL, OffsetA, /*AllowNonInbounds=*/true);
1711 const Value *PtrB1 = PtrB->stripAndAccumulateConstantOffsets(
1712 DL, OffsetB, /*AllowNonInbounds=*/true);
1713
1714 std::optional<int64_t> Val;
1715 if (PtrA1 == PtrB1) {
1716 // Retrieve the address space again as pointer stripping now tracks through
1717 // `addrspacecast`.
1718 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1719 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1720 // Check that the address spaces match and that the pointers are valid.
1721 if (ASA != ASB)
1722 return std::nullopt;
1723
1724 IdxWidth = DL.getIndexSizeInBits(ASA);
1725 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1726 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1727
1728 OffsetB -= OffsetA;
1729 Val = OffsetB.trySExtValue();
1730 } else {
1731 // Otherwise compute the distance with SCEV between the base pointers.
1732 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1733 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1734 std::optional<APInt> Diff =
1735 SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
1736 if (!Diff)
1737 return std::nullopt;
1738 Val = Diff->trySExtValue();
1739 }
1740
1741 if (!Val)
1742 return std::nullopt;
1743
1744 int64_t Size = DL.getTypeStoreSize(ElemTyA);
1745 int64_t Dist = *Val / Size;
1746
1747 // Ensure that the calculated distance matches the type-based one after all
1748 // the bitcasts removal in the provided pointers.
1749 if (!StrictCheck || Dist * Size == Val)
1750 return Dist;
1751 return std::nullopt;
1752}
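// Example (illustrative, assuming i32 elements): for PtrA = &A[1] and
// PtrB = &A[3], both pointers strip to the same base with byte offsets 4 and
// 12, so the result is (12 - 4) / 4 == 2. With StrictCheck, a byte
// difference that is not a multiple of the element size (say 6) fails the
// Dist * Size == Val test and yields std::nullopt.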
1753
1754bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1755 const DataLayout &DL, ScalarEvolution &SE,
1756 SmallVectorImpl<unsigned> &SortedIndices) {
1757 assert(all_of(
1758 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1759 "Expected list of pointer operands.");
1760 // Walk over the pointers, and map each of them to an offset relative to
1761 // the first pointer in the array.
1762 Value *Ptr0 = VL[0];
1763
1764 using DistOrdPair = std::pair<int64_t, unsigned>;
1765 auto Compare = llvm::less_first();
1766 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1767 Offsets.emplace(0, 0);
1768 bool IsConsecutive = true;
1769 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1770 std::optional<int64_t> Diff =
1771 getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1772 /*StrictCheck=*/true);
1773 if (!Diff)
1774 return false;
1775
1776 // Check if the pointer with the same offset is found.
1777 int64_t Offset = *Diff;
1778 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1779 if (!IsInserted)
1780 return false;
1781 // Consecutive order if the inserted element is the last one.
1782 IsConsecutive &= std::next(It) == Offsets.end();
1783 }
1784 SortedIndices.clear();
1785 if (!IsConsecutive) {
1786 // Fill SortedIndices array only if it is non-consecutive.
1787 SortedIndices.resize(VL.size());
1788 for (auto [Idx, Off] : enumerate(Offsets))
1789 SortedIndices[Idx] = Off.second;
1790 }
1791 return true;
1792}
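// Example (illustrative, assuming i32 elements): VL = {&A[2], &A[0], &A[1]}
// produces offsets 0, -2 and -1 relative to VL[0]. The sorted offsets are
// {-2, -1, 0}; the later insertions are not at the end of the set, so
// IsConsecutive is false and SortedIndices becomes [1, 2, 0], the order
// A[0], A[1], A[2] in memory.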
1793
1794/// Returns true if the memory operations \p A and \p B are consecutive.
1795bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1796 ScalarEvolution &SE, bool CheckType) {
1797 Value *PtrA = getLoadStorePointerOperand(A);
1798 Value *PtrB = getLoadStorePointerOperand(B);
1799 if (!PtrA || !PtrB)
1800 return false;
1801 Type *ElemTyA = getLoadStoreType(A);
1802 Type *ElemTyB = getLoadStoreType(B);
1803 std::optional<int64_t> Diff =
1804 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1805 /*StrictCheck=*/true, CheckType);
1806 return Diff == 1;
1807}
1808
1809void MemoryDepChecker::addAccess(StoreInst *SI) {
1810 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1811 [this, SI](Value *Ptr) {
1812 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1813 InstMap.push_back(SI);
1814 ++AccessIdx;
1815 });
1816}
1817
1818void MemoryDepChecker::addAccess(LoadInst *LI) {
1819 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1820 [this, LI](Value *Ptr) {
1821 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1822 InstMap.push_back(LI);
1823 ++AccessIdx;
1824 });
1825}
1826
1827MemoryDepChecker::VectorizationSafetyStatus
1828MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1829 switch (Type) {
1830 case NoDep:
1831 case Forward:
1832 case BackwardVectorizable:
1833 return VectorizationSafetyStatus::Safe;
1834
1835 case Unknown:
1836 return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1837 case ForwardButPreventsForwarding:
1838 case Backward:
1839 case BackwardVectorizableButPreventsForwarding:
1840 case IndirectUnsafe:
1841 return VectorizationSafetyStatus::Unsafe;
1842 }
1843 llvm_unreachable("unexpected DepType!");
1844}
1845
1846bool MemoryDepChecker::Dependence::isBackward() const {
1847 switch (Type) {
1848 case NoDep:
1849 case Forward:
1850 case ForwardButPreventsForwarding:
1851 case Unknown:
1852 case IndirectUnsafe:
1853 return false;
1854
1855 case BackwardVectorizable:
1856 case Backward:
1857 case BackwardVectorizableButPreventsForwarding:
1858 return true;
1859 }
1860 llvm_unreachable("unexpected DepType!");
1861}
1862
1866
1867bool MemoryDepChecker::Dependence::isForward() const {
1868 switch (Type) {
1869 case Forward:
1870 case ForwardButPreventsForwarding:
1871 return true;
1872
1873 case NoDep:
1874 case Unknown:
1875 case BackwardVectorizable:
1876 case Backward:
1877 case BackwardVectorizableButPreventsForwarding:
1878 case IndirectUnsafe:
1879 return false;
1880 }
1881 llvm_unreachable("unexpected DepType!");
1882}
1883
1884bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1885 uint64_t TypeByteSize,
1886 unsigned CommonStride) {
1887 // If loads occur at a distance that is not a multiple of a feasible vector
1888 // factor, store-load forwarding does not take place.
1889 // Positive dependences might cause trouble because vectorizing them might
1890 // prevent store-load forwarding, making vectorized code run a lot slower.
1891 // a[i] = a[i-3] ^ a[i-8];
1892 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1893 // hence on your typical architecture store-load forwarding does not take
1894 // place. Vectorizing in such cases does not make sense.
1895 // Store-load forwarding distance.
1896
1897 // After this many iterations store-to-load forwarding conflicts should not
1898 // cause any slowdowns.
1899 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1900 // Maximum vector factor.
1901 uint64_t MaxVFWithoutSLForwardIssuesPowerOf2 =
1902 std::min(VectorizerParams::MaxVectorWidth * TypeByteSize,
1903 MaxStoreLoadForwardSafeDistanceInBits);
1904
1905 // Compute the smallest VF at which the store and load would be misaligned.
1906 for (uint64_t VF = 2 * TypeByteSize;
1907 VF <= MaxVFWithoutSLForwardIssuesPowerOf2; VF *= 2) {
1908 // If the number of vector iterations between the store and the load is
1909 // small, we could incur conflicts.
1910 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1911 MaxVFWithoutSLForwardIssuesPowerOf2 = (VF >> 1);
1912 break;
1913 }
1914 }
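 // Worked example (illustrative): with TypeByteSize = 4 and Distance = 12
 // (e.g. i32 accesses in 'a[i] = a[i-3] ...'), the loop above starts at
 // VF = 8; 12 % 8 != 0 and 12 / 8 = 1 < 32 iterations, so the cap drops to 4
 // bytes, which is below 2 * TypeByteSize, and the check below reports a
 // conflict.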
1915
1916 if (MaxVFWithoutSLForwardIssuesPowerOf2 < 2 * TypeByteSize) {
1917 LLVM_DEBUG(
1918 dbgs() << "LAA: Distance " << Distance
1919 << " that could cause a store-load forwarding conflict\n");
1920 return true;
1921 }
1922
1923 if (CommonStride &&
1924 MaxVFWithoutSLForwardIssuesPowerOf2 <
1925 MaxStoreLoadForwardSafeDistanceInBits &&
1926 MaxVFWithoutSLForwardIssuesPowerOf2 !=
1927 VectorizerParams::MaxVectorWidth * TypeByteSize) {
1928 uint64_t MaxVF =
1929 bit_floor(MaxVFWithoutSLForwardIssuesPowerOf2 / CommonStride);
1930 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1931 MaxStoreLoadForwardSafeDistanceInBits =
1932 std::min(MaxStoreLoadForwardSafeDistanceInBits, MaxVFInBits);
1933 }
1934 return false;
1935}
1936
1937void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1938 if (Status < S)
1939 Status = S;
1940}
1941
1942/// Given a dependence-distance \p Dist between two memory accesses that have
1943/// strides in the same direction, with the maximum absolute stride given in
1944/// \p MaxStride, in a loop whose maximum backedge taken count is \p
1945/// MaxBTC, check if it is possible to prove statically that the dependence
1946/// distance is larger than the range that the accesses will travel through the
1947/// execution of the loop. If so, return true; false otherwise. This is useful
1948/// for example in loops such as the following (PR31098):
1949///
1950/// for (i = 0; i < D; ++i) {
1951/// = out[i];
1952/// out[i+D] =
1953/// }
1954static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1955 const SCEV &MaxBTC, const SCEV &Dist,
1956 uint64_t MaxStride) {
1957
1958 // If we can prove that
1959 // (**) |Dist| > MaxBTC * Step
1960 // where Step is the absolute stride of the memory accesses in bytes,
1961 // then there is no dependence.
1962 //
1963 // Rationale:
1964 // We basically want to check if the absolute distance (|Dist/Step|)
1965 // is >= the loop iteration count (or > MaxBTC).
1966 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1967 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1968 // that the dependence distance is >= VF; This is checked elsewhere.
1969 // But in some cases we can prune dependence distances early, and
1970 // even before selecting the VF, and without a runtime test, by comparing
1971 // the distance against the loop iteration count. Since the vectorized code
1972 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1973 // also guarantees that distance >= VF.
1974 //
1975 const SCEV *Step = SE.getConstant(MaxBTC.getType(), MaxStride);
1976 const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1977
1978 const SCEV *CastedDist = &Dist;
1979 const SCEV *CastedProduct = Product;
1980 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1981 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1982
1983 // The dependence distance can be positive/negative, so we sign extend Dist;
1984 // The multiplication of the absolute stride in bytes and the
1985 // backedgeTakenCount is non-negative, so we zero extend Product.
1986 if (DistTypeSizeBits > ProductTypeSizeBits)
1987 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1988 else
1989 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1990
1991 // Is Dist - (MaxBTC * Step) > 0 ?
1992 // (If so, then we have proven (**) because |Dist| >= Dist)
1993 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1994 if (SE.isKnownPositive(Minus))
1995 return true;
1996
1997 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1998 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1999 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
2000 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
2001 return SE.isKnownPositive(Minus);
2002}
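// Worked example (illustrative): for accesses A[i] and A[i + 256] with
// 4-byte elements in a loop with MaxBTC = 99, Dist is 1024 bytes and
// MaxBTC * Step = 99 * 4 = 396; 1024 - 396 > 0, so (**) holds and the
// accesses are proven independent.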
2003
2004/// Check the dependence for two accesses with the same stride \p Stride.
2005/// \p Distance is the positive distance in bytes, and \p TypeByteSize is the
2006/// type size in bytes.
2007///
2008/// \returns true if they are independent.
2009static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
2010 uint64_t TypeByteSize) {
2011 assert(Stride > 1 && "The stride must be greater than 1");
2012 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
2013 assert(Distance > 0 && "The distance must be non-zero");
2014
2015 // Skip if the distance is not a multiple of the type byte size.
2016 if (Distance % TypeByteSize)
2017 return false;
2018
2019 // No dependence if the distance is not a multiple of the stride.
2020 // E.g.
2021 // for (i = 0; i < 1024 ; i += 4)
2022 // A[i+2] = A[i] + 1;
2023 //
2024 // Two accesses in memory (distance is 2, stride is 4):
2025 // | A[0] | | | | A[4] | | | |
2026 // | | | A[2] | | | | A[6] | |
2027 //
2028 // E.g.
2029 // for (i = 0; i < 1024 ; i += 3)
2030 // A[i+4] = A[i] + 1;
2031 //
2032 // Two accesses in memory (distance is 4, stride is 3):
2033 // | A[0] | | | A[3] | | | A[6] | | |
2034 // | | | | | A[4] | | | A[7] | |
2035 return Distance % Stride;
2036}
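// Numeric check of the first example above (illustrative): A[i+2] vs A[i]
// with i32 elements and stride 4 gives Distance = 8 bytes and a scaled
// Stride of 16 bytes; 8 % 16 != 0, so the two access sequences interleave
// without ever touching the same element.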
2037
2038bool MemoryDepChecker::areAccessesCompletelyBeforeOrAfter(const SCEV *Src,
2039 Type *SrcTy,
2040 const SCEV *Sink,
2041 Type *SinkTy) {
2042 const SCEV *BTC = PSE.getBackedgeTakenCount();
2043 const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
2044 ScalarEvolution &SE = *PSE.getSE();
2045 const auto &[SrcStart_, SrcEnd_] =
2046 getStartAndEndForAccess(InnermostLoop, Src, SrcTy, BTC, SymbolicMaxBTC,
2047 &SE, &PointerBounds, DT, AC, LoopGuards);
2048 if (isa<SCEVCouldNotCompute>(SrcStart_) || isa<SCEVCouldNotCompute>(SrcEnd_))
2049 return false;
2050
2051 const auto &[SinkStart_, SinkEnd_] =
2052 getStartAndEndForAccess(InnermostLoop, Sink, SinkTy, BTC, SymbolicMaxBTC,
2053 &SE, &PointerBounds, DT, AC, LoopGuards);
2054 if (isa<SCEVCouldNotCompute>(SinkStart_) ||
2055 isa<SCEVCouldNotCompute>(SinkEnd_))
2056 return false;
2057
2058 if (!LoopGuards)
2059 LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2060
2061 auto SrcEnd = SE.applyLoopGuards(SrcEnd_, *LoopGuards);
2062 auto SinkStart = SE.applyLoopGuards(SinkStart_, *LoopGuards);
2063 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
2064 return true;
2065
2066 auto SinkEnd = SE.applyLoopGuards(SinkEnd_, *LoopGuards);
2067 auto SrcStart = SE.applyLoopGuards(SrcStart_, *LoopGuards);
2068 return SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart);
2069}
2070
2071std::variant<MemoryDepChecker::Dependence::DepType,
2072 MemoryDepChecker::DepDistanceStrideAndSizeInfo>
2073MemoryDepChecker::getDependenceDistanceStrideAndSize(
2074 const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
2075 const AccessAnalysis::MemAccessInfo &B, Instruction *BInst) {
2076 const auto &DL = InnermostLoop->getHeader()->getDataLayout();
2077 auto &SE = *PSE.getSE();
2078 const auto &[APtr, AIsWrite] = A;
2079 const auto &[BPtr, BIsWrite] = B;
2080
2081 // Two reads are independent.
2082 if (!AIsWrite && !BIsWrite)
2083 return Dependence::NoDep;
2084
2085 Type *ATy = getLoadStoreType(AInst);
2086 Type *BTy = getLoadStoreType(BInst);
2087
2088 // We cannot check pointers in different address spaces.
2089 if (APtr->getType()->getPointerAddressSpace() !=
2090 BPtr->getType()->getPointerAddressSpace())
2091 return Dependence::Unknown;
2092
2093 std::optional<int64_t> StrideAPtr = getPtrStride(
2094 PSE, ATy, APtr, InnermostLoop, *DT, SymbolicStrides, true, true);
2095 std::optional<int64_t> StrideBPtr = getPtrStride(
2096 PSE, BTy, BPtr, InnermostLoop, *DT, SymbolicStrides, true, true);
2097
2098 const SCEV *Src = PSE.getSCEV(APtr);
2099 const SCEV *Sink = PSE.getSCEV(BPtr);
2100
2101 // If the induction step is negative we have to invert source and sink of the
2102 // dependence when measuring the distance between them. We should not swap
2103 // AIsWrite with BIsWrite, as their uses expect them in program order.
2104 if (StrideAPtr && *StrideAPtr < 0) {
2105 std::swap(Src, Sink);
2106 std::swap(AInst, BInst);
2107 std::swap(ATy, BTy);
2108 std::swap(StrideAPtr, StrideBPtr);
2109 }
2110
2111 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
2112
2113 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
2114 << "\n");
2115 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
2116 << ": " << *Dist << "\n");
2117
2118 // Need accesses with constant strides and the same direction for further
2119 // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
2120 // similar code or pointer arithmetic that could wrap in the address space.
2121
2122 // If either Src or Sink are not strided (i.e. not a non-wrapping AddRec) and
2123 // not loop-invariant (stride will be 0 in that case), we cannot analyze the
2124 // dependence further and also cannot generate runtime checks.
2125 if (!StrideAPtr || !StrideBPtr) {
2126 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
2127 return Dependence::IndirectUnsafe;
2128 }
2129
2130 int64_t StrideAPtrInt = *StrideAPtr;
2131 int64_t StrideBPtrInt = *StrideBPtr;
2132 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
2133 << " Sink induction step: " << StrideBPtrInt << "\n");
2134 // At least Src or Sink are loop invariant and the other is strided or
2135 // invariant. We can generate a runtime check to disambiguate the accesses.
2136 if (!StrideAPtrInt || !StrideBPtrInt)
2137 return Dependence::Unknown;
2138
2139 // Both Src and Sink have a constant stride, check if they are in the same
2140 // direction.
2141 if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {
2142 LLVM_DEBUG(
2143 dbgs() << "Pointer access with strides in different directions\n");
2144 return Dependence::Unknown;
2145 }
2146
2147 TypeSize AStoreSz = DL.getTypeStoreSize(ATy);
2148 TypeSize BStoreSz = DL.getTypeStoreSize(BTy);
2149
2150 // If store sizes are not the same, set TypeByteSize to zero, so we can check
2151 // it in the caller isDependent.
2152 uint64_t ASz = DL.getTypeAllocSize(ATy);
2153 uint64_t BSz = DL.getTypeAllocSize(BTy);
2154 uint64_t TypeByteSize = (AStoreSz == BStoreSz) ? BSz : 0;
2155
2156 uint64_t StrideAScaled = std::abs(StrideAPtrInt) * ASz;
2157 uint64_t StrideBScaled = std::abs(StrideBPtrInt) * BSz;
2158
2159 uint64_t MaxStride = std::max(StrideAScaled, StrideBScaled);
2160
2161 std::optional<uint64_t> CommonStride;
2162 if (StrideAScaled == StrideBScaled)
2163 CommonStride = StrideAScaled;
2164
2165 // TODO: Historically, we didn't retry with runtime checks when (unscaled)
2166 // strides were different, but there is no inherent reason not to.
2167 if (!isa<SCEVConstant>(Dist))
2168 ShouldRetryWithRuntimeChecks |= StrideAPtrInt == StrideBPtrInt;
2169
2170 // If distance is a SCEVCouldNotCompute, return Unknown immediately.
2171 if (isa<SCEVCouldNotCompute>(Dist)) {
2172 LLVM_DEBUG(dbgs() << "LAA: Uncomputable distance.\n");
2173 return Dependence::Unknown;
2174 }
2175
2176 return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,
2177 TypeByteSize, AIsWrite, BIsWrite);
2178}
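// Example (illustrative): for A[i] with i32 and B[i] with i64, both with
// unit stride, the store sizes differ (4 vs 8), so TypeByteSize is set to 0;
// the scaled strides are 4 and 8 bytes, giving MaxStride = 8 and no
// CommonStride.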
2179
2180MemoryDepChecker::Dependence::DepType
2181MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
2182 const MemAccessInfo &B, unsigned BIdx) {
2183 assert(AIdx < BIdx && "Must pass arguments in program order");
2184
2185 // Check if we can prove that Sink only accesses memory after Src's end or
2186 // vice versa. The helper is used to perform the checks only on the exit paths
2187 // where it helps to improve the analysis result.
2188 auto CheckCompletelyBeforeOrAfter = [&]() {
2189 auto *APtr = A.getPointer();
2190 auto *BPtr = B.getPointer();
2191 Type *ATy = getLoadStoreType(InstMap[AIdx]);
2192 Type *BTy = getLoadStoreType(InstMap[BIdx]);
2193 const SCEV *Src = PSE.getSCEV(APtr);
2194 const SCEV *Sink = PSE.getSCEV(BPtr);
2195 return areAccessesCompletelyBeforeOrAfter(Src, ATy, Sink, BTy);
2196 };
2197
2198 // Get the dependence distance, stride, type size and what access writes for
2199 // the dependence between A and B.
2200 auto Res =
2201 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
2202 if (std::holds_alternative<Dependence::DepType>(Res)) {
2203 if (std::get<Dependence::DepType>(Res) == Dependence::Unknown &&
2204 CheckCompletelyBeforeOrAfter())
2205 return Dependence::NoDep;
2206 return std::get<Dependence::DepType>(Res);
2207 }
2208
2209 auto &[Dist, MaxStride, CommonStride, TypeByteSize, AIsWrite, BIsWrite] =
2210 std::get<DepDistanceStrideAndSizeInfo>(Res);
2211 bool HasSameSize = TypeByteSize > 0;
2212
2213 ScalarEvolution &SE = *PSE.getSE();
2214 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2215
2216 // If the distance between the accesses is larger than their maximum absolute
2217 // stride multiplied by the symbolic maximum backedge taken count (which is an
2218 // upper bound of the number of iterations), the accesses are independent, i.e.
2219 // they are far enough apart that they won't access the same location
2220 // across all loop iterations.
2221 if (HasSameSize &&
2222 isSafeDependenceDistance(
2223 DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()), *Dist, MaxStride))
2224 return Dependence::NoDep;
2225
2226 // The rest of this function relies on ConstDist being at most 64-bits, which
2227 // is checked earlier. Will assert if the calling code changes.
2228 const APInt *APDist = nullptr;
2229 uint64_t ConstDist =
2230 match(Dist, m_scev_APInt(APDist)) ? APDist->abs().getZExtValue() : 0;
2231
2232 // Attempt to prove strided accesses independent.
2233 if (APDist) {
2234 // If the distance between accesses and their strides are known constants,
2235 // check whether the accesses interlace each other.
2236 if (ConstDist > 0 && CommonStride && CommonStride > 1 && HasSameSize &&
2237 areStridedAccessesIndependent(ConstDist, *CommonStride, TypeByteSize)) {
2238 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2239 return Dependence::NoDep;
2240 }
2241 } else {
2242 if (!LoopGuards)
2243 LoopGuards.emplace(
2244 ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2245 Dist = SE.applyLoopGuards(Dist, *LoopGuards);
2246 }
2247
2248 // Negative distances are not plausible dependencies.
2249 if (SE.isKnownNonPositive(Dist)) {
2250 if (SE.isKnownNonNegative(Dist)) {
2251 if (HasSameSize) {
2252 // Write to the same location with the same size.
2253 return Dependence::Forward;
2254 }
2255 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2256 "different type sizes\n");
2257 return Dependence::Unknown;
2258 }
2259
2260 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2261 // Check if the first access writes to a location that is read in a later
2262 // iteration, where the distance between them is not a multiple of a vector
2263 // factor and relatively small.
2264 //
2265 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2266 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2267 // forward dependency will allow vectorization using any width.
2268
2269 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2270 if (!ConstDist) {
2271 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2272 : Dependence::Unknown;
2273 }
2274 if (!HasSameSize ||
2275 couldPreventStoreLoadForward(ConstDist, TypeByteSize)) {
2276 LLVM_DEBUG(
2277 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2278 return Dependence::ForwardButPreventsForwarding;
2279 }
2280 }
2281
2282 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2283 return Dependence::Forward;
2284 }
2285
2286 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2287 // Below we only handle strictly positive distances.
2288 if (MinDistance <= 0) {
2289 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2290 : Dependence::Unknown;
2291 }
2292
2293 if (!HasSameSize) {
2294 if (CheckCompletelyBeforeOrAfter())
2295 return Dependence::NoDep;
2296 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2297 "different type sizes\n");
2298 return Dependence::Unknown;
2299 }
2300 // Bail out early if passed-in parameters make vectorization not feasible.
2301 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2303 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2305 // The minimum number of iterations for a vectorized/unrolled version.
2306 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2307
2308 // It's not vectorizable if the distance is smaller than the minimum distance
2309 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2310 // front needs MaxStride. Vectorizing the last iteration needs TypeByteSize.
2311 // (There is no need to add the last gap distance.)
2312 //
2313 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2314 // foo(int *A) {
2315 // int *B = (int *)((char *)A + 14);
2316 // for (i = 0 ; i < 1024 ; i += 2)
2317 // B[i] = A[i] + 1;
2318 // }
2319 //
2320 // Two accesses in memory (stride is 4 * 2):
2321 // | A[0] | | A[2] | | A[4] | | A[6] | |
2322 // | B[0] | | B[2] | | B[4] |
2323 //
2324 // MinDistance needs for vectorizing iterations except the last iteration:
2325 // 4 * 2 * (MinNumIter - 1). MinDistance needs for the last iteration: 4.
2326 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2327 //
2328 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2329 // 12, which is less than distance.
2330 //
2331 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2332 // the minimum distance needed is 28, which is greater than distance. It is
2333 // not safe to do vectorization.
2334 //
2335 // We use MaxStride (maximum of src and sink strides) to get a conservative
2336 // lower bound on the MinDistanceNeeded in case of different strides.
2337
2338 // We know that Dist is positive, but it may not be constant. Use the signed
2339 // minimum for computations below, as this ensures we compute the closest
2340 // possible dependence distance.
2341 uint64_t MinDistanceNeeded = MaxStride * (MinNumIter - 1) + TypeByteSize;
2342 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2343 if (!ConstDist) {
2344 // For non-constant distances, we checked the lower bound of the
2345 // dependence distance and the distance may be larger at runtime (and safe
2346 // for vectorization). Classify it as Unknown, so we re-try with runtime
2347 // checks, unless we can prove both accesses cannot overlap.
2348 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2349 : Dependence::Unknown;
2350 }
2351 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2352 << MinDistance << '\n');
2353 return Dependence::Backward;
2354 }
2355
2356 // Unsafe if the minimum distance needed is greater than the smallest
2357 // dependence distance.
2358 if (MinDistanceNeeded > MinDepDistBytes) {
2359 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2360 << MinDistanceNeeded << " size in bytes\n");
2361 return Dependence::Backward;
2362 }
2363
2364 MinDepDistBytes =
2365 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2366
2367 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2368 if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist &&
2369 couldPreventStoreLoadForward(MinDistance, TypeByteSize, *CommonStride))
2370 return Dependence::BackwardVectorizableButPreventsForwarding;
2371
2372 uint64_t MaxVF = MinDepDistBytes / MaxStride;
2373 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2374 << " with max VF = " << MaxVF << '\n');
2375
2376 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2377 if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {
2378 // For non-constant distances, we checked the lower bound of the dependence
2379 // distance and the distance may be larger at runtime (and safe for
2380 // vectorization). Classify it as Unknown, so we re-try with runtime checks,
2381 // unless we can prove both accesses cannot overlap.
2382 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2383 : Dependence::Unknown;
2384 }
2385
2386 if (CheckCompletelyBeforeOrAfter())
2387 return Dependence::NoDep;
2388
2389 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2390 return Dependence::BackwardVectorizable;
2391}
2392
2393bool MemoryDepChecker::areDepsSafe(const DepCandidates &DepCands,
2394 ArrayRef<MemAccessInfo> CheckDeps) {
2395
2396 MinDepDistBytes = -1;
2397 SmallPtrSet<MemAccessInfo, 8> Visited;
2398 for (MemAccessInfo CurAccess : CheckDeps) {
2399 if (Visited.contains(CurAccess))
2400 continue;
2401
2402 // Check accesses within this set.
2403 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2404 DepCands.findLeader(CurAccess);
2405 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2406 DepCands.member_end();
2407
2408 // Check every access pair.
2409 while (AI != AE) {
2410 Visited.insert(*AI);
2411 bool AIIsWrite = AI->getInt();
2412 // Reads from the same pointer don't create extra hazards, but multiple
2413 // stores do (WAW), so start from AI for writes and next(AI) for reads.
2414 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2415 (AIIsWrite ? AI : std::next(AI));
2416 while (OI != AE) {
2417 // Check every accessing instruction pair in program order.
2418 auto &Acc = Accesses[*AI];
2419 for (std::vector<unsigned>::iterator I1 = Acc.begin(), I1E = Acc.end();
2420 I1 != I1E; ++I1)
2421 // When checking for WAW (OI == AI) caused by multiple writes to the
2422 // same pointer, start I2 at the next access past I1 to avoid
2423 // self-comparison.
2424 for (std::vector<unsigned>::iterator
2425 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2426 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2427 I2 != I2E; ++I2) {
2428 auto A = std::make_pair(&*AI, *I1);
2429 auto B = std::make_pair(&*OI, *I2);
2430
2431 assert(*I1 != *I2);
2432 if (*I1 > *I2)
2433 std::swap(A, B);
2434
2435 Dependence::DepType Type =
2436 isDependent(*A.first, A.second, *B.first, B.second);
2437 mergeInStatus(Dependence::isSafeForVectorization(Type));
2438
2439 // Gather dependences unless we accumulated MaxDependences
2440 // dependences. In that case return as soon as we find the first
2441 // unsafe dependence. This puts a limit on this quadratic
2442 // algorithm.
2443 if (RecordDependences) {
2444 if (Type != Dependence::NoDep)
2445 Dependences.emplace_back(A.second, B.second, Type);
2446
2447 if (Dependences.size() >= MaxDependences) {
2448 RecordDependences = false;
2449 Dependences.clear();
2451 << "Too many dependences, stopped recording\n");
2452 }
2453 }
2454 if (!RecordDependences && !isSafeForVectorization())
2455 return false;
2456 }
2457 ++OI;
2458 }
2459 ++AI;
2460 }
2461 }
2462
2463 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2464 return isSafeForVectorization();
2465}
2466
2467SmallVector<Instruction *, 4>
2468MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2469 MemAccessInfo Access(Ptr, IsWrite);
2470 auto I = Accesses.find(Access);
2471 SmallVector<Instruction *, 4> Insts;
2472 if (I != Accesses.end()) {
2473 transform(I->second, std::back_inserter(Insts),
2474 [&](unsigned Idx) { return this->InstMap[Idx]; });
2475 }
2476
2477 return Insts;
2478}
2479
2481 "NoDep",
2482 "Unknown",
2483 "IndirectUnsafe",
2484 "Forward",
2485 "ForwardButPreventsForwarding",
2486 "Backward",
2487 "BackwardVectorizable",
2488 "BackwardVectorizableButPreventsForwarding"};
2489
2490void MemoryDepChecker::Dependence::print(
2491 raw_ostream &OS, unsigned Depth,
2492 const SmallVectorImpl<Instruction *> &Instrs) const {
2493 OS.indent(Depth) << DepName[Type] << ":\n";
2494 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2495 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2496}
2497
2498bool LoopAccessInfo::canAnalyzeLoop() {
2499 // We need to have a loop header.
2500 LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2501 << TheLoop->getHeader()->getParent()->getName() << "' from "
2502 << TheLoop->getLocStr() << "\n");
2503
2504 // We can only analyze innermost loops.
2505 if (!TheLoop->isInnermost()) {
2506 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2507 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2508 return false;
2509 }
2510
2511 // We must have a single backedge.
2512 if (TheLoop->getNumBackEdges() != 1) {
2513 LLVM_DEBUG(
2514 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2515 recordAnalysis("CFGNotUnderstood")
2516 << "loop control flow is not understood by analyzer";
2517 return false;
2518 }
2519
2520 // ScalarEvolution needs to be able to find the symbolic max backedge taken
2521 // count, which is an upper bound on the number of loop iterations. The loop
2522 // may execute fewer iterations, if it exits via an uncountable exit.
2523 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2524 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2525 recordAnalysis("CantComputeNumberOfIterations")
2526 << "could not determine number of loop iterations";
2527 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2528 return false;
2529 }
2530
2531 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2532 << TheLoop->getHeader()->getName() << "\n");
2533 return true;
2534}
2535
2536bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
2537 const TargetLibraryInfo *TLI,
2538 DominatorTree *DT) {
2539 // Holds the Load and Store instructions.
2540 SmallVector<LoadInst *, 16> Loads;
2541 SmallVector<StoreInst *, 16> Stores;
2542 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2543
2544 // Holds all the different accesses in the loop.
2545 unsigned NumReads = 0;
2546 unsigned NumReadWrites = 0;
2547
2548 bool HasComplexMemInst = false;
2549
2550 // A runtime check is only legal to insert if there are no convergent calls.
2551 HasConvergentOp = false;
2552
2553 PtrRtChecking->Pointers.clear();
2554 PtrRtChecking->Need = false;
2555
2556 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2557
2558 const bool EnableMemAccessVersioningOfLoop =
2560 !TheLoop->getHeader()->getParent()->hasOptSize();
2561
2562 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2563 // loop info, as it may be arbitrary.
2564 LoopBlocksRPO RPOT(TheLoop);
2565 RPOT.perform(LI);
2566
2567 // Don't return early as soon as we find a memory access that cannot be
2568 // vectorized - HasConvergentOp must still be computed, as it is part of LAI's
2569 // public API (used by LoopDistribute).
2570 for (BasicBlock *BB : RPOT) {
2571 // Scan the BB and collect legal loads and stores. Also detect any
2572 // convergent instructions.
2573 for (Instruction &I : *BB) {
2574 if (auto *Call = dyn_cast<CallBase>(&I)) {
2575 if (Call->isConvergent())
2576 HasConvergentOp = true;
2577 }
2578
2579 // If it is unsafe to vectorize and we have already found a convergent
2580 // operation, we can return early.
2581 if (HasComplexMemInst && HasConvergentOp)
2582 return false;
2583
2584 // Already unsafe to vectorize; keep scanning for convergent ops.
2585 if (HasComplexMemInst)
2586 continue;
2587
2588 // Record alias scopes defined inside the loop.
2589 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2590 for (Metadata *Op : Decl->getScopeList()->operands())
2591 LoopAliasScopes.insert(cast<MDNode>(Op));
2592
2593 // Many math library functions read the rounding mode. We will only
2594 // vectorize a loop if it contains known function calls that don't set
2595 // the flag. Therefore, it is safe to ignore this read from memory.
2596 auto *Call = dyn_cast<CallInst>(&I);
2597 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2598 continue;
2599
2600 // If this is a load, save it. If this instruction can read from memory
2601 // but is not a load, we only allow it if it's a call to a function with a
2602 // vector mapping and no pointer arguments.
2603 if (I.mayReadFromMemory()) {
2604 auto hasPointerArgs = [](CallBase *CB) {
2605 return any_of(CB->args(), [](Value const *Arg) {
2606 return Arg->getType()->isPointerTy();
2607 });
2608 };
2609
2610 // If the function has an explicit vectorized counterpart, and does not
2611 // take output/input pointers, we can safely assume that it can be
2612 // vectorized.
2613 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2614 !hasPointerArgs(Call) && !VFDatabase::getMappings(*Call).empty())
2615 continue;
2616
2617 auto *Ld = dyn_cast<LoadInst>(&I);
2618 if (!Ld) {
2619 recordAnalysis("CantVectorizeInstruction", &I)
2620 << "instruction cannot be vectorized";
2621 HasComplexMemInst = true;
2622 continue;
2623 }
2624 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2625 recordAnalysis("NonSimpleLoad", Ld)
2626 << "read with atomic ordering or volatile read";
2627 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2628 HasComplexMemInst = true;
2629 continue;
2630 }
2631 NumLoads++;
2632 Loads.push_back(Ld);
2633 DepChecker->addAccess(Ld);
2634 if (EnableMemAccessVersioningOfLoop)
2635 collectStridedAccess(Ld);
2636 continue;
2637 }
2638
2639 // Save 'store' instructions. Abort if other instructions write to memory.
2640 if (I.mayWriteToMemory()) {
2641 auto *St = dyn_cast<StoreInst>(&I);
2642 if (!St) {
2643 recordAnalysis("CantVectorizeInstruction", &I)
2644 << "instruction cannot be vectorized";
2645 HasComplexMemInst = true;
2646 continue;
2647 }
2648 if (!St->isSimple() && !IsAnnotatedParallel) {
2649 recordAnalysis("NonSimpleStore", St)
2650 << "write with atomic ordering or volatile write";
2651 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2652 HasComplexMemInst = true;
2653 continue;
2654 }
2655 NumStores++;
2656 Stores.push_back(St);
2657 DepChecker->addAccess(St);
2658 if (EnableMemAccessVersioningOfLoop)
2659 collectStridedAccess(St);
2660 }
2661 } // Next instr.
2662 } // Next block.
2663
2664 if (HasComplexMemInst)
2665 return false;
2666
2667 // Now we have two lists that hold the loads and the stores.
2668 // Next, we find the pointers that they use.
2669
2670 // Check if we see any stores. If there are no stores, then we don't
2671 // care if the pointers are *restrict*.
2672 if (!Stores.size()) {
2673 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2674 return true;
2675 }
2676
2677 MemoryDepChecker::DepCandidates DepCands;
2678 AccessAnalysis Accesses(TheLoop, AA, LI, *DT, DepCands, *PSE,
2679 LoopAliasScopes);
2680
2681 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2682 // multiple times on the same object. If the ptr is accessed twice, once
2683 // for read and once for write, it will only appear once (on the write
2684 // list). This is okay, since we are going to check for conflicts between
2685 // writes and between reads and writes, but not between reads and reads.
2686 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2687
2688 // Record uniform store addresses to identify if we have multiple stores
2689 // to the same address.
2690 SmallPtrSet<Value *, 16> UniformStores;
2691
2692 for (StoreInst *ST : Stores) {
2693 Value *Ptr = ST->getPointerOperand();
2694
2695 if (isInvariant(Ptr)) {
2696 // Record store instructions to loop invariant addresses
2697 StoresToInvariantAddresses.push_back(ST);
2698 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2699 !UniformStores.insert(Ptr).second;
2700 }
2701
2702 // If we did *not* see this pointer before, insert it to the read-write
2703 // list. At this phase it is only a 'write' list.
2704 Type *AccessTy = getLoadStoreType(ST);
2705 if (Seen.insert({Ptr, AccessTy}).second) {
2706 ++NumReadWrites;
2707
2708 MemoryLocation Loc = MemoryLocation::get(ST);
2709 // The TBAA metadata could have a control dependency on the predication
2710 // condition, so we cannot rely on it when determining whether or not we
2711 // need runtime pointer checks.
2712 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2713 Loc.AATags.TBAA = nullptr;
2714
2715 // Expand forked pointers (i.e., a phi of multiple strided pointers) into
2716 // all alternatives.
2717 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2718 [&Accesses, AccessTy, Loc](Value *Ptr) {
2719 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2720 Accesses.addStore(NewLoc, AccessTy);
2721 });
2722 }
2723 }
2724
2725 if (IsAnnotatedParallel) {
2726 LLVM_DEBUG(
2727 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2728 << "checks.\n");
2729 return true;
2730 }
2731
2732 for (LoadInst *LD : Loads) {
2733 Value *Ptr = LD->getPointerOperand();
2734 // If we did *not* see this pointer before, insert it to the read list. If
2735 // we *did* see it before, then it is already in the read-write list. This
2736 // allows us to vectorize expressions such as A[i] += x, because the address
2737 // of A[i] is a read-write pointer. This only works if the index of A[i] is
2738 // strictly monotonic, which we approximate (conservatively) via
2739 // getPtrStride. If the address is unknown (e.g. A[B[i]]) then we may read,
2740 // modify, and write overlapping words. Note that "zero stride" is unsafe
2741 // and is being handled below.
2742 bool IsReadOnlyPtr = false;
2743 Type *AccessTy = getLoadStoreType(LD);
2744 if (Seen.insert({Ptr, AccessTy}).second ||
2745 !getPtrStride(*PSE, AccessTy, Ptr, TheLoop, *DT, SymbolicStrides, false,
2746 true)) {
2747 ++NumReads;
2748 IsReadOnlyPtr = true;
2749 }
2750
2751 // See if there is an unsafe dependency between a load from a uniform address
2752 // and a store to the same uniform address.
2753 if (UniformStores.contains(Ptr)) {
2754 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2755 "load and uniform store to the same address!\n");
2756 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2757 }
2758
2759 MemoryLocation Loc = MemoryLocation::get(LD);
2760 // The TBAA metadata could have a control dependency on the predication
2761 // condition, so we cannot rely on it when determining whether or not we
2762 // need runtime pointer checks.
2763 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2764 Loc.AATags.TBAA = nullptr;
2765
2766 // Expand forked pointers (i.e., a phi of multiple strided pointers) into
2767 // all alternatives.
2768 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2769 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2770 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2771 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2772 });
2773 }
2774
2775 // If we write (or read-write) to a single destination and there are no other
2776 // reads in this loop then it is safe to vectorize: the vectorized stores
2777 // preserve ordering via replication or order-preserving @llvm.masked.scatter.
2778 if (NumReadWrites == 1 && NumReads == 0) {
2779 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2780 return true;
2781 }
2782
2783 // Build dependence sets and check whether we need a runtime pointer bounds
2784 // check.
2785 Accesses.buildDependenceSets();
2786
2787 // Find pointers with computable bounds. We are going to use this information
2788 // to place a runtime bound check.
2789 Value *UncomputablePtr = nullptr;
2790 HasCompletePtrRtChecking =
2791 Accesses.canCheckPtrAtRT(*PtrRtChecking, TheLoop, SymbolicStrides,
2792 UncomputablePtr, AllowPartial, getDepChecker());
2793 if (!HasCompletePtrRtChecking) {
2794 const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2795 recordAnalysis("CantIdentifyArrayBounds", I)
2796 << "cannot identify array bounds";
2797 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2798 << "the array bounds.\n");
2799 return false;
2800 }
2801
2802 LLVM_DEBUG(
2803 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2804
2805 bool DepsAreSafe = true;
2806 if (Accesses.isDependencyCheckNeeded()) {
2807 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2808 DepsAreSafe =
2809 DepChecker->areDepsSafe(DepCands, Accesses.getDependenciesToCheck());
2810
2811 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeChecks()) {
2812 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2813
2814 PtrRtChecking->reset();
2815 PtrRtChecking->Need = true;
2816
2817 UncomputablePtr = nullptr;
2818 HasCompletePtrRtChecking = Accesses.canCheckPtrAtRT(
2819 *PtrRtChecking, TheLoop, SymbolicStrides, UncomputablePtr,
2820 AllowPartial, getDepChecker());
2821
2822 // Check that we found the bounds for the pointer.
2823 if (!HasCompletePtrRtChecking) {
2824 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2825 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2826 << "cannot check memory dependencies at runtime";
2827 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2828 return false;
2829 }
2830
2831 // Clear the dependency checks. They are no longer needed.
2832 Accesses.resetDepChecks(*DepChecker);
2833
2834 DepsAreSafe = true;
2835 }
2836 }
2837
2838 if (HasConvergentOp) {
2839 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2840 << "cannot add control dependency to convergent operation";
2841 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2842 "would be needed with a convergent operation\n");
2843 return false;
2844 }
2845
2846 if (DepsAreSafe) {
2847 LLVM_DEBUG(
2848 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2849 << (PtrRtChecking->Need ? "" : " don't")
2850 << " need runtime memory checks.\n");
2851 return true;
2852 }
2853
2854 emitUnsafeDependenceRemark();
2855 return false;
2856}
2857
2858void LoopAccessInfo::emitUnsafeDependenceRemark() {
2859 const auto *Deps = getDepChecker().getDependences();
2860 if (!Deps)
2861 return;
2862 const auto *Found =
2863 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2864 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2865 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2866 });
2867 if (Found == Deps->end())
2868 return;
2869 MemoryDepChecker::Dependence Dep = *Found;
2870
2871 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2872
2873 // Emit remark for first unsafe dependence
2874 bool HasForcedDistribution = false;
2875 std::optional<const MDOperand *> Value =
2876 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2877 if (Value) {
2878 const MDOperand *Op = *Value;
2879 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2880 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2881 }
2882
2883 const std::string Info =
2884 HasForcedDistribution
2885 ? "unsafe dependent memory operations in loop."
2886 : "unsafe dependent memory operations in loop. Use "
2887 "#pragma clang loop distribute(enable) to allow loop distribution "
2888 "to attempt to isolate the offending operations into a separate "
2889 "loop";
2890 OptimizationRemarkAnalysis &R =
2891 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2892
2893 switch (Dep.Type) {
2897 llvm_unreachable("Unexpected dependence");
2899 R << "\nBackward loop carried data dependence.";
2900 break;
2902 R << "\nForward loop carried data dependence that prevents "
2903 "store-to-load forwarding.";
2904 break;
2906 R << "\nBackward loop carried data dependence that prevents "
2907 "store-to-load forwarding.";
2908 break;
2910 R << "\nUnsafe indirect dependence.";
2911 break;
2913 R << "\nUnknown data dependence.";
2914 break;
2915 }
2916
2917 if (Instruction *I = Dep.getSource(getDepChecker())) {
2918 DebugLoc SourceLoc = I->getDebugLoc();
2919 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2920 SourceLoc = DD->getDebugLoc();
2921 if (SourceLoc)
2922 R << " Memory location is the same as accessed at "
2923 << ore::NV("Location", SourceLoc);
2924 }
2925}
2926
2927bool llvm::blockNeedsPredication(const BasicBlock *BB,
2928 const Loop *TheLoop,
2929 const DominatorTree *DT) {
2930 assert(TheLoop->contains(BB) && "Unknown block used");
2931
2932 // Blocks that do not dominate the latch need predication.
2933 const BasicBlock *Latch = TheLoop->getLoopLatch();
2934 return !DT->dominates(BB, Latch);
2935}
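// Example (illustrative): in
//   for (...) { if (c) A[i] = 0; }
// the block with the conditional store does not dominate the latch, so it
// needs predication (e.g. masking) if the loop is vectorized.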
2936
2937OptimizationRemarkAnalysis &
2938LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
2939 assert(!Report && "Multiple reports generated");
2940
2941 const BasicBlock *CodeRegion = TheLoop->getHeader();
2942 DebugLoc DL = TheLoop->getStartLoc();
2943
2944 if (I) {
2945 CodeRegion = I->getParent();
2946 // If there is no debug location attached to the instruction, fall back to
2947 // using the loop's.
2948 if (I->getDebugLoc())
2949 DL = I->getDebugLoc();
2950 }
2951
2952 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
2953 DL, CodeRegion);
2954 return *Report;
2955}
2956
2957bool LoopAccessInfo::isInvariant(Value *V) const {
2958 auto *SE = PSE->getSE();
2959 if (TheLoop->isLoopInvariant(V))
2960 return true;
2961 if (!SE->isSCEVable(V->getType()))
2962 return false;
2963 const SCEV *S = SE->getSCEV(V);
2964 return SE->isLoopInvariant(S, TheLoop);
2965}
2966
2967/// If \p Ptr is a GEP, which has a loop-variant operand, return that operand.
2968/// Otherwise, return \p Ptr.
2969static Value *getLoopVariantGEPOperand(Value *Ptr, ScalarEvolution *SE,
2970 Loop *Lp) {
2971 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2972 if (!GEP)
2973 return Ptr;
2974
2975 Value *V = Ptr;
2976 for (const Use &U : GEP->operands()) {
2977 if (!SE->isLoopInvariant(SE->getSCEV(U), Lp)) {
2978 if (V == Ptr)
2979 V = U;
2980 else
2981 // There must be exactly one loop-variant operand.
2982 return Ptr;
2983 }
2984 }
2985 return V;
2986}
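// Example (illustrative): for 'gep %A, 0, %i' with loop-invariant %A and
// loop-variant %i, this returns %i; with more than one loop-variant operand
// the original pointer is returned unchanged.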
2987
2988/// Get the stride of a pointer access in a loop. Looks for symbolic
2989/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2990static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2991 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2992 if (!PtrTy)
2993 return nullptr;
2994
2995 // Try to remove a gep instruction to make the pointer (actually the index at
2996 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2997 // the pointer, otherwise we are analyzing the index.
2998 Value *OrigPtr = Ptr;
2999
3000 Ptr = getLoopVariantGEPOperand(Ptr, SE, Lp);
3001 const SCEV *V = SE->getSCEV(Ptr);
3002
3003 if (Ptr != OrigPtr)
3004 // Strip off casts.
3005 while (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
3006 V = C->getOperand();
3007
3009 return nullptr;
3010
3011 // Note that the restrictions after this loop invariant check are only
3012 // profitability restrictions.
3013 if (!SE->isLoopInvariant(V, Lp))
3014 return nullptr;
3015
3016 // Look for the loop invariant symbolic value.
3017 if (isa<SCEVUnknown>(V))
3018 return V;
3019
3020 // Look through multiplies that scale a stride by a constant.
3022 if (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
3023 if (isa<SCEVUnknown>(C->getOperand()))
3024 return V;
3025
3026 return nullptr;
3027}
3028
3029void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
3030 Value *Ptr = getLoadStorePointerOperand(MemAccess);
3031 if (!Ptr)
3032 return;
3033
3034 // Note: getStrideFromPointer is a *profitability* heuristic. We
3035 // could broaden the scope of values returned here - to anything
3036 // which happens to be loop invariant and contributes to the
3037 // computation of an interesting IV - but we chose not to as we
3038 // don't have a cost model here, and broadening the scope exposes
3039 // far too many unprofitable cases.
3040 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
3041 if (!StrideExpr)
3042 return;
3043
3044 if (match(StrideExpr, m_scev_UndefOrPoison()))
3045 return;
3046
3047 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
3048 "versioning:");
3049 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
3050
3051 if (!SpeculateUnitStride) {
3052 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
3053 return;
3054 }
3055
3056 // Avoid adding the "Stride == 1" predicate when we know that
3057 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
3058 // or zero iteration loop, as Trip-Count <= Stride == 1.
3059 //
3060 // TODO: We are currently not making a very informed decision on when it is
3061 // beneficial to apply stride versioning. It might make more sense that the
3062 // users of this analysis (such as the vectorizer) will trigger it, based on
3063 // their specific cost considerations; For example, in cases where stride
3064 // versioning does not help resolving memory accesses/dependences, the
3065 // vectorizer should evaluate the cost of the runtime test, and the benefit
3066 // of various possible stride specializations, considering the alternatives
3067 // of using gather/scatters (if available).
3068
3069 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
3070
3071 // Match the types so we can compare the stride and the MaxBTC.
3072 // The Stride can be positive/negative, so we sign extend Stride;
3073 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
3074 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
3075 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
3076 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
3077 const SCEV *CastedStride = StrideExpr;
3078 const SCEV *CastedBECount = MaxBTC;
3079 ScalarEvolution *SE = PSE->getSE();
3080 if (BETypeSizeBits >= StrideTypeSizeBits)
3081 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
3082 else
3083 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
3084 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
3085 // Since TripCount == BackEdgeTakenCount + 1, checking:
3086 // "Stride >= TripCount" is equivalent to checking:
3087 // Stride - MaxBTC > 0
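 // Example (illustrative): if SE can prove Stride >= 100 while MaxBTC <= 99,
 // then Stride - MaxBTC > 0; versioning on "Stride == 1" would then only
 // cover a loop that executes at most once, so it is skipped.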
3088 if (SE->isKnownPositive(StrideMinusBETaken)) {
3089 LLVM_DEBUG(
3090 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
3091 "Stride==1 predicate will imply that the loop executes "
3092 "at most once.\n");
3093 return;
3094 }
3095 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
3096
3097 // Strip back off the integer cast, and check that our result is a
3098 // SCEVUnknown as we expect.
3099 const SCEV *StrideBase = StrideExpr;
3100 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3101 StrideBase = C->getOperand();
3102 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3103}
3104
3105LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
3106 const TargetTransformInfo *TTI,
3107 const TargetLibraryInfo *TLI, AAResults *AA,
3108 DominatorTree *DT, LoopInfo *LI,
3109 AssumptionCache *AC, bool AllowPartial)
3110 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3111 PtrRtChecking(nullptr), TheLoop(L), AllowPartial(AllowPartial) {
3112 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3113 if (TTI && !TTI->enableScalableVectorization())
3114 // Scale the vector width by 2 as rough estimate to also consider
3115 // interleaving.
3116 MaxTargetVectorWidthInBits =
3117 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) * 2;
3118
3119 DepChecker = std::make_unique<MemoryDepChecker>(
3120 *PSE, AC, DT, L, SymbolicStrides, MaxTargetVectorWidthInBits, LoopGuards);
3121 PtrRtChecking =
3122 std::make_unique<RuntimePointerChecking>(*DepChecker, SE, LoopGuards);
3123 if (canAnalyzeLoop())
3124 CanVecMem = analyzeLoop(AA, LI, TLI, DT);
3125}
3126
3127void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
3128 if (CanVecMem) {
3129 OS.indent(Depth) << "Memory dependences are safe";
3130 const MemoryDepChecker &DC = getDepChecker();
3131 if (!DC.isSafeForAnyVectorWidth())
3132 OS << " with a maximum safe vector width of "
3133 << DC.getMaxSafeVectorWidthInBits() << " bits";
3136 OS << ", with a maximum safe store-load forward width of " << SLDist
3137 << " bits";
3138 }
3139 if (PtrRtChecking->Need)
3140 OS << " with run-time checks";
3141 OS << "\n";
3142 }
3143
3144 if (HasConvergentOp)
3145 OS.indent(Depth) << "Has convergent operation in loop\n";
3146
3147 if (Report)
3148 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3149
3150 if (auto *Dependences = DepChecker->getDependences()) {
3151 OS.indent(Depth) << "Dependences:\n";
3152 for (const auto &Dep : *Dependences) {
3153 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3154 OS << "\n";
3155 }
3156 } else
3157 OS.indent(Depth) << "Too many dependences, not recorded\n";
3158
3159 // List the pairs of accesses that need run-time checks to prove independence.
3160 PtrRtChecking->print(OS, Depth);
3161 if (PtrRtChecking->Need && !HasCompletePtrRtChecking)
3162 OS.indent(Depth) << "Generated run-time checks are incomplete\n";
3163 OS << "\n";
3164
3165 OS.indent(Depth)
3166 << "Non vectorizable stores to invariant address were "
3167 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3168 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3169 ? ""
3170 : "not ")
3171 << "found in loop.\n";
3172
3173 OS.indent(Depth) << "SCEV assumptions:\n";
3174 PSE->getPredicate().print(OS, Depth);
3175
3176 OS << "\n";
3177
3178 OS.indent(Depth) << "Expressions re-written:\n";
3179 PSE->print(OS, Depth);
3180}
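// [Illustrative sketch, not part of this file] Abbreviated output of the
// printer above for a loop that is safe with run-time checks, assuming no
// dependences or invariant-address stores were recorded:
//
//   Memory dependences are safe with run-time checks
//   Dependences:
//   Non vectorizable stores to invariant address were not found in loop.
//   SCEV assumptions:
//   Expressions re-written: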
3181
3182 const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L,
3183 bool AllowPartial) {
3184 const auto &[It, Inserted] = LoopAccessInfoMap.try_emplace(&L);
3185
3186 // We need to create the LoopAccessInfo either if we don't already have
3187 // one or if it was created with a different value of AllowPartial.
3188 if (Inserted || It->second->hasAllowPartial() != AllowPartial)
3189 It->second = std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT,
3190 &LI, AC, AllowPartial);
3191
3192 return *It->second;
3193}
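// [Illustrative sketch, not part of this file] How a hypothetical pass could
// query the manager; MyPass is an assumed name:
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &FAM) {
//     auto &LAIs = FAM.getResult<LoopAccessAnalysis>(F);
//     for (Loop *TopLevelLoop : FAM.getResult<LoopAnalysis>(F)) {
//       const LoopAccessInfo &LAI = LAIs.getInfo(*TopLevelLoop);
//       if (LAI.getRuntimePointerChecking()->Need) {
//         // This loop needs run-time pointer checks before transforming.
//       }
//     }
//     return PreservedAnalyses::all();
//   }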
3194 void LoopAccessInfoManager::clear() {
3195 // Collect LoopAccessInfo entries that may keep references to IR outside the
3196 // analyzed loop or SCEVs that may have been modified or invalidated. At the
3197 // moment, that is loops requiring memory or SCEV runtime checks, as those cache
3198 // SCEVs, e.g. for pointer expressions.
3199 for (const auto &[L, LAI] : LoopAccessInfoMap) {
3200 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3201 LAI->getPSE().getPredicate().isAlwaysTrue())
3202 continue;
3203 LoopAccessInfoMap.erase(L);
3204 }
3205}
3206
3207 bool LoopAccessInfoManager::invalidate(
3208 Function &F, const PreservedAnalyses &PA,
3209 FunctionAnalysisManager::Invalidator &Inv) {
3210 // Check whether our analysis is preserved.
3211 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3212 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3213 // If not, give up now.
3214 return true;
3215
3216 // Check whether the analyses we depend on became invalid for any reason.
3217 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3218 // invalid.
3219 return Inv.invalidate<AAManager>(F, PA) ||
3220 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
3221 Inv.invalidate<LoopAnalysis>(F, PA) ||
3222 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
3223}
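// [Illustrative note, not part of this file] A transform that leaves the
// cached results intact can avoid this invalidation by marking the analysis
// preserved:
//
//   PreservedAnalyses PA;
//   PA.preserve<LoopAccessAnalysis>();
//   return PA;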
3224
3225 LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
3226 FunctionAnalysisManager &FAM) {
3227 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
3228 auto &AA = FAM.getResult<AAManager>(F);
3229 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3230 auto &LI = FAM.getResult<LoopAnalysis>(F);
3231 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
3232 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3233 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
3234 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI, &AC);
3235}
3236
3237AnalysisKey LoopAccessAnalysis::Key;
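// [Illustrative note, not part of this file] The results computed here can
// be inspected via the corresponding printer pass, e.g.:
//
//   opt -passes='print<access-info>' -disable-output input.ll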