1//===- Loads.cpp - Local load analysis ------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines simple local analyses for load instructions.
10//
11//===----------------------------------------------------------------------===//
12
13 #include "llvm/Analysis/Loads.h"
14 #include "llvm/Analysis/AliasAnalysis.h"
15 #include "llvm/Analysis/AssumeBundleQueries.h"
16 #include "llvm/Analysis/LoopInfo.h"
17 #include "llvm/Analysis/MemoryBuiltins.h"
18 #include "llvm/Analysis/MemoryLocation.h"
19 #include "llvm/Analysis/ScalarEvolution.h"
20 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/Module.h"
25 #include "llvm/IR/Operator.h"
26
27using namespace llvm;
28
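// Worked example for isAligned below: with a base known to be 16-byte
// aligned, an access at offset 48 with a requested alignment of 8 is accepted
// (16 >= 8 and 48 is a multiple of 16), while an access at offset 8 is
// rejected, since the offset is conservatively checked against the base
// alignment rather than against Alignment.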
29static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
30 const DataLayout &DL) {
31 Align BA = Base->getPointerAlignment(DL);
32 return BA >= Alignment && Offset.isAligned(BA);
33}
34
35/// Test if V is always a pointer to allocated and suitably aligned memory for
36/// a simple load or store.
37 static bool isDereferenceableAndAlignedPointer(
38 const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
39 const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
40 const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
41 unsigned MaxDepth) {
42 assert(V->getType()->isPointerTy() && "Base must be pointer");
43
44 // Recursion limit.
45 if (MaxDepth-- == 0)
46 return false;
47
48 // Already visited? Bail out, we've likely hit unreachable code.
49 if (!Visited.insert(V).second)
50 return false;
51
52 // Note that it is not safe to speculate into a malloc'd region because
53 // malloc may return null.
54
55 // For GEPs, determine if the indexing lands within the allocated object.
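// For example, for "%p = getelementptr inbounds i8, ptr %base, i64 16" with
// Alignment = 8 and Size = 4, Offset accumulates to 16 (non-negative and a
// multiple of 8), so the question becomes whether %base is dereferenceable
// for 16 + 4 = 20 bytes and suitably aligned.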
56 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
57 const Value *Base = GEP->getPointerOperand();
58
59 APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
60 if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
61 !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
62 .isMinValue())
63 return false;
64
65 // If the base pointer is dereferenceable for Offset+Size bytes, then the
66 // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
67 // pointer is aligned to Align bytes, and the Offset is divisible by Align
68 // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
69 // aligned to Align bytes.
70
71 // Offset and Size may have different bit widths if we have visited an
72 // addrspacecast, so we can't do arithmetic directly on the APInt values.
73 return isDereferenceableAndAlignedPointer(
74 Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
75 CtxI, AC, DT, TLI, Visited, MaxDepth);
76 }
77
78 // bitcast instructions are no-ops as far as dereferenceability is concerned.
79 if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
80 if (BC->getSrcTy()->isPointerTy())
81 return isDereferenceableAndAlignedPointer(
82 BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
83 Visited, MaxDepth);
84 }
85
86 // Recurse into both hands of select.
87 if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
88 return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
89 Size, DL, CtxI, AC, DT, TLI,
90 Visited, MaxDepth) &&
91 isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
92 Size, DL, CtxI, AC, DT, TLI,
93 Visited, MaxDepth);
94 }
95
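// For example, a non-weak global such as "@g = global [4 x i32]
// zeroinitializer, align 8" reports 16 dereferenceable bytes that can be
// neither null nor freed, so a query with Size <= 16 is answered below,
// subject only to the final alignment check.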
96 bool CheckForNonNull, CheckForFreed;
97 APInt KnownDerefBytes(Size.getBitWidth(),
98 V->getPointerDereferenceableBytes(DL, CheckForNonNull,
99 CheckForFreed));
100 if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
101 !CheckForFreed)
102 if (!CheckForNonNull ||
103 isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI))) {
104 // As we recursed through GEPs to get here, we've incrementally checked
105 // that each step advanced by a multiple of the alignment. If our base is
106 // properly aligned, then the original offset accessed must also be.
107 APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
108 return isAligned(V, Offset, Alignment, DL);
109 }
110
111 /// TODO refactor this function to be able to search independently for
112 /// Dereferenceability and Alignment requirements.
113
114
115 if (const auto *Call = dyn_cast<CallBase>(V)) {
116 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
117 return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
118 AC, DT, TLI, Visited, MaxDepth);
119
120 // If we have a call we can't recurse through, check to see if this is an
121 // allocation function for which we can establish a minimum object size.
122 // Such a minimum object size is analogous to a deref_or_null attribute in
123 // that we still need to prove the result non-null at point of use.
124 // NOTE: We can only use the object size as a base fact as we a) need to
125 // prove alignment too, and b) don't want the compile time impact of a
126 // separate recursive walk.
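// For example, "%p = call noalias ptr @malloc(i64 64)" yields an object size
// of 64 bytes here (assuming TLI recognizes malloc), but the result must
// additionally be proven non-null at CtxI (e.g. guarded by a dominating null
// check) and V->canBeFreed() must be false for the access to be considered
// safe.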
127 ObjectSizeOpts Opts;
128 // TODO: It may be okay to round to align, but that would imply that
129 // accessing slightly out of bounds was legal, and we're currently
130 // inconsistent about that. For the moment, be conservative.
131 Opts.RoundToAlign = false;
132 Opts.NullIsUnknownSize = true;
133 uint64_t ObjSize;
134 if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
135 APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
136 if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
137 isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
138 !V->canBeFreed()) {
139 // As we recursed through GEPs to get here, we've incrementally
140 // checked that each step advanced by a multiple of the alignment. If
141 // our base is properly aligned, then the original offset accessed
142 // must also be.
143 APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
144 return isAligned(V, Offset, Alignment, DL);
145 }
146 }
147 }
148
149 // For gc.relocate, look through relocations
150 if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
151 return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
152 Alignment, Size, DL, CtxI, AC, DT,
153 TLI, Visited, MaxDepth);
154
155 if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
156 return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
157 Size, DL, CtxI, AC, DT, TLI,
158 Visited, MaxDepth);
159
160 if (CtxI) {
161 /// Look through assumes to see if both dereferenceability and alignment can
162 /// be proven by an assume.
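/// For example, a dominating
///   call void @llvm.assume(i1 true)
///       ["align"(ptr %p, i64 16), "dereferenceable"(ptr %p, i64 64)]
/// satisfies a query on %p with Alignment <= 16 and Size <= 64, provided the
/// assume is a valid context for CtxI.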
163 RetainedKnowledge AlignRK;
164 RetainedKnowledge DerefRK;
165 if (getKnowledgeForValue(
166 V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
167 [&](RetainedKnowledge RK, Instruction *Assume, auto) {
168 if (!isValidAssumeForContext(Assume, CtxI, DT))
169 return false;
170 if (RK.AttrKind == Attribute::Alignment)
171 AlignRK = std::max(AlignRK, RK);
172 if (RK.AttrKind == Attribute::Dereferenceable)
173 DerefRK = std::max(DerefRK, RK);
174 if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
175 DerefRK.ArgValue >= Size.getZExtValue())
176 return true; // We have found what we needed so we stop looking
177 return false; // Other assumes may have better information, so
178 // keep looking.
179 }))
180 return true;
181 }
182
183 // If we don't know, assume the worst.
184 return false;
185}
186
187 bool llvm::isDereferenceableAndAlignedPointer(
188 const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
189 const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
190 const TargetLibraryInfo *TLI) {
191 // Note: At the moment, Size can be zero. This ends up being interpreted as
192 // a query of whether [Base, V] is dereferenceable and V is aligned (since
193 // that's what the implementation happened to do). It's unclear if this is
194 // the desired semantic, but at least SelectionDAG does exercise this case.
195
196 SmallPtrSet<const Value *, 32> Visited;
197 return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
198 DT, TLI, Visited, 16);
199}
200
201 bool llvm::isDereferenceableAndAlignedPointer(
202 const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
203 const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
204 const TargetLibraryInfo *TLI) {
205 // For unsized types or scalable vectors we don't know exactly how many bytes
206 // are dereferenced, so bail out.
207 if (!Ty->isSized() || Ty->isScalableTy())
208 return false;
209
210 // When dereferenceability information is provided by a dereferenceable
211 // attribute, we know exactly how many bytes are dereferenceable. If we can
212 // determine the exact offset to the attributed variable, we can use that
213 // information here.
214
215 APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
216 DL.getTypeStoreSize(Ty));
217 return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
218 AC, DT, TLI);
219}
220
221 bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
222 const DataLayout &DL,
223 const Instruction *CtxI,
224 AssumptionCache *AC,
225 const DominatorTree *DT,
226 const TargetLibraryInfo *TLI) {
227 return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
228 TLI);
229}
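// A minimal caller-side sketch of the wrappers above (names such as Ptr,
// LoadTy, I, AC and DT stand for whatever the caller has available; the
// context instruction and analyses are optional):
//   if (isDereferenceablePointer(Ptr, LoadTy, DL, &I, &AC, &DT))
//     ... the load from Ptr can be speculated ...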
230
231/// Test if A and B will obviously have the same value.
232///
233/// This includes recognizing that %t0 and %t1 will have the same
234/// value in code like this:
235/// \code
236/// %t0 = getelementptr \@a, 0, 3
237/// store i32 0, i32* %t0
238/// %t1 = getelementptr \@a, 0, 3
239/// %t2 = load i32* %t1
240/// \endcode
241///
242static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
243 // Test if the values are trivially equivalent.
244 if (A == B)
245 return true;
246
247 // Test if the values come from identical arithmetic instructions.
248 // Use isIdenticalToWhenDefined instead of isIdenticalTo because
249 // this function is only used when one address use dominates the
250 // other, which means that they'll always either have the same
251 // value or one of them will have an undefined value.
252 if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
253 isa<GetElementPtrInst>(A))
254 if (const Instruction *BI = dyn_cast<Instruction>(B))
255 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
256 return true;
257
258 // Otherwise they may not be equivalent.
259 return false;
260}
261
262 bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
263 ScalarEvolution &SE,
264 DominatorTree &DT,
265 AssumptionCache *AC) {
266 auto &DL = LI->getDataLayout();
267 Value *Ptr = LI->getPointerOperand();
268
269 APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
270 DL.getTypeStoreSize(LI->getType()).getFixedValue());
271 const Align Alignment = LI->getAlign();
272
273 Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();
274
275 // If given a uniform (i.e. non-varying) address, see if we can prove the
276 // access is safe within the loop w/o needing predication.
277 if (L->isLoopInvariant(Ptr))
278 return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
279 HeaderFirstNonPHI, AC, &DT);
280
281 // Otherwise, check to see if we have a repeating access pattern where we can
282 // prove that all accesses are well aligned and dereferenceable.
283 auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
284 if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
285 return false;
286 auto* Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
287 if (!Step)
288 return false;
289
290 auto TC = SE.getSmallConstantMaxTripCount(L);
291 if (!TC)
292 return false;
293
294 // TODO: Handle overlapping accesses.
295 // We should be computing AccessSize as (TC - 1) * Step + EltSize.
296 if (EltSize.sgt(Step->getAPInt()))
297 return false;
298
299 // Compute the total access size for access patterns with unit stride and
300 // patterns with gaps. For patterns with unit stride, Step and EltSize are the
301 // same.
302 // For patterns with gaps (i.e. non-unit stride), we are
303 // accessing EltSize bytes at every Step.
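// For example, with EltSize = Step = 4 (unit-stride i32 accesses) and a
// constant maximum trip count of 100, AccessSize is 400 bytes starting at the
// AddRec's start value.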
304 APInt AccessSize = TC * Step->getAPInt();
305
306 assert(SE.isLoopInvariant(AddRec->getStart(), L) &&
307 "implied by addrec definition");
308 Value *Base = nullptr;
309 if (auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart())) {
310 Base = StartS->getValue();
311 } else if (auto *StartS = dyn_cast<SCEVAddExpr>(AddRec->getStart())) {
312 // Handle (NewBase + offset) as start value.
313 const auto *Offset = dyn_cast<SCEVConstant>(StartS->getOperand(0));
314 const auto *NewBase = dyn_cast<SCEVUnknown>(StartS->getOperand(1));
315 if (StartS->getNumOperands() == 2 && Offset && NewBase) {
316 // The code below assumes the offset is unsigned, but GEP
317 // offsets are treated as signed so we can end up with a signed value
318 // here too. For example, suppose the initial PHI value is (i8 255),
319 // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
320 if (Offset->getAPInt().isNegative())
321 return false;
322
323 // For the moment, restrict ourselves to the case where the offset is a
324 // multiple of the requested alignment and the base is aligned.
325 // TODO: generalize if a case is found which warrants it
326 if (Offset->getAPInt().urem(Alignment.value()) != 0)
327 return false;
328 Base = NewBase->getValue();
329 bool Overflow = false;
330 AccessSize = AccessSize.uadd_ov(Offset->getAPInt(), Overflow);
331 if (Overflow)
332 return false;
333 }
334 }
335
336 if (!Base)
337 return false;
338
339 // For the moment, restrict ourselves to the case where the access size is a
340 // multiple of the requested alignment and the base is aligned.
341 // TODO: generalize if a case is found which warrants it
342 if (EltSize.urem(Alignment.value()) != 0)
343 return false;
344 return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
345 HeaderFirstNonPHI, AC, &DT);
346}
347
348 static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
349 const Function &F = *CtxI.getFunction();
350 // Speculative load may create a race that did not exist in the source.
351 return F.hasFnAttribute(Attribute::SanitizeThread) ||
352 // Speculative load may load data from dirty regions.
353 F.hasFnAttribute(Attribute::SanitizeAddress) ||
354 F.hasFnAttribute(Attribute::SanitizeHWAddress);
355}
356
357 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
358 return LI.isVolatile() || suppressSpeculativeLoadForSanitizers(LI);
359}
360
361/// Check if executing a load of this pointer value cannot trap.
362///
363/// If DT and ScanFrom are specified this method performs context-sensitive
364/// analysis and returns true if it is safe to load immediately before ScanFrom.
365///
366/// If it is not obviously safe to load from the specified pointer, we do
367/// a quick local scan of the basic block containing \c ScanFrom, to determine
368/// if the address is already accessed.
369///
370/// This uses the pointee type to determine how many bytes need to be safe to
371/// load from the pointer.
372 bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size,
373 const DataLayout &DL,
374 Instruction *ScanFrom,
375 AssumptionCache *AC,
376 const DominatorTree *DT,
377 const TargetLibraryInfo *TLI) {
378 // If DT is not specified we can't make context-sensitive query
379 const Instruction* CtxI = DT ? ScanFrom : nullptr;
380 if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
381 TLI)) {
382 // With sanitizers `Dereferenceable` is not always enough for unconditional
383 // load.
384 if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
385 return true;
386 }
387
388 if (!ScanFrom)
389 return false;
390
391 if (Size.getBitWidth() > 64)
392 return false;
393 const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());
394
395 // Otherwise, be a little bit aggressive by scanning the local block where we
396 // want to check to see if the pointer is already being loaded or stored
397 // from/to. If so, the previous load or store would have already trapped,
398 // so there is no harm doing an extra load (also, CSE will later eliminate
399 // the load entirely).
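// For example, if the block already contains "store i32 0, ptr %p, align 4"
// before ScanFrom, then a 4-byte load from %p requiring alignment <= 4 is
// considered safe by the scan below.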
400 BasicBlock::iterator BBI = ScanFrom->getIterator(),
401 E = ScanFrom->getParent()->begin();
402
403 // We can at least always strip pointer casts even though we can't use the
404 // base here.
405 V = V->stripPointerCasts();
406
407 while (BBI != E) {
408 --BBI;
409
410 // If we see a free or a call which may write to memory (i.e. which might do
411 // a free) the pointer could be marked invalid.
412 if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
413 !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
414 return false;
415
416 Value *AccessedPtr;
417 Type *AccessedTy;
418 Align AccessedAlign;
419 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
420 // Ignore volatile loads. The execution of a volatile load cannot
421 // be used to prove an address is backed by regular memory; it can,
422 // for example, point to an MMIO register.
423 if (LI->isVolatile())
424 continue;
425 AccessedPtr = LI->getPointerOperand();
426 AccessedTy = LI->getType();
427 AccessedAlign = LI->getAlign();
428 } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
429 // Ignore volatile stores (see comment for loads).
430 if (SI->isVolatile())
431 continue;
432 AccessedPtr = SI->getPointerOperand();
433 AccessedTy = SI->getValueOperand()->getType();
434 AccessedAlign = SI->getAlign();
435 } else
436 continue;
437
438 if (AccessedAlign < Alignment)
439 continue;
440
441 // Handle trivial cases.
442 if (AccessedPtr == V &&
443 TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
444 return true;
445
446 if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
447 TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
448 return true;
449 }
450 return false;
451}
452
453 bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
454 const DataLayout &DL,
455 Instruction *ScanFrom,
456 AssumptionCache *AC,
457 const DominatorTree *DT,
458 const TargetLibraryInfo *TLI) {
459 TypeSize TySize = DL.getTypeStoreSize(Ty);
460 if (TySize.isScalable())
461 return false;
462 APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
463 return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
464 TLI);
465}
466
467/// DefMaxInstsToScan - the default number of maximum instructions
468/// to scan in the block, used by FindAvailableLoadedValue().
469/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
470/// threading in part by eliminating partially redundant loads.
471/// At that point, the value of MaxInstsToScan was already set to '6'
472/// without documented explanation.
473 cl::opt<unsigned>
474 llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
475 cl::desc("Use this to specify the default maximum number of instructions "
476 "to scan backward from a given instruction, when searching for "
477 "available loaded value"));
478
479 Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
480 BasicBlock::iterator &ScanFrom,
481 unsigned MaxInstsToScan,
482 BatchAAResults *AA, bool *IsLoad,
483 unsigned *NumScanedInst) {
484 // Don't CSE load that is volatile or anything stronger than unordered.
485 if (!Load->isUnordered())
486 return nullptr;
487
488 MemoryLocation Loc = MemoryLocation::get(Load);
489 return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
490 ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
491 NumScanedInst);
492}
493
494// Check if the load and the store have the same base, constant offsets and
495// non-overlapping access ranges.
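// For example, "load i32, ptr %p" and a store of an i64 to "%p + 16" strip to
// the same base with offsets 0 and 16; the access ranges [0, 4) and [16, 24)
// do not intersect, so the store cannot clobber the load.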
496static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
497 Type *LoadTy,
498 const Value *StorePtr,
499 Type *StoreTy,
500 const DataLayout &DL) {
501 APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
502 APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
503 const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
504 DL, LoadOffset, /* AllowNonInbounds */ false);
505 const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
506 DL, StoreOffset, /* AllowNonInbounds */ false);
507 if (LoadBase != StoreBase)
508 return false;
509 auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
510 auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
511 ConstantRange LoadRange(LoadOffset,
512 LoadOffset + LoadAccessSize.toRaw());
513 ConstantRange StoreRange(StoreOffset,
514 StoreOffset + StoreAccessSize.toRaw());
515 return LoadRange.intersectWith(StoreRange).isEmptySet();
516}
517
518 static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
519 Type *AccessTy, bool AtLeastAtomic,
520 const DataLayout &DL, bool *IsLoadCSE) {
521 // If this is a load of Ptr, the loaded value is available.
522 // (This is true even if the load is volatile or atomic, although
523 // those cases are unlikely.)
524 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
525 // We can value forward from an atomic to a non-atomic, but not the
526 // other way around.
527 if (LI->isAtomic() < AtLeastAtomic)
528 return nullptr;
529
530 Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
531 if (!AreEquivalentAddressValues(LoadPtr, Ptr))
532 return nullptr;
533
534 if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
535 if (IsLoadCSE)
536 *IsLoadCSE = true;
537 return LI;
538 }
539 }
540
541 // If this is a store through Ptr, the value is available!
542 // (This is true even if the store is volatile or atomic, although
543 // those cases are unlikely.)
544 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
545 // We can value forward from an atomic to a non-atomic, but not the
546 // other way around.
547 if (SI->isAtomic() < AtLeastAtomic)
548 return nullptr;
549
550 Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
551 if (!AreEquivalentAddressValues(StorePtr, Ptr))
552 return nullptr;
553
554 if (IsLoadCSE)
555 *IsLoadCSE = false;
556
557 Value *Val = SI->getValueOperand();
558 if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
559 return Val;
560
561 TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
562 TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
563 if (TypeSize::isKnownLE(LoadSize, StoreSize))
564 if (auto *C = dyn_cast<Constant>(Val))
565 return ConstantFoldLoadFromConst(C, AccessTy, DL);
566 }
567
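// For a constant memset such as
//   call void @llvm.memset.p0.i64(ptr %p, i8 -86, i64 16, i1 false)
// a later non-atomic "load i32, ptr %p" is forwarded the splatted constant
// 0xAAAAAAAA by the code below.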
568 if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
569 // Don't forward from (non-atomic) memset to atomic load.
570 if (AtLeastAtomic)
571 return nullptr;
572
573 // Only handle constant memsets.
574 auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
575 auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
576 if (!Val || !Len)
577 return nullptr;
578
579 // TODO: Handle offsets.
580 Value *Dst = MSI->getDest();
581 if (!AreEquivalentAddressValues(Dst, Ptr))
582 return nullptr;
583
584 if (IsLoadCSE)
585 *IsLoadCSE = false;
586
587 TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
588 if (LoadTypeSize.isScalable())
589 return nullptr;
590
591 // Make sure the read bytes are contained in the memset.
592 uint64_t LoadSize = LoadTypeSize.getFixedValue();
593 if ((Len->getValue() * 8).ult(LoadSize))
594 return nullptr;
595
596 APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
597 : Val->getValue().trunc(LoadSize);
598 ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
599 if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
600 return SplatC;
601
602 return nullptr;
603 }
604
605 return nullptr;
606}
607
608 Value *llvm::findAvailablePtrLoadStore(
609 const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
610 BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
611 BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
612 if (MaxInstsToScan == 0)
613 MaxInstsToScan = ~0U;
614
615 const DataLayout &DL = ScanBB->getDataLayout();
616 const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();
617
618 while (ScanFrom != ScanBB->begin()) {
619 // We must ignore debug info directives when counting (otherwise they
620 // would affect codegen).
621 Instruction *Inst = &*--ScanFrom;
622 if (Inst->isDebugOrPseudoInst())
623 continue;
624
625 // Restore ScanFrom to expected value in case next test succeeds
626 ScanFrom++;
627
628 if (NumScanedInst)
629 ++(*NumScanedInst);
630
631 // Don't scan huge blocks.
632 if (MaxInstsToScan-- == 0)
633 return nullptr;
634
635 --ScanFrom;
636
637 if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
638 AtLeastAtomic, DL, IsLoadCSE))
639 return Available;
640
641 // Try to get the store size for the type.
642 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
643 Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
644
645 // If both StrippedPtr and StorePtr reach all the way to an alloca or
646 // global and they are different, ignore the store. This is a trivial form
647 // of alias analysis that is important for reg2mem'd code.
648 if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
649 (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
650 StrippedPtr != StorePtr)
651 continue;
652
653 if (!AA) {
654 // When AA isn't available, but if the load and the store have the same
655 // base, constant offsets and non-overlapping access ranges, ignore the
656 // store. This is a simple form of alias analysis that is used by the
657 // inliner. FIXME: use BasicAA if possible.
658 if (areNonOverlapSameBaseLoadAndStore(
659 Loc.Ptr, AccessTy, SI->getPointerOperand(),
660 SI->getValueOperand()->getType(), DL))
661 continue;
662 } else {
663 // If we have alias analysis and it says the store won't modify the
664 // loaded value, ignore the store.
665 if (!isModSet(AA->getModRefInfo(SI, Loc)))
666 continue;
667 }
668
669 // Otherwise the store may or may not alias the pointer; bail out.
670 ++ScanFrom;
671 return nullptr;
672 }
673
674 // If this is some other instruction that may clobber Ptr, bail out.
675 if (Inst->mayWriteToMemory()) {
676 // If alias analysis claims that it really won't modify the load,
677 // ignore it.
678 if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
679 continue;
680
681 // May modify the pointer, bail out.
682 ++ScanFrom;
683 return nullptr;
684 }
685 }
686
687 // Got to the start of the block, we didn't find it, but are done for this
688 // block.
689 return nullptr;
690}
691
692 Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
693 bool *IsLoadCSE,
694 unsigned MaxInstsToScan) {
695 const DataLayout &DL = Load->getDataLayout();
696 Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
697 BasicBlock *ScanBB = Load->getParent();
698 Type *AccessTy = Load->getType();
699 bool AtLeastAtomic = Load->isAtomic();
700
701 if (!Load->isUnordered())
702 return nullptr;
703
704 // Try to find an available value first, and delay expensive alias analysis
705 // queries until later.
706 Value *Available = nullptr;
707 SmallVector<Instruction *> MustNotAliasInsts;
708 for (Instruction &Inst : make_range(++Load->getReverseIterator(),
709 ScanBB->rend())) {
710 if (Inst.isDebugOrPseudoInst())
711 continue;
712
713 if (MaxInstsToScan-- == 0)
714 return nullptr;
715
716 Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
717 AtLeastAtomic, DL, IsLoadCSE);
718 if (Available)
719 break;
720
721 if (Inst.mayWriteToMemory())
722 MustNotAliasInsts.push_back(&Inst);
723 }
724
725 // If we found an available value, ensure that the instructions in between
726 // did not modify the memory location.
727 if (Available) {
728 MemoryLocation Loc = MemoryLocation::get(Load);
729 for (Instruction *Inst : MustNotAliasInsts)
730 if (isModSet(AA.getModRefInfo(Inst, Loc)))
731 return nullptr;
732 }
733
734 return Available;
735}
736
737// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that only
738// feeds into them.
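// For example, a pointer whose only transitive uses are "icmp eq ptr %p, %q"
// or "ptrtoint ptr %p to i64" (possibly reached through phis and selects) is
// replaceable; a pointer that is loaded from, stored to, or passed to a call
// is not.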
739static bool isPointerUseReplacable(const Use &U) {
740 unsigned Limit = 40;
741 SmallVector<const User *> Worklist({U.getUser()});
742 SmallPtrSet<const User *, 8> Visited;
743
744 while (!Worklist.empty() && --Limit) {
745 auto *User = Worklist.pop_back_val();
746 if (!Visited.insert(User).second)
747 continue;
748 if (isa<ICmpInst, PtrToIntInst>(User))
749 continue;
750 if (isa<PHINode, SelectInst>(User))
751 Worklist.append(User->user_begin(), User->user_end());
752 else
753 return false;
754 }
755
756 return Limit != 0;
757}
758
759 // Returns true if `To` is a null pointer, a constant dereferenceable
760 // pointer, or both pointers have the same underlying object.
761static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
762 const DataLayout &DL) {
763 // This is not strictly correct, but we do it for now to retain important
764 // optimizations.
765 if (isa<ConstantPointerNull>(To))
766 return true;
767 if (isa<Constant>(To) &&
768 isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
769 return true;
770 return getUnderlyingObjectAggressive(From) ==
771 getUnderlyingObjectAggressive(To);
772}
773
774 bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
775 const DataLayout &DL) {
776 assert(U->getType() == To->getType() && "values must have matching types");
777 // Not a pointer, just return true.
778 if (!To->getType()->isPointerTy())
779 return true;
780
781 if (isPointerAlwaysReplaceable(&*U, To, DL))
782 return true;
783 return isPointerUseReplacable(U);
784}
785
786 bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
787 const DataLayout &DL) {
788 assert(From->getType() == To->getType() && "values must have matching types");
789 // Not a pointer, just return true.
790 if (!From->getType()->isPointerTy())
791 return true;
792
793 return isPointerAlwaysReplaceable(From, To, DL);
794}
795
796 bool llvm::isDereferenceableReadOnlyLoop(Loop *L, ScalarEvolution *SE,
797 DominatorTree *DT,
798 AssumptionCache *AC) {
799 for (BasicBlock *BB : L->blocks()) {
800 for (Instruction &I : *BB) {
801 if (auto *LI = dyn_cast<LoadInst>(&I)) {
802 if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC))
803 return false;
804 } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
805 return false;
806 }
807 }
808 return true;
809}