//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. This affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns the size of the object specified by V or UnknownSize if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return TypeSize::getFixed(Size);
  return std::nullopt;
}
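
// Example: for `%a = alloca i64, align 16`, this wrapper returns a fixed
// TypeSize of 8, or 16 when RoundToAlign is set, because
// ObjectSizeOpts::RoundToAlign rounds the object size up to the alloca's
// alignment.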

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);

  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to be
  // accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
  return TypeSize::getFixed(DerefBytes);
}
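
// Example: for an argument `ptr dereferenceable(16) %p` queried with a
// precise 4-byte location, the minimal extent is 16. If the attribute were
// dereferenceable_or_null and null were a valid location, the
// dereferenceability bound would be dropped and only the precise 4 remains.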

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBefore(const Value *Object,
                                            const Instruction *I, bool OrAt) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI) {
  BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
}

bool EarliestEscapeInfo::isNotCapturedBefore(const Value *Object,
                                             const Instruction *I, bool OrAt) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  if (I == Iter.first->second) {
    if (OrAt)
      return false;
    return isNotInCycle(I, &DT, LI);
  }

  return !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
           TruncBits == Other.TruncBits;
  }
};
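
// Worked example: a CastedValue over an i32 %v with TruncBits = 24,
// SExtBits = 8, ZExtBits = 0 models sext(trunc(%v to i8) to i16), so
// getBitWidth() == 16, and evaluateWith(APInt(32, 0xFF)) truncates to the
// i8 value -1 and sign-extends it to 0xFFFF.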

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
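
// Example of why mul() requires a zero Offset before propagating NSW: in i8,
// with X = -50, Y = 49, Z = 100, (X +nsw Y) *nsw Z evaluates to -100 without
// wrapping, yet the distributed term X * Z (-5000) wraps. So (X + Y)*Z being
// NSW does not make X*Z + Y*Z NSW term-by-term.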
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(),
                               SimplifyQuery(DL, DT, AC, BOp)))
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}
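
// Worked example: for
//   %a = add nsw i32 %x, 4
//   %m = mul nsw i32 %a, 8
// analyzing %m yields Scale = 8 and Offset = 32, i.e. %m == 8*%x + 32; IsNSW
// is dropped because the inner expression has a non-zero offset when the
// multiply is distributed over it.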

/// Ensure a pointer offset fits in an integer of size IndexSize (in bits) when
/// that size is smaller than the maximum index size. This is an issue, for
/// example, for 32-bit pointers with negative indices that rely on two's
/// complement wrap-arounds for precise alias information, where the maximum
/// index size is 64 bits.
static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  if (ShiftBits != 0) {
    Offset <<= ShiftBits;
    Offset.ashrInPlace(ShiftBits);
  }
}
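
// Example: with a 64-bit offset of 0x00000000FFFFFFFF and a 32-bit index
// size, the shl/ashr pair sign-extends bit 31 through the upper half, turning
// the offset into -1, which is what a negative 32-bit index that wrapped
// modulo 2^32 actually denotes.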

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
}

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (std::nullopt iff expression doesn't involve any geps)
  std::optional<bool> InBounds;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};
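
// Example: decomposing
//   %g = getelementptr inbounds [8 x i16], ptr %p, i64 2, i64 %i
// yields Base = %p, Offset = 32 (two 16-byte array strides), one variable
// index (%i, scale 2), and InBounds = true.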


/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed with
        // the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer is
        // not returned from some special intrinsic (because the function would
        // have to be marked with the returned attribute), it is crucial to use
        // this function, which stays in sync with CaptureTracking. Not using it
        // may cause weird miscompilations where two aliasing pointers are
        // assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == std::nullopt)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }

      TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
      APInt Scale = LE.Scale.sext(MaxIndexSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          LE.IsNSW = false; // We cannot guarantee nsw for the merge.
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // index size.
      adjustToIndexSize(Scale, IndexSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset)
      adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}
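
// Example: a two-GEP chain such as
//   %q = getelementptr i8, ptr %p, i64 %n
//   %r = getelementptr i8, ptr %q, i64 4
// decomposes, walking both GEPs, into Base = %p, Offset = 4, and a single
// variable index (%n, scale 1).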

ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}
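
// Example: for a location rooted at an argument declared
// `ptr noalias readonly %p`, the loop above accumulates only ModRefInfo::Ref,
// i.e. the location may be read but cannot be modified while the function
// executes.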

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return ModRefInfo::ModRef;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // A call can access a locally allocated object either because it is passed as
  // an argument to the call, or because it has escaped prior to the call.
  //
  // Make sure the object has not escaped here, and then check that none of the
  // call arguments alias the object below.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBefore(Object, Call, /*OrAt*/ false)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      if (!(*CI)->getType()->isPointerTy())
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
                         MemoryLocation::getBeforeOrAfter(Object), AAQI);
      // Operand doesn't alias 'Object', continue looking for other aliases
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->onlyWritesMemory(OperandNo)) {
        Result |= ModRefInfo::Mod;
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result))
      return Result;
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // Be conservative.
  return ModRefInfo::ModRef;
}

/// Return true if we know V to be the base address of the corresponding
/// memory object. This implies that any address less than V must be out of
/// bounds for the underlying object. Note that just being isIdentifiedObject()
/// is not enough - For example, a negative offset from a noalias argument or
/// call can be inbounds w.r.t the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully supports scalable location
  // sizes.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && !V1Size.isScalable() &&
        DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // Bail on analysing scalable LocationSize
  if (V1Size.isScalable() || V2Size.isScalable())
    return AliasResult::MayAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      //   +                +
      //   | BaseOffset     |
      //   ---------------->|
      //   |-->V1Size       |-------> V2Size
      //   GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const uint64_t LSize = VLeftSize.getValue();
    if (Off.ult(LSize)) {
      // Conservatively drop processing if a phi was visited and/or offset is
      // too big.
      AliasResult AR = AliasResult::PartialAlias;
      if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
          (Off + VRightSize.getValue()).ule(LSize)) {
        // Memory referenced by right pointer is nested. Save the offset in
        // cache. Note that the offset was originally estimated as GEP1-V2,
        // but AliasResult contains the shift that represents GEP1+Offset=V2.
        AR.setOffset(-Off.getSExtValue());
        AR.swap(Swapped);
      }
      return AR;
    }
    return AliasResult::NoAlias;
  }

  // We need to know both access sizes for all the following heuristics.
  if (!V1Size.hasValue() || !V2Size.hasValue())
    return AliasResult::MayAlias;

  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                            true, &AC, Index.CxtI);
    KnownBits Known =
        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      CR = CR.smul_sat(ConstantRange(Scale));
    else
      CR = CR.smul_fast(ConstantRange(Scale));

    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;
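
  // Worked example: accesses at 8*i + 4 with size 4 against offset 0 with
  // size 4 give GCD = 8 and ModOffset = 4. Since 4 >= V2Size and
  // 8 - 4 >= V1Size, the accesses occupy disjoint residues modulo 8 and
  // cannot overlap.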

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
  unsigned BW = OffsetRange.getBitWidth();
  ConstantRange Range1 = OffsetRange.add(
      ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
        if (Var.IsNSW)
          return true;

        int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
        // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
        // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
        // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
        int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
        if (MaxScaleValueBW <= 0)
          return false;
        return Var.Scale.ule(
            APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
      };
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                        DT))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }
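
  // Example: with a single index %i known non-zero, scale 4, and constant
  // offset 0, MinAbsVarIndex = 4, so OffsetLo = -4 and OffsetHi = 4; two
  // 4-byte accesses therefore start at least 4 bytes apart and cannot
  // overlap.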

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      AliasResult Alias =
          AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                         MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias =
          AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                         MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                                     MemoryLocation(V2, V2Size), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias =
      AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                     MemoryLocation(V2, V2Size), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      std::optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = AAQI.AAR.alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // Skip the phi itself being the incoming value.
    if (PV1 == PN)
      continue;

    if (isa<PHINode>(PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one Phi input. It is important
        // that we handle the single phi case as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations.
  SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);

  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
                                     MemoryLocation(V2, V2Size), AAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, and we need to conservatively return
  // MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = AAQI.AAR.alias(
        MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so they
  // don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        AAQI.CI->isNotCapturedBefore(O2, cast<Instruction>(O1), /*OrAt*/ true))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) &&
        AAQI.CI->isNotCapturedBefore(O1, cast<Instruction>(O2), /*OrAt*/ true))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;
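
  // Example: a 16-byte access through V1 cannot alias a 4-byte alloca
  // reached through O2, since any overlap would extend past the end of the
  // alloca, which would be undefined behavior.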

  if (CtxI && EnableSeparateStorageAnalysis) {
    for (auto &AssumeVH : AC.assumptions()) {
      if (!AssumeVH)
        continue;

      AssumeInst *Assume = cast<AssumeInst>(AssumeVH);

      for (unsigned Idx = 0; Idx < Assume->getNumOperandBundles(); Idx++) {
        OperandBundleUse OBU = Assume->getOperandBundleAt(Idx);
        if (OBU.getTagName() == "separate_storage") {
          assert(OBU.Inputs.size() == 2);
          const Value *Hint1 = OBU.Inputs[0].get();
          const Value *Hint2 = OBU.Inputs[1].get();
          // This is often a no-op; instcombine rewrites this for us. No-op
          // getUnderlyingObject calls are fast, though.
          const Value *HintO1 = getUnderlyingObject(Hint1);
          const Value *HintO2 = getUnderlyingObject(Hint2);

          if (((O1 == HintO1 && O2 == HintO2) ||
               (O1 == HintO2 && O2 == HintO1)) &&
              isValidAssumeForContext(Assume, CtxI, DT))
            return AliasResult::NoAlias;
        }
      }
    }
  }
1572
1573 // If one of the accesses may be before the accessed pointer, canonicalize
1574 // this by using unknown after-pointer sizes for both accesses. This is
1575 // equivalent, because regardless of which pointer is lower, one of them
1576 // will always come after the other, as long as the underlying objects aren't
1577 // disjoint. We do this so that the rest of BasicAA does not have to deal
1578 // with accesses before the base pointer, and to improve cache utilization by
1579 // merging equivalent states.
1580 if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1581 V1Size = LocationSize::afterPointer();
1582 V2Size = LocationSize::afterPointer();
1583 }
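// E.g. a query of {V1, beforeOrAfterPointer()} vs {V2, precise 4} is
// canonicalized to {V1, afterPointer()} vs {V2, afterPointer()}, which also
// lets it share a cache entry with other queries canonicalized the same way.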
1584
1585 // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1586 // for recursive queries. For this reason, this limit is chosen to be large
1587 // enough to be very rarely hit, while still being small enough to avoid
1588 // stack overflows.
1589 if (AAQI.Depth >= 512)
1590 return AliasResult::MayAlias;
1591
1592 // Check the cache before climbing up use-def chains. This also terminates
1593 // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1594 // cache key, because some cases where MayBeCrossIteration==false returns
1595 // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
1596 AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
1597 {V2, V2Size, AAQI.MayBeCrossIteration});
1598 const bool Swapped = V1 > V2;
1599 if (Swapped)
1600 std::swap(Locs.first, Locs.second);
1601 const auto &Pair = AAQI.AliasCache.try_emplace(
1602 Locs, AAQueryInfo::CacheEntry{AliasResult::MayAlias, 0});
1603 if (!Pair.second) {
1604 auto &Entry = Pair.first->second;
1605 if (!Entry.isDefinitive()) {
1606 // Remember that we used an assumption.
1607 ++Entry.NumAssumptionUses;
1608 ++AAQI.NumAssumptionUses;
1609 }
1610 // Cache contains sorted {V1,V2} pairs but we should return original order.
1611 auto Result = Entry.Result;
1612 Result.swap(Swapped);
1613 return Result;
1614 }
1615
1616 int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1617 unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1618 AliasResult Result =
1619 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1620
1621 auto It = AAQI.AliasCache.find(Locs);
1622 assert(It != AAQI.AliasCache.end() && "Must be in cache");
1623 auto &Entry = It->second;
1624
1625 // Check whether a NoAlias assumption has been used, but disproven.
1626 bool AssumptionDisproven =
1627 Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1628 if (AssumptionDisproven)
1629 Result = AliasResult::MayAlias;
1630
1631 // This is a definitive result now, when considered as a root query.
1632 AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1633 Entry.Result = Result;
1634 // Cache contains sorted {V1,V2} pairs.
1635 Entry.Result.swap(Swapped);
1636 Entry.NumAssumptionUses = -1;
1637
1638 // If the assumption has been disproven, remove any results that may have
1639 // been based on this assumption. Do this after the Entry updates above to
1640 // avoid iterator invalidation.
1641 if (AssumptionDisproven)
1642 while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1643 AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1644
1645 // The result may still be based on assumptions higher up in the chain.
1646 // Remember it, so it can be purged from the cache later.
1647 if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1648 Result != AliasResult::MayAlias)
1649 AAQI.AssumptionBasedResults.push_back(Locs);
1650 return Result;
1651}
1652
1653AliasResult BasicAAResult::aliasCheckRecursive(
1654 const Value *V1, LocationSize V1Size,
1655 const Value *V2, LocationSize V2Size,
1656 AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1657 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1658 AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1659 if (Result != AliasResult::MayAlias)
1660 return Result;
1661 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1662 AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1663 Result.swap();
1664 if (Result != AliasResult::MayAlias)
1665 return Result;
1666 }
1667
1668 if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1669 AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1670 if (Result != AliasResult::MayAlias)
1671 return Result;
1672 } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1673 AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1674 Result.swap();
1675 if (Result != AliasResult::MayAlias)
1676 return Result;
1677 }
1678
1679 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1680 AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1681 if (Result != AliasResult::MayAlias)
1682 return Result;
1683 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1684 AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1685 Result.swap();
1686 if (Result != AliasResult::MayAlias)
1687 return Result;
1688 }
1689
1690 // If both pointers are pointing into the same object and one of them
1691 // accesses the entire object, then the accesses must overlap in some way.
1692 if (O1 == O2) {
1693 bool NullIsValidLocation = NullPointerIsDefined(&F);
1694 if (V1Size.isPrecise() && V2Size.isPrecise() &&
1695 (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1696 isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1697 return AliasResult::PartialAlias;
1698 }
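// For example (illustrative): if O1 == O2 is a 4-byte alloca and V1Size is
// a precise 4 bytes, the V1 access covers the whole object, so any precise
// V2 access of the same object must overlap it at least partially.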
1699
1700 return AliasResult::MayAlias;
1701}
1702
1703/// Check whether two Values can be considered equivalent.
1704///
1705/// If the values may come from different cycle iterations, this will also
/// check that the values are not part of a cycle. We have to do this because we
1707/// are looking through phi nodes, that is we say
1708/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
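/// For example (illustrative IR), with
///   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
///   %p.next = getelementptr i8, ptr %p, i64 1
/// the SSA value %p.next denotes a different address on each iteration, so
/// two uses of it must not be treated as equal across iterations.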
1709bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1710 const Value *V2,
1711 const AAQueryInfo &AAQI) {
1712 if (V != V2)
1713 return false;
1714
1715 if (!AAQI.MayBeCrossIteration)
1716 return true;
1717
1718 // Non-instructions and instructions in the entry block cannot be part of
1719 // a loop.
1720 const Instruction *Inst = dyn_cast<Instruction>(V);
1721 if (!Inst || Inst->getParent()->isEntryBlock())
1722 return true;
1723
1724 return isNotInCycle(Inst, DT, /*LI*/ nullptr);
1725}
1726
1727/// Computes the symbolic difference between two de-composed GEPs.
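/// For example (an illustrative sketch): subtracting SrcGEP = 4 + 1*%x from
/// DestGEP = 8 + 3*%x + 2*%y yields DestGEP = 4 + 2*%x + 2*%y, with the NSW
/// flag dropped on the %x index because its scale changed. A Src index with
/// no matching Dest index would instead be appended with IsNegated set.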
1728void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1729 const DecomposedGEP &SrcGEP,
1730 const AAQueryInfo &AAQI) {
1731 DestGEP.Offset -= SrcGEP.Offset;
1732 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1733 // Find V in Dest. This is N^2, but pointer indices almost never have more
1734 // than a few variable indices.
1735 bool Found = false;
1736 for (auto I : enumerate(DestGEP.VarIndices)) {
1737 VariableGEPIndex &Dest = I.value();
1738 if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) ||
1739 !Dest.Val.hasSameCastsAs(Src.Val))
1740 continue;
1741
1742 // Normalize IsNegated if we're going to lose the NSW flag anyway.
1743 if (Dest.IsNegated) {
1744 Dest.Scale = -Dest.Scale;
1745 Dest.IsNegated = false;
1746 Dest.IsNSW = false;
1747 }
1748
1749 // If we found it, subtract off Scale V's from the entry in Dest. If it
1750 // goes to zero, remove the entry.
1751 if (Dest.Scale != Src.Scale) {
1752 Dest.Scale -= Src.Scale;
1753 Dest.IsNSW = false;
1754 } else {
1755 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1756 }
1757 Found = true;
1758 break;
1759 }
1760
1761 // If we didn't consume this entry, add it to the end of the Dest list.
1762 if (!Found) {
1763 VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
1764 /* IsNegated */ true};
1765 DestGEP.VarIndices.push_back(Entry);
1766 }
1767 }
1768}
1769
1770bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1771 LocationSize MaybeV1Size,
1772 LocationSize MaybeV2Size,
1773 AssumptionCache *AC,
1774 DominatorTree *DT,
1775 const AAQueryInfo &AAQI) {
1776 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1777 !MaybeV2Size.hasValue())
1778 return false;
1779
1780 const uint64_t V1Size = MaybeV1Size.getValue();
1781 const uint64_t V2Size = MaybeV2Size.getValue();
1782
1783 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1784
1785 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1786 !Var0.hasNegatedScaleOf(Var1) ||
1787 Var0.Val.V->getType() != Var1.Val.V->getType())
1788 return false;
1789
1790 // We'll strip off the Extensions of Var0 and Var1 and do another round
1791 // of GetLinearExpression decomposition. For example, if Var0
1792 // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1793
1794 LinearExpression E0 =
1795 GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1796 LinearExpression E1 =
1797 GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1798 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1799 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1800 return false;
1801
1802 // We have a hit - Var0 and Var1 only differ by a constant offset!
1803
1804 // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1805 // Var1 is possible to calculate, but we're just interested in the absolute
1806 // minimum difference between the two. The minimum distance may occur due to
1807 // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1808 // the minimum distance between %i and %i + 5 is 3.
1809 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1810 MinDiff = APIntOps::umin(MinDiff, Wrapped);
1811 APInt MinDiffBytes =
1812 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1813
1814 // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1815 // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1816 // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1817 // V2Size can fit in the MinDiffBytes gap.
1818 return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1819 MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1820}
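// A worked example with illustrative numbers: if Var0 is zext(%x + 2) with
// Scale 4 and Var1 is zext(%x) with Scale -4, then E0.Offset - E1.Offset
// == 2, MinDiff == umin(2, 2^N - 2) == 2, and MinDiffBytes == 2 * 4 == 8.
// With GEP.Offset == 0 the two pointers always differ by at least 8 bytes
// (even accounting for wrapping), so accesses of up to 8 bytes each cannot
// overlap.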
1821
1822//===----------------------------------------------------------------------===//
1823// BasicAliasAnalysis Pass
1824//===----------------------------------------------------------------------===//
1825
1826AnalysisKey BasicAA::Key;
1827
1828 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1829 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1830 auto &AC = AM.getResult<AssumptionAnalysis>(F);
1831 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1832 return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT);
1833}
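// A minimal sketch of how this analysis is typically consumed in the new
// pass manager (assuming a caller-owned AAManager):
//   AAManager AA;
//   AA.registerFunctionAnalysis<BasicAA>();
// PassBuilder performs this registration when building the default alias
// analysis pipeline.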
1834
1835 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1836 initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1837 }
1838
1839char BasicAAWrapperPass::ID = 0;
1840
1841void BasicAAWrapperPass::anchor() {}
1842
1843 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1844 "Basic Alias Analysis (stateless AA impl)", true, true)
1845 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1846 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1847 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1848 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1849 "Basic Alias Analysis (stateless AA impl)", true, true)
1850
1851 FunctionPass *llvm::createBasicAAWrapperPass() {
1852 return new BasicAAWrapperPass();
1853}
1854
1855 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1856 auto &ACT = getAnalysis<AssumptionCacheTracker>();
1857 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1858 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1859
1860 Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1861 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1862 &DTWP.getDomTree()));
1863
1864 return false;
1865}
1866
1867 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1868 AU.setPreservesAll();
1869 AU.addRequiredTransitive<AssumptionCacheTracker>();
1870 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1871 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1872 }