//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));
/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. This affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions must use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
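///
/// As an illustrative sketch (names are hypothetical): an alloca used only by
/// loads and stores within its parent function qualifies, while one whose
/// address is passed to an arbitrary call is considered captured by
/// PointerMayBeCaptured and does not:
///
///   %p = alloca i32             ; non-escaping if its address never leaks
///   call void @sink(i32* %p)    ; this call would capture %p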
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (ImmutableCallSite(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" is slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // first parameter, then before llvm::getObjectSize() is called to get the
  // size of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the second option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
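///
/// As an illustrative sketch (names are hypothetical), given:
///
///   %a = mul nsw i32 %x, 4
///   %b = add nsw i32 %a, 8
///
/// calling GetLinearExpression on %b yields V = %x, Scale = 4, Offset = 8:
/// the mul is decomposed first, and the add then folds its constant into
/// Offset.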
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32b programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
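///
/// For example, with PointerSize == 32, an offset of 0xFFFFFFFF is shifted
/// left by 32 bits and arithmetically shifted back, yielding -1: the 32-bit
/// two's complement interpretation of the original offset.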
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
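///
/// As an illustrative sketch (types and names are hypothetical), with
/// %struct.S = type { i32, [4 x i8] } (size 8, field 1 at offset 4), the GEP
///
///   %q = getelementptr %struct.S, %struct.S* %p, i64 %i, i32 1, i64 %j
///
/// decomposes into Base = %p, StructOffset = 4, OtherOffset = 0, and
/// VarIndices = { (%i, Scale 8), (%j, Scale 1) }.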
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
    DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
    DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // attributes, but that have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocaptured pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with the returned attribute), it is crucial
        // to use this function, since it is kept in sync with CaptureTracking.
        // Not using it may cause weird miscompilations where two aliasing
        // pointers are assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }

      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // All GEP math happens in the width of the pointer type, so we can
      // truncate the value to 64 bits, as we don't currently handle pointers
      // larger than 64 bits and we would crash later.
      // TODO: Make `Scale` an APInt to avoid this problem.
      if (IndexScale.getBitWidth() > 64)
        IndexScale = IndexScale.sextOrTrunc(64);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
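///
/// For example (a hypothetical global), a location based on @g below points
/// to constant memory, whereas a non-constant global or an unknown pointer
/// falls back to the base implementation:
///
///   @g = constant i32 42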
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (CS.onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod-only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements; always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return ModRefInfo::NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object', but the call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add it below if at least one operand
    // aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod/ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fall back to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained, but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained, but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation would cause the second store to be ignored (based on
  // the rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained, but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
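///
/// As an illustrative sketch (types and names are hypothetical), with
/// %struct.S = type { i32, i32 } and 4-byte accesses, the GEPs
///
///   %f0 = getelementptr %struct.S, %struct.S* %p, i64 %i, i32 0
///   %f1 = getelementptr %struct.S, %struct.S* %p, i64 %i, i32 1
///
/// address disjoint fields of the same struct, so the struct-layout check
/// below proves them NoAlias.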
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize V1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize V2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other indices
  // might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other, so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, f1, ... } foo;
//   foo alloca;
//   foo *random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize ObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
    return false;

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset from the
  // base and can use it. If the GEP has variable indices, we can't get the
  // exact GEP offset needed to identify a pointer alias, so return false in
  // that case.
  if (!DecompGEP.VarIndices.empty())
    return false;
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL); UnderlyingV2 is the same for
/// V2.
AliasResult
BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
                        const AAMDNodes &V1AAInfo, const Value *V2,
                        LocationSize V2Size, const AAMDNodes &V2AAInfo,
                        const Value *UnderlyingV1, const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-aliasing or non-aliasing base
  // pointers, figure out whether the indices of the GEPs tell us anything
  // about the derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, MemoryLocation::UnknownSize,
                               V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done through
      // a pointer value associated with an address range of the memory access,
      // otherwise the behavior is undefined."
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...).
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
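    //
    // Worked example (hypothetical values): if the only variable scale is 8
    // and the constant offset difference is 4, the two addresses differ by
    // 8*k + 4. Two 4-byte accesses then occupy disjoint residues mod 8
    // ([4, 8) and [0, 4)), so the check below concludes NoAlias.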
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}
1431 
1433  // If the results agree, take it.
1434  if (A == B)
1435  return A;
1436  // A mix of PartialAlias and MustAlias is PartialAlias.
1437  if ((A == PartialAlias && B == MustAlias) ||
1438  (B == PartialAlias && A == MustAlias))
1439  return PartialAlias;
1440  // Otherwise, we don't know anything.
1441  return MayAlias;
1442 }
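// Editor's note (illustrative, not part of the original source): the merge
// above behaves like a meet over the AliasResult lattice. A few sample
// points:
//   MergeAliasResults(NoAlias, NoAlias)        == NoAlias
//   MergeAliasResults(MustAlias, PartialAlias) == PartialAlias
//   MergeAliasResults(NoAlias, MustAlias)      == MayAlias  (disagreement)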
1443 
1444 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1445 /// against another.
1446 AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
1447  LocationSize SISize,
1448  const AAMDNodes &SIAAInfo,
1449  const Value *V2, LocationSize V2Size,
1450  const AAMDNodes &V2AAInfo,
1451  const Value *UnderV2) {
1452  // If the values are Selects with the same condition, we can do a more precise
1453  // check: just check for aliases between the values on corresponding arms.
1454  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1455  if (SI->getCondition() == SI2->getCondition()) {
1456  AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
1457  SI2->getTrueValue(), V2Size, V2AAInfo);
1458  if (Alias == MayAlias)
1459  return MayAlias;
1460  AliasResult ThisAlias =
1461  aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
1462  SI2->getFalseValue(), V2Size, V2AAInfo);
1463  return MergeAliasResults(ThisAlias, Alias);
1464  }
1465 
1466  // If both arms of the Select node NoAlias or MustAlias V2, then we can
1467  // return NoAlias / MustAlias accordingly; otherwise, return MayAlias.
1468  AliasResult Alias =
1469  aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
1470  SISize, SIAAInfo, UnderV2);
1471  if (Alias == MayAlias)
1472  return MayAlias;
1473 
1474  AliasResult ThisAlias =
1475  aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
1476  UnderV2);
1477  return MergeAliasResults(ThisAlias, Alias);
1478 }
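// Editor's note (illustrative, not part of the original source; the IR names
// are hypothetical): the same-condition fast path above applies to a pair
// like
//   %p = select i1 %c, i32* %a, i32* %b
//   %q = select i1 %c, i32* %a, i32* %d
// Only corresponding arms can be chosen together, so the query reduces to
// MergeAliasResults(alias(%a, %a), alias(%b, %d)).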
1479 
1480 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1481 /// another.
1482 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1483  const AAMDNodes &PNAAInfo, const Value *V2,
1484  LocationSize V2Size,
1485  const AAMDNodes &V2AAInfo,
1486  const Value *UnderV2) {
1487  // Track phi nodes we have visited. We use this information when we determine
1488  // value equivalence.
1489  VisitedPhiBBs.insert(PN->getParent());
1490 
1491  // If the values are PHIs in the same block, we can do a more precise
1492  // as well as efficient check: just check for aliases between the values
1493  // on corresponding edges.
1494  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1495  if (PN2->getParent() == PN->getParent()) {
1496  LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1497  MemoryLocation(V2, V2Size, V2AAInfo));
1498  if (PN > V2)
1499  std::swap(Locs.first, Locs.second);
1500  // Analyse the PHIs' inputs under the assumption that the PHIs are
1501  // NoAlias.
1502  // If the PHIs are May/MustAlias there must be (recursively) an input
1503  // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1504  // there must be an operation on the PHIs within the PHIs' value cycle
1505  // that causes a MayAlias.
1506  // Pretend the phis do not alias.
1507  AliasResult Alias = NoAlias;
1508  assert(AliasCache.count(Locs) &&
1509  "There must exist an entry for the phi node");
1510  AliasResult OrigAliasResult = AliasCache[Locs];
1511  AliasCache[Locs] = NoAlias;
1512 
1513  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1514  AliasResult ThisAlias =
1515  aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1516  PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1517  V2Size, V2AAInfo);
1518  Alias = MergeAliasResults(ThisAlias, Alias);
1519  if (Alias == MayAlias)
1520  break;
1521  }
1522 
1523  // Reset if speculation failed.
1524  if (Alias != NoAlias)
1525  AliasCache[Locs] = OrigAliasResult;
1526 
1527  return Alias;
1528  }
1529 
1530  SmallPtrSet<Value *, 4> UniqueSrc;
1531  SmallVector<Value *, 4> V1Srcs;
1532  bool isRecursive = false;
1533  for (Value *PV1 : PN->incoming_values()) {
1534  if (isa<PHINode>(PV1))
1535  // If any of the sources is itself a PHI, return MayAlias conservatively
1536  // to avoid compile-time explosion. The worst possible case is when both
1537  // sides are PHI nodes, in which case this is O(m x n) time where 'm'
1538  // and 'n' are the number of PHI sources.
1539  return MayAlias;
1540 
1541  if (EnableRecPhiAnalysis)
1542  if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1543  // Check whether the incoming value is a GEP that advances the pointer
1544  // result of this PHI node (e.g. in a loop). If this is the case, we
1545  // would recurse and always get a MayAlias. Handle this case specially
1546  // below.
1547  if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1548  isa<ConstantInt>(PV1GEP->idx_begin())) {
1549  isRecursive = true;
1550  continue;
1551  }
1552  }
1553 
1554  if (UniqueSrc.insert(PV1).second)
1555  V1Srcs.push_back(PV1);
1556  }
1557 
1558  // If this PHI node is recursive, set the size of the accessed memory to
1559  // unknown to represent all the possible values the GEP could advance the
1560  // pointer to.
1561  if (isRecursive)
1562  PNSize = MemoryLocation::UnknownSize;
1563 
1564  AliasResult Alias =
1565  aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
1566  PNSize, PNAAInfo, UnderV2);
1567 
1568  // Early exit if the check of the first PHI source against V2 is MayAlias.
1569  // Other results are not possible.
1570  if (Alias == MayAlias)
1571  return MayAlias;
1572 
1573  // If all sources of the PHI node NoAlias or MustAlias V2, then we can
1574  // return NoAlias / MustAlias accordingly; otherwise, return MayAlias.
1575  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1576  Value *V = V1Srcs[i];
1577 
1578  AliasResult ThisAlias =
1579  aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
1580  Alias = MergeAliasResults(ThisAlias, Alias);
1581  if (Alias == MayAlias)
1582  break;
1583  }
1584 
1585  return Alias;
1586 }
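// Editor's note (illustrative, not part of the original source; the IR names
// are hypothetical): the isRecursive case above matches a classic
// pointer-increment loop such as
//   loop:
//     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr inbounds i8, i8* %p, i64 1
// The incoming GEP strides off the phi itself, so instead of recursing
// forever, the size of the phi's access is widened to UnknownSize.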
1587 
1588 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1589 /// array references.
1590 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1591  AAMDNodes V1AAInfo, const Value *V2,
1592  LocationSize V2Size, AAMDNodes V2AAInfo,
1593  const Value *O1, const Value *O2) {
1594  // If either of the memory references is empty, it doesn't matter what the
1595  // pointer values are.
1596  if (V1Size == 0 || V2Size == 0)
1597  return NoAlias;
1598 
1599  // Strip off any casts if they exist.
1600  V1 = V1->stripPointerCastsAndInvariantGroups();
1601  V2 = V2->stripPointerCastsAndInvariantGroups();
1602 
1603  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1604  // value for undef that aliases nothing in the program.
1605  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1606  return NoAlias;
1607 
1608  // Are we checking for alias of the same value?
1609  // Because we look 'through' phi nodes, we could look at "Value" pointers from
1610  // different iterations. We must therefore make sure that this is not the
1611  // case. The function isValueEqualInPotentialCycles ensures that this cannot
1612  // happen by looking at the visited phi nodes and making sure they cannot
1613  // reach the value.
1614  if (isValueEqualInPotentialCycles(V1, V2))
1615  return MustAlias;
1616 
1617  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1618  return NoAlias; // Scalars cannot alias each other
1619 
1620  // Figure out what objects these things are pointing to if we can.
1621  if (O1 == nullptr)
1622  O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
1623 
1624  if (O2 == nullptr)
1625  O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
1626 
1627  // Null values in the default address space don't point to any object, so they
1628  // don't alias any other pointer.
1629  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1630  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1631  return NoAlias;
1632  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1633  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1634  return NoAlias;
1635 
1636  if (O1 != O2) {
1637  // If V1/V2 point to two different objects, we know that we have no alias.
1638  if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1639  return NoAlias;
1640 
1641  // Constant pointers can't alias with non-const isIdentifiedObject objects.
1642  if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1643  (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1644  return NoAlias;
1645 
1646  // Function arguments can't alias with things that are known to be
1647  // unambiguously identified at the function level.
1648  if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1649  (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1650  return NoAlias;
1651 
1652  // If one pointer is the result of a call/invoke or load and the other is a
1653  // non-escaping local object within the same function, then we know the
1654  // object couldn't escape to a point where the call could return it.
1655  //
1656  // Note that if the pointers are in different functions, there are a
1657  // variety of complications. A call with a nocapture argument may still
1658  // temporarily store the nocapture argument's value in a temporary memory
1659  // location if that memory location doesn't escape. Or it may pass a
1660  // nocapture value to other functions as long as they don't capture it.
1661  if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
1662  return NoAlias;
1663  if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
1664  return NoAlias;
1665  }
1666 
1667  // If the size of one access is larger than the entire object on the other
1668  // side, then we know such behavior is undefined and can assume no alias.
1669  bool NullIsValidLocation = NullPointerIsDefined(&F);
1670  if ((V1Size != MemoryLocation::UnknownSize &&
1671  isObjectSmallerThan(O2, V1Size, DL, TLI, NullIsValidLocation)) ||
1672  (V2Size != MemoryLocation::UnknownSize &&
1673  isObjectSmallerThan(O1, V2Size, DL, TLI, NullIsValidLocation)))
1674  return NoAlias;
1675 
1676  // Check the cache before climbing up use-def chains. This also terminates
1677  // otherwise infinitely recursive queries.
1678  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1679  MemoryLocation(V2, V2Size, V2AAInfo));
1680  if (V1 > V2)
1681  std::swap(Locs.first, Locs.second);
1682  std::pair<AliasCacheTy::iterator, bool> Pair =
1683  AliasCache.insert(std::make_pair(Locs, MayAlias));
1684  if (!Pair.second)
1685  return Pair.first->second;
1686 
1687  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1688  // GEP can't simplify, we don't even look at the PHI cases.
1689  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1690  std::swap(V1, V2);
1691  std::swap(V1Size, V2Size);
1692  std::swap(O1, O2);
1693  std::swap(V1AAInfo, V2AAInfo);
1694  }
1695  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1696  AliasResult Result =
1697  aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
1698  if (Result != MayAlias)
1699  return AliasCache[Locs] = Result;
1700  }
1701 
1702  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1703  std::swap(V1, V2);
1704  std::swap(O1, O2);
1705  std::swap(V1Size, V2Size);
1706  std::swap(V1AAInfo, V2AAInfo);
1707  }
1708  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1709  AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
1710  V2, V2Size, V2AAInfo, O2);
1711  if (Result != MayAlias)
1712  return AliasCache[Locs] = Result;
1713  }
1714 
1715  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1716  std::swap(V1, V2);
1717  std::swap(O1, O2);
1718  std::swap(V1Size, V2Size);
1719  std::swap(V1AAInfo, V2AAInfo);
1720  }
1721  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1722  AliasResult Result =
1723  aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
1724  if (Result != MayAlias)
1725  return AliasCache[Locs] = Result;
1726  }
1727 
1728  // If both pointers are pointing into the same object and one of them
1729  // accesses the entire object, then the accesses must overlap in some way.
1730  if (O1 == O2)
1731  if (V1Size != MemoryLocation::UnknownSize &&
1732  V2Size != MemoryLocation::UnknownSize &&
1733  (isObjectSize(O1, V1Size, DL, TLI, NullIsValidLocation) ||
1734  isObjectSize(O2, V2Size, DL, TLI, NullIsValidLocation)))
1735  return AliasCache[Locs] = PartialAlias;
1736 
1737  // Recurse back into the best AA results we have, potentially with refined
1738  // memory locations. We have already ensured that BasicAA has a MayAlias
1739  // cache result for these, so any recursion back into BasicAA won't loop.
1740  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
1741  return AliasCache[Locs] = Result;
1742 }
1743 
1744 /// Check whether two Values can be considered equivalent.
1745 ///
1746 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1747 /// they cannot be part of a cycle in the value graph by looking at all
1748 /// visited phi nodes and making sure that the phis cannot reach the value. We
1749 /// have to do this because we are looking through phi nodes (that is, we say
1750 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1751 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1752  const Value *V2) {
1753  if (V != V2)
1754  return false;
1755 
1756  const Instruction *Inst = dyn_cast<Instruction>(V);
1757  if (!Inst)
1758  return true;
1759 
1760  if (VisitedPhiBBs.empty())
1761  return true;
1762 
1763  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1764  return false;
1765 
1766  // Make sure that the visited phis cannot reach the Value. This ensures that
1767  // the Values cannot come from different iterations of a potential cycle the
1768  // phi nodes could be involved in.
1769  for (auto *P : VisitedPhiBBs)
1770  if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
1771  return false;
1772 
1773  return true;
1774 }
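// Editor's note (illustrative, not part of the original source; the IR names
// are hypothetical): why pointer equality alone is not enough here. In
//   loop:
//     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
//     %i.next = add i64 %i, 1
// the single SSA value %i.next stands for a different number on every trip
// around the loop. Once a phi block has been visited, two occurrences of
// %i.next may belong to different iterations, so equality is only accepted
// when no visited phi can reach the instruction.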
1775 
1776 /// Computes the symbolic difference between two decomposed GEPs.
1777 ///
1778 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1779 /// instructions GEP1 and GEP2 which have common base pointers.
1780 void BasicAAResult::GetIndexDifference(
1781  SmallVectorImpl<VariableGEPIndex> &Dest,
1782  const SmallVectorImpl<VariableGEPIndex> &Src) {
1783  if (Src.empty())
1784  return;
1785 
1786  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1787  const Value *V = Src[i].V;
1788  unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1789  int64_t Scale = Src[i].Scale;
1790 
1791  // Find V in Dest. This is N^2, but pointer indices almost never have more
1792  // than a few variable indices.
1793  for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1794  if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1795  Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1796  continue;
1797 
1798  // If we found it, subtract off Scale V's from the entry in Dest. If it
1799  // goes to zero, remove the entry.
1800  if (Dest[j].Scale != Scale)
1801  Dest[j].Scale -= Scale;
1802  else
1803  Dest.erase(Dest.begin() + j);
1804  Scale = 0;
1805  break;
1806  }
1807 
1808  // If we didn't consume this entry, add it to the end of the Dest list.
1809  if (Scale) {
1810  VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1811  Dest.push_back(Entry);
1812  }
1813  }
1814 }
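// Editor's note (illustrative, not part of the original source; %i is a
// hypothetical index value): a worked run of GetIndexDifference. With
// Dest = {(%i, Scale 4)} and Src = {(%i, Scale 4)} the entry cancels and
// Dest becomes empty; with Dest = {(%i, Scale 8)} and Src = {(%i, Scale 4)}
// Dest becomes {(%i, Scale 4)}; and an index appearing only in Src is
// appended to Dest with its scale negated.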
1815 
1816 bool BasicAAResult::constantOffsetHeuristic(
1817  const SmallVectorImpl<VariableGEPIndex> &VarIndices, LocationSize V1Size,
1818  LocationSize V2Size, int64_t BaseOffset, AssumptionCache *AC,
1819  DominatorTree *DT) {
1820  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
1821  V2Size == MemoryLocation::UnknownSize)
1822  return false;
1823 
1824  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1825 
1826  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1827  Var0.Scale != -Var1.Scale)
1828  return false;
1829 
1830  unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1831 
1832  // We'll strip off the Extensions of Var0 and Var1 and do another round
1833  // of GetLinearExpression decomposition. For example, if Var0 is
1834  // zext(%x + 1), we should get V0 == %x and V0Offset == 1.
1835 
1836  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1837  V1Offset(Width, 0);
1838  bool NSW = true, NUW = true;
1839  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1840  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1841  V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1842  NSW = true;
1843  NUW = true;
1844  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1845  V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1846 
1847  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1848  V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1849  return false;
1850 
1851  // We have a hit - Var0 and Var1 only differ by a constant offset!
1852 
1853  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1854  // Var1 is possible to calculate, but we're just interested in the absolute
1855  // minimum difference between the two. The minimum distance may occur due to
1856  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1857  // the minimum distance between %i and %i + 5 is 3.
1858  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1859  MinDiff = APIntOps::umin(MinDiff, Wrapped);
1860  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
1861 
1862  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1863  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1864  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1865  // V2Size can fit in the MinDiffBytes gap.
1866  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
1867  V2Size + std::abs(BaseOffset) <= MinDiffBytes;
1868 }
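// Editor's note (illustrative, not part of the original source; %x is a
// hypothetical value): a worked run of constantOffsetHeuristic. Take
// VarIndices = {(zext(%x + 1), Scale 4), (zext(%x), Scale -4)}. Stripping
// the extensions gives V0 == V1 == %x with V0Offset == 1 and V1Offset == 0,
// so MinDiff == 1 and MinDiffBytes == 4: the two addresses always differ by
// at least 4 bytes, and any pair of accesses with Size + |BaseOffset| <= 4
// is declared NoAlias.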
1869 
1870 //===----------------------------------------------------------------------===//
1871 // BasicAliasAnalysis Pass
1872 //===----------------------------------------------------------------------===//
1873 
1874 AnalysisKey BasicAA::Key;
1875 
1876 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1877  return BasicAAResult(F.getParent()->getDataLayout(),
1878  F,
1879  AM.getResult<TargetLibraryAnalysis>(F),
1880  AM.getResult<AssumptionAnalysis>(F),
1881  &AM.getResult<DominatorTreeAnalysis>(F),
1882  AM.getCachedResult<LoopAnalysis>(F));
1883 }
1884 
1885 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1886  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1887 }
1888 
1889 char BasicAAWrapperPass::ID = 0;
1890 
1891 void BasicAAWrapperPass::anchor() {}
1892 
1893 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
1894  "Basic Alias Analysis (stateless AA impl)", true, true)
1895 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1896 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1897 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1898 INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
1899  "Basic Alias Analysis (stateless AA impl)", true, true)
1900 
1901 FunctionPass *llvm::createBasicAAWrapperPass() {
1902  return new BasicAAWrapperPass();
1903 }
1904 
1905 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1906  auto &ACT = getAnalysis<AssumptionCacheTracker>();
1907  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1908  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1909  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
1910 
1911  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, TLIWP.getTLI(),
1912  ACT.getAssumptionCache(F), &DTWP.getDomTree(),
1913  LIWP ? &LIWP->getLoopInfo() : nullptr));
1914 
1915  return false;
1916 }
1917 
1918 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1919  AU.setPreservesAll();
1920  AU.addRequired<AssumptionCacheTracker>();
1921  AU.addRequired<DominatorTreeWrapperPass>();
1922  AU.addRequired<TargetLibraryInfoWrapperPass>();
1923 }
1924 
1925 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1926  return BasicAAResult(
1927  F.getParent()->getDataLayout(),
1928  F,
1929  P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
1930  P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1931 }