LLVM 8.0.0svn
BasicAliasAnalysis.cpp
//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));
/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (ImmutableCallSite(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" is slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts and returns V as a Value*,
/// along with whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
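///
/// As an illustrative sketch (the IR names here are hypothetical), given
///   %a = mul i64 %x, 4
///   %b = add i64 %a, 8
/// a query on %b is expected to return V = %x with Scale = 4 and Offset = 8,
/// with NSW/NUW narrowed by the wrap flags seen along the way.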
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32-bit programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
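///
/// For instance, with PointerSize == 32 an offset of 0xFFFFFFFF is shifted
/// up and back down, sign-extending from bit 31, and comes out as -1,
/// matching the two's complement wrap-around of a 32-bit pointer.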
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
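///
/// As a rough sketch (hypothetical IR, and assuming a layout that places the
/// array field at byte offset 4), a GEP such as
///   %p = getelementptr {i32, [8 x i32]}, {i32, [8 x i32]}* %b, i64 0, i32 1, i64 %i
/// would decompose into Base = %b, StructOffset = 4, OtherOffset = 0, and a
/// single variable index {%i, Scale = 4}.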
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
    DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
    DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed with
        // the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocaptured pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with a returns attribute), it is crucial to
        // use this function because it should stay in sync with
        // CaptureTracking. Not using it may cause weird miscompilations where
        // two aliasing pointers are assumed to noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }

      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // All GEP math happens in the width of the pointer type,
      // so we can truncate the value to 64 bits as we don't currently
      // handle pointers larger than 64 bits and we would crash
      // later. TODO: Make `Scale` an APInt to avoid this problem.
      if (IndexScale.getBitWidth() > 64)
        IndexScale = IndexScale.sextOrTrunc(64);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
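///
/// For example, a pointer that GetUnderlyingObject resolves to a global
/// declared 'constant' (or, with OrLocal set, to an alloca) is expected to
/// be reported as constant memory; anything resolving to an unknown object
/// falls back to the base implementation.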
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (CS.onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add it below if at least one operand
    // aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation would cause the second store to be ignored (based on
  // the rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special-case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
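///
/// As a sketch of the struct case handled below (hypothetical IR):
///   %f0 = getelementptr %struct.S, %struct.S* %p, i64 0, i32 0
///   %f1 = getelementptr %struct.S, %struct.S* %p, i64 0, i32 1
/// The last (constant) indices select different, non-overlapping fields of
/// the same struct, so the two accesses can be reported NoAlias as long as
/// each access size stays within its field.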
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

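  // A sketch of the check below: with StructSize == 8, V1Off == 0, V1Size == 4,
  // V2Off == 4 and V2Size == 4, the two accesses sit in disjoint fields of one
  // struct element, so they cannot overlap; the "- StructSize" term extends the
  // reasoning to neighbouring elements in an array of such structs.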
  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset from the
  // base and can use it. If the GEP has variable indices, we can't get the
  // exact GEP offset to identify a pointer alias, so return false in that
  // case.
  if (!DecompGEP.VarIndices.empty())
    return false;
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
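///
/// A minimal sketch of the constant-offset path below: two GEPs off the same
/// base whose decomposed offsets differ by 8, with both access sizes equal to
/// 4, leave a gap of at least 4 bytes and are reported NoAlias; a difference
/// smaller than the access size would instead yield PartialAlias.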
AliasResult
BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
                        const AAMDNodes &V1AAInfo, const Value *V2,
                        LocationSize V2Size, const AAMDNodes &V2AAInfo,
                        const Value *UnderlyingV1, const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or non-aliasing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
                   UnderlyingV2, LocationSize::unknown(), AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
      return MayAlias;

    AliasResult R =
        aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(), V2,
                   LocationSize::unknown(), V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done through
      // a pointer value associated with an address range of the memory access,
      // otherwise the behavior is undefined.".
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEP's (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != LocationSize::unknown()) {
        if ((uint64_t)GEP1BaseOffset < V2Size.getValue())
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...').
      if (V1Size != LocationSize::unknown() &&
          V2Size != LocationSize::unknown()) {
        if (-(uint64_t)GEP1BaseOffset < V1Size.getValue())
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

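    // For example, scales of 20 (0b10100) and 16 (0b10000) OR together to
    // 0b10100; the clear-lowest-set-bit step below then leaves Modulo == 4,
    // i.e. both variable parts are multiples of 4.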
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != LocationSize::unknown() &&
        V2Size != LocationSize::unknown() && ModOffset >= V2Size.getValue() &&
        V1Size.getValue() <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 &&
        V2Size != LocationSize::unknown() &&
        V2Size.getValue() <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}
1440 
1441 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1442  // If the results agree, take it.
1443  if (A == B)
1444  return A;
1445  // A mix of PartialAlias and MustAlias is PartialAlias.
1446  if ((A == PartialAlias && B == MustAlias) ||
1447  (B == PartialAlias && A == MustAlias))
1448  return PartialAlias;
1449  // Otherwise, we don't know anything.
1450  return MayAlias;
1451 }
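// A few concrete merges implied by the rules above:
//   MergeAliasResults(NoAlias, NoAlias)        == NoAlias
//   MergeAliasResults(PartialAlias, MustAlias) == PartialAlias
//   MergeAliasResults(NoAlias, MustAlias)      == MayAlias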
1452 
1453 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1454 /// against another.
1455 AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
1456  LocationSize SISize,
1457  const AAMDNodes &SIAAInfo,
1458  const Value *V2, LocationSize V2Size,
1459  const AAMDNodes &V2AAInfo,
1460  const Value *UnderV2) {
1461  // If the values are Selects with the same condition, we can do a more precise
1462  // check: just check for aliases between the values on corresponding arms.
1463  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1464  if (SI->getCondition() == SI2->getCondition()) {
1465  AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
1466  SI2->getTrueValue(), V2Size, V2AAInfo);
1467  if (Alias == MayAlias)
1468  return MayAlias;
1469  AliasResult ThisAlias =
1470  aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
1471  SI2->getFalseValue(), V2Size, V2AAInfo);
1472  return MergeAliasResults(ThisAlias, Alias);
1473  }
1474 
1475  // If both arms of the Select node NoAlias or MustAlias V2, then we return
1476  // NoAlias / MustAlias. Otherwise, we return MayAlias.
1477  AliasResult Alias =
1478  aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
1479  SISize, SIAAInfo, UnderV2);
1480  if (Alias == MayAlias)
1481  return MayAlias;
1482 
1483  AliasResult ThisAlias =
1484  aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
1485  UnderV2);
1486  return MergeAliasResults(ThisAlias, Alias);
1487 }
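// Illustration of the same-condition fast path above, with hypothetical IR:
//   %p = select i1 %c, i32* %a, i32* %b
//   %q = select i1 %c, i32* %x, i32* %y
// Because %p and %q take corresponding arms under the same %c, it suffices
// to merge alias(%a, %x) with alias(%b, %y).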
1488 
1489 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1490 /// another.
1491 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1492  const AAMDNodes &PNAAInfo, const Value *V2,
1493  LocationSize V2Size,
1494  const AAMDNodes &V2AAInfo,
1495  const Value *UnderV2) {
1496  // Track phi nodes we have visited. We use this information when we determine
1497  // value equivalence.
1498  VisitedPhiBBs.insert(PN->getParent());
1499 
1500  // If the values are PHIs in the same block, we can do a more precise
1501  // as well as efficient check: just check for aliases between the values
1502  // on corresponding edges.
1503  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1504  if (PN2->getParent() == PN->getParent()) {
1505  LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1506  MemoryLocation(V2, V2Size, V2AAInfo));
1507  if (PN > V2)
1508  std::swap(Locs.first, Locs.second);
1509  // Analyse the PHIs' inputs under the assumption that the PHIs are
1510  // NoAlias.
1511  // If the PHIs are May/MustAlias there must be (recursively) an input
1512  // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1513  // there must be an operation on the PHIs within the PHIs' value cycle
1514  // that causes a MayAlias.
1515  // Pretend the phis do not alias.
1516  AliasResult Alias = NoAlias;
1517  assert(AliasCache.count(Locs) &&
1518  "There must exist an entry for the phi node");
1519  AliasResult OrigAliasResult = AliasCache[Locs];
1520  AliasCache[Locs] = NoAlias;
1521 
1522  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1523  AliasResult ThisAlias =
1524  aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1525  PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1526  V2Size, V2AAInfo);
1527  Alias = MergeAliasResults(ThisAlias, Alias);
1528  if (Alias == MayAlias)
1529  break;
1530  }
1531 
1532  // Reset if speculation failed.
1533  if (Alias != NoAlias)
1534  AliasCache[Locs] = OrigAliasResult;
1535 
1536  return Alias;
1537  }
1538 
1539  SmallVector<Value *, 4> V1Srcs;
1540  bool isRecursive = false;
1541  if (PV) {
1542  // If we have PhiValues then use it to get the underlying phi values.
1543  const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1544  // If we have more phi values than the search depth then return MayAlias
1545  // conservatively to avoid compile time explosion. The worst possible case
1546  // is if both sides are PHI nodes, in which case this is O(m x n) time
1547  // where 'm' and 'n' are the number of PHI sources.
1548  if (PhiValueSet.size() > MaxLookupSearchDepth)
1549  return MayAlias;
1550  // Add the values to V1Srcs
1551  for (Value *PV1 : PhiValueSet) {
1552  if (EnableRecPhiAnalysis) {
1553  if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1554  // Check whether the incoming value is a GEP that advances the pointer
1555  // result of this PHI node (e.g. in a loop). If this is the case, we
1556  // would recurse and always get a MayAlias. Handle this case specially
1557  // below.
1558  if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1559  isa<ConstantInt>(PV1GEP->idx_begin())) {
1560  isRecursive = true;
1561  continue;
1562  }
1563  }
1564  }
1565  V1Srcs.push_back(PV1);
1566  }
1567  } else {
1568  // If we don't have PhiValues then just look at the operands of the phi itself.
1569  // FIXME: Remove this once we can guarantee that we always have PhiValues.
1570  SmallPtrSet<Value *, 4> UniqueSrc;
1571  for (Value *PV1 : PN->incoming_values()) {
1572  if (isa<PHINode>(PV1))
1573  // If any source is itself a PHI, return MayAlias conservatively
1574  // to avoid compile time explosion. The worst possible case is if both
1575  // sides are PHI nodes, in which case this is O(m x n) time where 'm'
1576  // and 'n' are the number of PHI sources.
1577  return MayAlias;
1578 
1579  if (EnableRecPhiAnalysis)
1580  if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1581  // Check whether the incoming value is a GEP that advances the pointer
1582  // result of this PHI node (e.g. in a loop). If this is the case, we
1583  // would recurse and always get a MayAlias. Handle this case specially
1584  // below.
1585  if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1586  isa<ConstantInt>(PV1GEP->idx_begin())) {
1587  isRecursive = true;
1588  continue;
1589  }
1590  }
1591 
1592  if (UniqueSrc.insert(PV1).second)
1593  V1Srcs.push_back(PV1);
1594  }
1595  }
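 // Illustration of the recursive pattern detected above, with hypothetical
 // IR for a pointer-increment loop:
 //   loop:
 //     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
 //     %p.next = getelementptr inbounds i8, i8* %p, i64 4
 // The GEP advances the PHI's own result, so we set isRecursive and later
 // widen PNSize to LocationSize::unknown().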
1596 
1597  // If V1Srcs is empty then that means that the phi has no underlying non-phi
1598  // value. This should only be possible in blocks unreachable from the entry
1599  // block, but return MayAlias just in case.
1600  if (V1Srcs.empty())
1601  return MayAlias;
1602 
1603  // If this PHI node is recursive, set the size of the accessed memory to
1604  // unknown to represent all the possible values the GEP could advance the
1605  // pointer to.
1606  if (isRecursive)
1607  PNSize = LocationSize::unknown();
1608 
1609  AliasResult Alias =
1610  aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
1611  PNSize, PNAAInfo, UnderV2);
1612 
1613  // Early exit if the check of the first PHI source against V2 is MayAlias.
1614  // Other results are not possible.
1615  if (Alias == MayAlias)
1616  return MayAlias;
1617 
1618  // If all sources of the PHI node NoAlias or MustAlias V2, then we return
1619  // NoAlias / MustAlias. Otherwise, we return MayAlias.
1620  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1621  Value *V = V1Srcs[i];
1622 
1623  AliasResult ThisAlias =
1624  aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
1625  Alias = MergeAliasResults(ThisAlias, Alias);
1626  if (Alias == MayAlias)
1627  break;
1628  }
1629 
1630  return Alias;
1631 }
1632 
1633 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1634 /// array references.
1635 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1636  AAMDNodes V1AAInfo, const Value *V2,
1637  LocationSize V2Size, AAMDNodes V2AAInfo,
1638  const Value *O1, const Value *O2) {
1639  // If either of the memory references is empty, it doesn't matter what the
1640  // pointer values are.
1641  if (V1Size == 0 || V2Size == 0)
1642  return NoAlias;
1643 
1644  // Strip off any casts if they exist.
1645  V1 = V1->stripPointerCastsAndInvariantGroups();
1646  V2 = V2->stripPointerCastsAndInvariantGroups();
1647 
1648  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1649  // value for undef that aliases nothing in the program.
1650  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1651  return NoAlias;
1652 
1653  // Are we checking for alias of the same value?
1654  // Because we look 'through' phi nodes, we could look at "Value" pointers from
1655  // different iterations. We must therefore make sure that this is not the
1656  // case. The function isValueEqualInPotentialCycles ensures that this cannot
1657  // happen by looking at the visited phi nodes and making sure they cannot
1658  // reach the value.
1659  if (isValueEqualInPotentialCycles(V1, V2))
1660  return MustAlias;
1661 
1662  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1663  return NoAlias; // Scalars cannot alias each other
1664 
1665  // Figure out what objects these things are pointing to if we can.
1666  if (O1 == nullptr)
1667  O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
1668 
1669  if (O2 == nullptr)
1670  O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
1671 
1672  // Null values in the default address space don't point to any object, so they
1673  // don't alias any other pointer.
1674  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1675  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1676  return NoAlias;
1677  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1678  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1679  return NoAlias;
1680 
1681  if (O1 != O2) {
1682  // If V1/V2 point to two different objects, we know that we have no alias.
1683  if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1684  return NoAlias;
1685 
1686  // Constant pointers can't alias with non-const isIdentifiedObject objects.
1687  if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1688  (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1689  return NoAlias;
1690 
1691  // Function arguments can't alias with things that are known to be
1692  // unambiguously identified at the function level.
1693  if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1694  (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1695  return NoAlias;
1696 
1697  // If one pointer is the result of a call/invoke or load and the other is a
1698  // non-escaping local object within the same function, then we know the
1699  // object couldn't escape to a point where the call could return it.
1700  //
1701  // Note that if the pointers are in different functions, there are a
1702  // variety of complications. A call with a nocapture argument may still
1703  // temporarily store the nocapture argument's value in a temporary memory
1704  // location if that memory location doesn't escape. Or it may pass a
1705  // nocapture value to other functions as long as they don't capture it.
1706  if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
1707  return NoAlias;
1708  if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
1709  return NoAlias;
1710  }
1711 
1712  // If the size of one access is larger than the entire object on the other
1713  // side, then we know such behavior is undefined and can assume no alias.
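 // For example, if V1Size is 16 bytes but O2 is a distinct 8-byte alloca,
 // a 16-byte access through V1 could not stay within O2, so overlapping
 // accesses would be undefined behavior and we may assume NoAlias.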
1714  bool NullIsValidLocation = NullPointerIsDefined(&F);
1715  if ((V1Size.isPrecise() && isObjectSmallerThan(O2, V1Size.getValue(), DL, TLI,
1716  NullIsValidLocation)) ||
1717  (V2Size.isPrecise() && isObjectSmallerThan(O1, V2Size.getValue(), DL, TLI,
1718  NullIsValidLocation)))
1719  return NoAlias;
1720 
1721  // Check the cache before climbing up use-def chains. This also terminates
1722  // otherwise infinitely recursive queries.
1723  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1724  MemoryLocation(V2, V2Size, V2AAInfo));
1725  if (V1 > V2)
1726  std::swap(Locs.first, Locs.second);
1727  std::pair<AliasCacheTy::iterator, bool> Pair =
1728  AliasCache.insert(std::make_pair(Locs, MayAlias));
1729  if (!Pair.second)
1730  return Pair.first->second;
1731 
1732  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1733  // GEP can't simplify, we don't even look at the PHI cases.
1734  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1735  std::swap(V1, V2);
1736  std::swap(V1Size, V2Size);
1737  std::swap(O1, O2);
1738  std::swap(V1AAInfo, V2AAInfo);
1739  }
1740  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1741  AliasResult Result =
1742  aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
1743  if (Result != MayAlias)
1744  return AliasCache[Locs] = Result;
1745  }
1746 
1747  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1748  std::swap(V1, V2);
1749  std::swap(O1, O2);
1750  std::swap(V1Size, V2Size);
1751  std::swap(V1AAInfo, V2AAInfo);
1752  }
1753  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1754  AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
1755  V2, V2Size, V2AAInfo, O2);
1756  if (Result != MayAlias)
1757  return AliasCache[Locs] = Result;
1758  }
1759 
1760  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1761  std::swap(V1, V2);
1762  std::swap(O1, O2);
1763  std::swap(V1Size, V2Size);
1764  std::swap(V1AAInfo, V2AAInfo);
1765  }
1766  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1767  AliasResult Result =
1768  aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
1769  if (Result != MayAlias)
1770  return AliasCache[Locs] = Result;
1771  }
1772 
1773  // If both pointers are pointing into the same object and one of them
1774  // accesses the entire object, then the accesses must overlap in some way.
1775  if (O1 == O2)
1776  if (V1Size.isPrecise() && V2Size.isPrecise() &&
1777  (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1778  isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1779  return AliasCache[Locs] = PartialAlias;
1780 
1781  // Recurse back into the best AA results we have, potentially with refined
1782  // memory locations. We have already ensured that BasicAA has a MayAlias
1783  // cache result for these, so any recursion back into BasicAA won't loop.
1784  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
1785  return AliasCache[Locs] = Result;
1786 }
1787 
1788 /// Check whether two Values can be considered equivalent.
1789 ///
1790 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1791 /// they cannot be part of a cycle in the value graph by looking at all
1792 /// visited phi nodes and making sure that the phis cannot reach the value. We
1793 /// have to do this because we are looking through phi nodes (that is, we say
1794 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1795 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1796  const Value *V2) {
1797  if (V != V2)
1798  return false;
1799 
1800  const Instruction *Inst = dyn_cast<Instruction>(V);
1801  if (!Inst)
1802  return true;
1803 
1804  if (VisitedPhiBBs.empty())
1805  return true;
1806 
1807  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1808  return false;
1809 
1810  // Make sure that the visited phis cannot reach the Value. This ensures that
1811  // the Values cannot come from different iterations of a potential cycle the
1812  // phi nodes could be involved in.
1813  for (auto *P : VisitedPhiBBs)
1814  if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
1815  return false;
1816 
1817  return true;
1818 }
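// Example of why pointer equality alone is insufficient here: after looking
// through a loop PHI, the same Value* (say, a GEP inside the loop) can stand
// for addresses computed in different iterations. The reachability check
// above rejects any value that a visited phi block can reach, since such a
// value may differ between iterations.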
1819 
1820 /// Computes the symbolic difference between two de-composed GEPs.
1821 ///
1822 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1823 /// instructions GEP1 and GEP2 which have common base pointers.
1824 void BasicAAResult::GetIndexDifference(
1825  SmallVectorImpl<VariableGEPIndex> &Dest,
1826  const SmallVectorImpl<VariableGEPIndex> &Src) {
1827  if (Src.empty())
1828  return;
1829 
1830  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1831  const Value *V = Src[i].V;
1832  unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1833  int64_t Scale = Src[i].Scale;
1834 
1835  // Find V in Dest. This is N^2, but pointer indices almost never have more
1836  // than a few variable indices.
1837  for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1838  if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1839  Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1840  continue;
1841 
1842  // If we found it, subtract off Scale V's from the entry in Dest. If it
1843  // goes to zero, remove the entry.
1844  if (Dest[j].Scale != Scale)
1845  Dest[j].Scale -= Scale;
1846  else
1847  Dest.erase(Dest.begin() + j);
1848  Scale = 0;
1849  break;
1850  }
1851 
1852  // If we didn't consume this entry, add it to the end of the Dest list.
1853  if (Scale) {
1854  VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1855  Dest.push_back(Entry);
1856  }
1857  }
1858 }
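// Worked example (assumed indices): with Dest == {(%x, Scale 4)} and
// Src == {(%x, Scale 1)}, and matching extension bits, the loop above
// rewrites Dest to {(%x, Scale 3)}; i.e. the symbolic difference
// GEP1 - GEP2 contributes 3 * %x plus the constant parts handled by callers.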
1859 
1860 bool BasicAAResult::constantOffsetHeuristic(
1861  const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1862  LocationSize MaybeV1Size, LocationSize MaybeV2Size, int64_t BaseOffset,
1863  AssumptionCache *AC, DominatorTree *DT) {
1864  if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
1865  MaybeV2Size == LocationSize::unknown())
1866  return false;
1867 
1868  const uint64_t V1Size = MaybeV1Size.getValue();
1869  const uint64_t V2Size = MaybeV2Size.getValue();
1870 
1871  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1872 
1873  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1874  Var0.Scale != -Var1.Scale)
1875  return false;
1876 
1877  unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1878 
1879  // We'll strip off the Extensions of Var0 and Var1 and do another round
1880  // of GetLinearExpression decomposition. For example, if Var0 is
1881  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.
1882 
1883  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1884  V1Offset(Width, 0);
1885  bool NSW = true, NUW = true;
1886  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1887  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1888  V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1889  NSW = true;
1890  NUW = true;
1891  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1892  V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1893 
1894  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1895  V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1896  return false;
1897 
1898  // We have a hit - Var0 and Var1 only differ by a constant offset!
1899 
1900  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1901  // Var1 is possible to calculate, but we're just interested in the absolute
1902  // minimum difference between the two. The minimum distance may occur due to
1903  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1904  // the minimum distance between %i and %i + 5 is 3.
1905  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1906  MinDiff = APIntOps::umin(MinDiff, Wrapped);
1907  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
1908 
1909  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1910  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1911  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1912  // V2Size can fit in the MinDiffBytes gap.
1913  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
1914  V2Size + std::abs(BaseOffset) <= MinDiffBytes;
1915 }
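// Worked example (assumed values): after GetIndexDifference, comparing
// &A[%x + 1] with &A[%x] for 4-byte elements leaves VarIndices
// {(%x + 1, Scale 4), (%x, Scale -4)}; both strip to %x with offsets 1 and
// 0, so MinDiff == 1 and MinDiffBytes == 4. Two accesses of at most 4 bytes
// with BaseOffset == 0 therefore get NoAlias.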
1916 
1917 //===----------------------------------------------------------------------===//
1918 // BasicAliasAnalysis Pass
1919 //===----------------------------------------------------------------------===//
1920 
1921 AnalysisKey BasicAA::Key;
1922 
1923 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1924  return BasicAAResult(F.getParent()->getDataLayout(),
1925  F,
1926  AM.getResult<TargetLibraryAnalysis>(F),
1927  AM.getResult<AssumptionAnalysis>(F),
1928  &AM.getResult<DominatorTreeAnalysis>(F),
1929  AM.getCachedResult<LoopAnalysis>(F),
1930  AM.getCachedResult<PhiValuesAnalysis>(F));
1931 }
1932 
1933 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1934  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1935 }
1936 
1937 char BasicAAWrapperPass::ID = 0;
1938 
1939 void BasicAAWrapperPass::anchor() {}
1940 
1942  "Basic Alias Analysis (stateless AA impl)", false, true)
1947  "Basic Alias Analysis (stateless AA impl)", false, true)
1948 
1949 FunctionPass *llvm::createBasicAAWrapperPass() {
1950  return new BasicAAWrapperPass();
1951 }
1952 
1953 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1954  auto &ACT = getAnalysis<AssumptionCacheTracker>();
1955  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1956  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1957  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
1958  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1959 
1960  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, TLIWP.getTLI(),
1961  ACT.getAssumptionCache(F), &DTWP.getDomTree(),
1962  LIWP ? &LIWP->getLoopInfo() : nullptr,
1963  PVWP ? &PVWP->getResult() : nullptr));
1964 
1965  return false;
1966 }
1967 
1968 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1969  AU.setPreservesAll();
1970  AU.addRequired<AssumptionCacheTracker>();
1971  AU.addRequired<DominatorTreeWrapperPass>();
1972  AU.addRequired<TargetLibraryInfoWrapperPass>();
1973  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1974 }
1975 
1976 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1977  return BasicAAResult(
1978  F.getParent()->getDataLayout(),
1979  F,
1980  P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
1981  P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1982 }