//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));
/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}
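// For example, an alloca whose address is only ever loaded from and stored
// through stays local (PointerMayBeCaptured returns false):
//
//   %buf = alloca [16 x i8]
//   %p = getelementptr [16 x i8], [16 x i8]* %buf, i64 0, i64 0
//   store i8 0, i8* %p
//
// whereas storing %buf itself into a global would count as a capture.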

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // 1st parameter, before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
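/// For example, decomposing the i64 value
///
///   %m = mul nsw i32 %x, 4
///   %e = sext i32 %m to i64
///
/// yields (roughly) V = %x, Scale = 4, Offset = 0, and SExtBits = 32; the nsw
/// flag on the multiply is what allows the sext to be looked through.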
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {

      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // the semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// To ensure a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32b programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}
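// For example, with PointerSize == 32 the shift pair above sign-extends from
// bit 31: an accumulated offset of 0x100000000 becomes 0 and 0xFFFFFFFF
// becomes -1, matching the wrap-around a 32-bit pointer computation performs.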

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
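/// For example (a sketch; the exact offsets depend on the DataLayout):
///
///   %a = getelementptr inbounds %struct.S, %struct.S* %p, i64 %i, i32 1
///
/// decomposes into Base = %p, StructOffset = the layout offset of field 1,
/// OtherOffset = 0, and a single VarIndices entry {%i, Scale = sizeof(S)}.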
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
    DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
    DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V))
        if (const Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }

      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
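/// For example, a pointer into a global such as "@g = constant [4 x i32]
/// zeroinitializer" is constant memory, and with OrLocal set an alloca
/// qualifies too; selects and phis of such pointers are chased by the
/// worklist below.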
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e Mod only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx + 1, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc::Func F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc::memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {

  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return MRI_Mod;

  if (CS.paramHasAttr(ArgIdx + 1, Attribute::ReadOnly))
    return MRI_Ref;

  if (CS.paramHasAttr(ArgIdx + 1, Attribute::ReadNone))
    return MRI_NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return MRI_NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return MRI_NoModRef;
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocLikeFn(Inst, &TLI) || isCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return MRI_NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<MemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return MRI_Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return MRI_Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = MRI_NoModRef;
    if (SrcAA != NoAlias)
      rv = static_cast<ModRefInfo>(rv | MRI_Ref);
    if (DestAA != NoAlias)
      rv = static_cast<ModRefInfo>(rv | MRI_Mod);
    return rv;
  }
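  // Illustratively, for memcpy(%dst, %src, %n): a Loc that exactly matches
  // the source yields MRI_Ref, one that exactly matches the destination
  // yields MRI_Mod, and a Loc that may overlap both combines to MRI_ModRef.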

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return MRI_NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return MRI_Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // the rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return MRI_Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return MRI_NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special-case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return getModRefBehavior(CS2) & MRI_Mod ? MRI_Ref : MRI_NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return getModRefBehavior(CS1) & MRI_Mod ? MRI_Mod : MRI_NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
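///
/// For example, given "%struct.ST = type { i32, i32 }", the i32 accesses
///   %f0 = getelementptr inbounds %struct.ST, %struct.ST* %p, i64 0, i32 0
///   %f1 = getelementptr inbounds %struct.ST, %struct.ST* %p, i64 0, i32 1
/// index distinct, non-overlapping fields of the same struct, so the rules
/// below report NoAlias for them.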
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {

  assert(GEP1->getPointerOperand()->stripPointerCasts() ==
             GEP2->getPointerOperand()->stripPointerCasts() &&
         GEP1->getPointerOperand()->getType() ==
             GEP2->getPointerOperand()->getType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    if (isKnownNonEqual(GEP1->getOperand(GEP1->getNumOperands() - 1),
                        GEP2->getOperand(GEP2->getNumOperands() - 1),
                        DL))
      return NoAlias;
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, then the GEP can not alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random can not be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      uint64_t ObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
    return false;

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset
  // from the base, then use it. If the GEP has variable indices, we're in
  // a bit more trouble: we can't count on the constant offsets that come
  // from non-struct sources, since these can be "rewound" by a negative
  // variable offset. So use only offsets that came from structs.
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  if (DecompGEP.VarIndices.empty())
    GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                                    const AAMDNodes &V1AAInfo, const Value *V2,
                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                    const Value *UnderlyingV1,
                                    const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias)
      return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCasts() ==
            GEP2->getPointerOperand()->stripPointerCasts() &&
        GEP1->getPointerOperand()->getType() ==
            GEP2->getPointerOperand()->getType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, V2Size,
                               V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias)
      // If V2 may alias GEP base pointer, conservatively returns MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done through
      // a pointer value associated with an address range of the memory access,
      // otherwise the behavior is undefined.".
      return R;

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two GEP Case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEP's (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...).
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        bool SignKnownZero, SignKnownOne;
        ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
                       0, &AC, nullptr, DT);

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));
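    // This keeps only the lowest set bit; e.g. scales of 20 (0b10100) and
    // 8 (0b1000) OR together to 28 and reduce to Modulo == 4, so every
    // variable contribution is a multiple of 4 and each address is congruent
    // to its constant base offset mod 4.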

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, uint64_t V2Size,
                                       const AAMDNodes &V2AAInfo,
                                       const Value *UnderV2) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                 SISize, SIAAInfo, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
                 UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}
1361 
1362 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1363 /// another.
1364 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
1365  const AAMDNodes &PNAAInfo, const Value *V2,
1366  uint64_t V2Size, const AAMDNodes &V2AAInfo,
1367  const Value *UnderV2) {
1368  // Track phi nodes we have visited. We use this information when we determine
1369  // value equivalence.
1370  VisitedPhiBBs.insert(PN->getParent());
1371 
1372  // If the values are PHIs in the same block, we can do a more precise
1373  // as well as efficient check: just check for aliases between the values
1374  // on corresponding edges.
1375  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1376  if (PN2->getParent() == PN->getParent()) {
1377  LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1378  MemoryLocation(V2, V2Size, V2AAInfo));
1379  if (PN > V2)
1380  std::swap(Locs.first, Locs.second);
1381  // Analyse the PHIs' inputs under the assumption that the PHIs are
1382  // NoAlias.
1383  // If the PHIs are May/MustAlias there must be (recursively) an input
1384  // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1385  // there must be an operation on the PHIs within the PHIs' value cycle
1386  // that causes a MayAlias.
1387  // Pretend the phis do not alias.
1388  AliasResult Alias = NoAlias;
1389  assert(AliasCache.count(Locs) &&
1390  "There must exist an entry for the phi node");
1391  AliasResult OrigAliasResult = AliasCache[Locs];
1392  AliasCache[Locs] = NoAlias;
1393 
1394  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1395  AliasResult ThisAlias =
1396  aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1397  PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1398  V2Size, V2AAInfo);
1399  Alias = MergeAliasResults(ThisAlias, Alias);
1400  if (Alias == MayAlias)
1401  break;
1402  }
1403 
1404  // Reset if speculation failed.
1405  if (Alias != NoAlias)
1406  AliasCache[Locs] = OrigAliasResult;
1407 
1408  return Alias;
1409  }
1410 
1411  SmallPtrSet<Value *, 4> UniqueSrc;
1412  SmallVector<Value *, 4> V1Srcs;
1413  bool isRecursive = false;
1414  for (Value *PV1 : PN->incoming_values()) {
1415  if (isa<PHINode>(PV1))
1416  // If any of the source itself is a PHI, return MayAlias conservatively
1417  // to avoid compile time explosion. The worst possible case is if both
1418  // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
1419  // and 'n' are the number of PHI sources.
1420  return MayAlias;
1421 
1423  if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1424  // Check whether the incoming value is a GEP that advances the pointer
1425  // result of this PHI node (e.g. in a loop). If this is the case, we
1426  // would recurse and always get a MayAlias. Handle this case specially
1427  // below.
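  // A typical shape of this special case (hypothetical IR):
  //   loop:
  //     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //     %p.next = getelementptr inbounds i8, i8* %p, i64 4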
1428  if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1429  isa<ConstantInt>(PV1GEP->idx_begin())) {
1430  isRecursive = true;
1431  continue;
1432  }
1433  }
1434 
1435  if (UniqueSrc.insert(PV1).second)
1436  V1Srcs.push_back(PV1);
1437  }
1438 
1439  // If this PHI node is recursive, set the size of the accessed memory to
1440  // unknown to represent all the possible values the GEP could advance the
1441  // pointer to.
1442  if (isRecursive)
1443  PNSize = MemoryLocation::UnknownSize;
1444 
1445  AliasResult Alias =
1446  aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
1447  PNSize, PNAAInfo, UnderV2);
1448 
1449  // Early exit if the check of the first PHI source against V2 is MayAlias.
1450  // Other results are not possible.
1451  if (Alias == MayAlias)
1452  return MayAlias;
1453 
1454  // If all sources of the PHI node NoAlias or MustAlias V2, then return
1455  // NoAlias / MustAlias. Otherwise, return MayAlias.
1456  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1457  Value *V = V1Srcs[i];
1458 
1459  AliasResult ThisAlias =
1460  aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
1461  Alias = MergeAliasResults(ThisAlias, Alias);
1462  if (Alias == MayAlias)
1463  break;
1464  }
1465 
1466  return Alias;
1467 }
1468 
1469 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1470 /// array references.
1471 AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
1472  AAMDNodes V1AAInfo, const Value *V2,
1473  uint64_t V2Size, AAMDNodes V2AAInfo,
1474  const Value *O1, const Value *O2) {
1475  // If either of the memory references is empty, it doesn't matter what the
1476  // pointer values are.
1477  if (V1Size == 0 || V2Size == 0)
1478  return NoAlias;
1479 
1480  // Strip off any casts if they exist.
1481  V1 = V1->stripPointerCasts();
1482  V2 = V2->stripPointerCasts();
1483 
1484  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1485  // value for undef that aliases nothing in the program.
1486  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1487  return NoAlias;
1488 
1489  // Are we checking for alias of the same value?
1490  // Because we look 'through' phi nodes, we could look at "Value" pointers from
1491  // different iterations. We must therefore make sure that this is not the
1492  // case. The function isValueEqualInPotentialCycles ensures that this cannot
1493  // happen by looking at the visited phi nodes and making sure they cannot
1494  // reach the value.
1495  if (isValueEqualInPotentialCycles(V1, V2))
1496  return MustAlias;
1497 
1498  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1499  return NoAlias; // Scalars cannot alias each other
1500 
1501  // Figure out what objects these things are pointing to if we can.
1502  if (O1 == nullptr)
1503  O1 = GetUnderlyingObject(const_cast<Value *>(V1), DL, MaxLookupSearchDepth);
1504 
1505  if (O2 == nullptr)
1506  O2 = GetUnderlyingObject(const_cast<Value *>(V2), DL, MaxLookupSearchDepth);
1507 
1508  // Null values in the default address space don't point to any object, so they
1509  // don't alias any other pointer.
1510  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1511  if (CPN->getType()->getAddressSpace() == 0)
1512  return NoAlias;
1513  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1514  if (CPN->getType()->getAddressSpace() == 0)
1515  return NoAlias;
1516 
1517  if (O1 != O2) {
1518  // If V1/V2 point to two different objects, we know that we have no alias.
1519  if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1520  return NoAlias;
1521 
1522  // Constant pointers can't alias with non-const isIdentifiedObject objects.
1523  if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1524  (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1525  return NoAlias;
1526 
1527  // Function arguments can't alias with things that are known to be
1528  // unambiguously identified at the function level.
1529  if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1530  (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1531  return NoAlias;
1532 
1533  // Most objects can't alias null.
1534  if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
1535  (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
1536  return NoAlias;
1537 
1538  // If one pointer is the result of a call/invoke or load and the other is a
1539  // non-escaping local object within the same function, then we know the
1540  // object couldn't escape to a point where the call could return it.
1541  //
1542  // Note that if the pointers are in different functions, there are a
1543  // variety of complications. A call with a nocapture argument may still
1544  // temporarily store the nocapture argument's value in a temporary memory
1545  // location if that memory location doesn't escape. Or it may pass a
1546  // nocapture value to other functions as long as they don't capture it.
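  // Illustrative IR (hypothetical): with
  //   %buf = alloca [16 x i8]       ; local object that never escapes
  //   %ret = call i8* @opaque()     ; escape source
  // alias(%buf, %ret) is NoAlias, because @opaque had no way to obtain
  // the address of %buf.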
1547  if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
1548  return NoAlias;
1549  if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
1550  return NoAlias;
1551  }
1552 
1553  // If the size of one access is larger than the entire object on the other
1554  // side, then we know such behavior is undefined and can assume no alias.
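  // Concrete instance (for exposition): an 8-byte access overlapping a
  // 4-byte alloca would read or write past the end of the object, which
  // is undefined behavior, so the pair can safely be reported NoAlias.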
1555  if ((V1Size != MemoryLocation::UnknownSize &&
1556  isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
1557  (V2Size != MemoryLocation::UnknownSize &&
1558  isObjectSmallerThan(O1, V2Size, DL, TLI)))
1559  return NoAlias;
1560 
1561  // Check the cache before climbing up use-def chains. This also terminates
1562  // otherwise infinitely recursive queries.
1563  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1564  MemoryLocation(V2, V2Size, V2AAInfo));
1565  if (V1 > V2)
1566  std::swap(Locs.first, Locs.second);
1567  std::pair<AliasCacheTy::iterator, bool> Pair =
1568  AliasCache.insert(std::make_pair(Locs, MayAlias));
1569  if (!Pair.second)
1570  return Pair.first->second;
1571 
1572  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1573  // GEP can't simplify, we don't even look at the PHI cases.
1574  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1575  std::swap(V1, V2);
1576  std::swap(V1Size, V2Size);
1577  std::swap(O1, O2);
1578  std::swap(V1AAInfo, V2AAInfo);
1579  }
1580  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1581  AliasResult Result =
1582  aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
1583  if (Result != MayAlias)
1584  return AliasCache[Locs] = Result;
1585  }
1586 
1587  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1588  std::swap(V1, V2);
1589  std::swap(O1, O2);
1590  std::swap(V1Size, V2Size);
1591  std::swap(V1AAInfo, V2AAInfo);
1592  }
1593  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1594  AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
1595  V2, V2Size, V2AAInfo, O2);
1596  if (Result != MayAlias)
1597  return AliasCache[Locs] = Result;
1598  }
1599 
1600  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1601  std::swap(V1, V2);
1602  std::swap(O1, O2);
1603  std::swap(V1Size, V2Size);
1604  std::swap(V1AAInfo, V2AAInfo);
1605  }
1606  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1607  AliasResult Result =
1608  aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
1609  if (Result != MayAlias)
1610  return AliasCache[Locs] = Result;
1611  }
1612 
1613  // If both pointers are pointing into the same object and one of them
1614  // accesses the entire object, then the accesses must overlap in some way.
1615  if (O1 == O2)
1616  if ((V1Size != MemoryLocation::UnknownSize &&
1617  isObjectSize(O1, V1Size, DL, TLI)) ||
1618  (V2Size != MemoryLocation::UnknownSize &&
1619  isObjectSize(O2, V2Size, DL, TLI)))
1620  return AliasCache[Locs] = PartialAlias;
1621 
1622  // Recurse back into the best AA results we have, potentially with refined
1623  // memory locations. We have already ensured that BasicAA has a MayAlias
1624  // cache result for these, so any recursion back into BasicAA won't loop.
1625  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
1626  return AliasCache[Locs] = Result;
1627 }
1628 
1629 /// Check whether two Values can be considered equivalent.
1630 ///
1631 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1632 /// they cannot be part of a cycle in the value graph by looking at all
1633 /// visited phi nodes and making sure that the phis cannot reach the value. We
1634 /// have to do this because we are looking through phi nodes (that is, we say
1635 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
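/// Illustrative counter-example (hypothetical IR):
///   loop:
///     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
///     %p = getelementptr i32, i32* %base, i64 %i
/// Textually %p == %p, yet across two iterations of the loop the same
/// Value denotes two different addresses, so pointer identity alone is
/// insufficient once a visited phi can reach the instruction.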
1636 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1637  const Value *V2) {
1638  if (V != V2)
1639  return false;
1640 
1641  const Instruction *Inst = dyn_cast<Instruction>(V);
1642  if (!Inst)
1643  return true;
1644 
1645  if (VisitedPhiBBs.empty())
1646  return true;
1647 
1648  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1649  return false;
1650 
1651  // Make sure that the visited phis cannot reach the Value. This ensures that
1652  // the Values cannot come from different iterations of a potential cycle the
1653  // phi nodes could be involved in.
1654  for (auto *P : VisitedPhiBBs)
1655  if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
1656  return false;
1657 
1658  return true;
1659 }
1660 
1661 /// Computes the symbolic difference between two de-composed GEPs.
1662 ///
1663 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1664 /// instructions GEP1 and GEP2 which have common base pointers.
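/// Worked example (for exposition, not from the original source): with
/// Dest = {%x * 4} and Src = {%x * 4, %y * 8}, the matching %x terms
/// cancel and the unmatched %y term is negated, leaving Dest = {%y * -8},
/// i.e. Dest becomes Dest - Src.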
1665 void BasicAAResult::GetIndexDifference(
1666  SmallVectorImpl<VariableGEPIndex> &Dest,
1667  const SmallVectorImpl<VariableGEPIndex> &Src) {
1668  if (Src.empty())
1669  return;
1670 
1671  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1672  const Value *V = Src[i].V;
1673  unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1674  int64_t Scale = Src[i].Scale;
1675 
1676  // Find V in Dest. This is N^2, but pointer indices almost never have more
1677  // than a few variable indices.
1678  for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1679  if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1680  Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1681  continue;
1682 
1683  // If we found it, subtract off Scale V's from the entry in Dest. If it
1684  // goes to zero, remove the entry.
1685  if (Dest[j].Scale != Scale)
1686  Dest[j].Scale -= Scale;
1687  else
1688  Dest.erase(Dest.begin() + j);
1689  Scale = 0;
1690  break;
1691  }
1692 
1693  // If we didn't consume this entry, add it to the end of the Dest list.
1694  if (Scale) {
1695  VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1696  Dest.push_back(Entry);
1697  }
1698  }
1699 }
1700 
1701 bool BasicAAResult::constantOffsetHeuristic(
1702  const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
1703  uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
1704  DominatorTree *DT) {
1705  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
1706  V2Size == MemoryLocation::UnknownSize)
1707  return false;
1708 
1709  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1710 
1711  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1712  Var0.Scale != -Var1.Scale)
1713  return false;
1714 
1715  unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1716 
1717  // We'll strip off the extensions of Var0 and Var1 and do another round
1718  // of GetLinearExpression decomposition. For example, if Var0 is
1719  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.
1720 
1721  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1722  V1Offset(Width, 0);
1723  bool NSW = true, NUW = true;
1724  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1725  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1726  V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1727  NSW = true;
1728  NUW = true;
1729  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1730  V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1731 
1732  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1733  V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1734  return false;
1735 
1736  // We have a hit - Var0 and Var1 only differ by a constant offset!
1737 
1738  // If we've been sext'ed then zext'd, the maximum difference between Var0
1739  // and Var1 is possible to calculate, but we're only interested in the
1740  // absolute minimum difference between the two. The minimum distance may
1741  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
1742  // (7 + 5) mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
1743  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1744  MinDiff = APIntOps::umin(MinDiff, Wrapped);
1745  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
1746 
1747  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1748  // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
1749  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1750  // V2Size can fit in the MinDiffBytes gap.
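  // Worked example (for exposition): with i3 offsets 0 and 5, the
  // difference 0 - 5 wraps to 3, so MinDiff = umin(3, 5) == 3; with
  // |Scale| == 1 and BaseOffset == 0, accesses of up to 3 bytes each are
  // declared NoAlias, while 4-byte accesses are not.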
1751  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
1752  V2Size + std::abs(BaseOffset) <= MinDiffBytes;
1753 }
1754 
1755 //===----------------------------------------------------------------------===//
1756 // BasicAliasAnalysis Pass
1757 //===----------------------------------------------------------------------===//
1758 
1759 AnalysisKey BasicAA::Key;
1760 
1761 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1762  return BasicAAResult(F.getParent()->getDataLayout(),
1763  AM.getResult<TargetLibraryAnalysis>(F),
1764  AM.getResult<AssumptionAnalysis>(F),
1765  &AM.getResult<DominatorTreeAnalysis>(F),
1766  AM.getCachedResult<LoopAnalysis>(F));
1767 }
1768 
1769 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1770  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1771 }
1772 
1773 char BasicAAWrapperPass::ID = 0;
1774 void BasicAAWrapperPass::anchor() {}
1775 
1776 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
1777  "Basic Alias Analysis (stateless AA impl)", true, true)
1778 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1779 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1780 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1781 INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
1782  "Basic Alias Analysis (stateless AA impl)", true, true)
1783 
1784 FunctionPass *llvm::createBasicAAWrapperPass() {
1785  return new BasicAAWrapperPass();
1786 }
1787 
1788 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1789  auto &ACT = getAnalysis<AssumptionCacheTracker>();
1790  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1791  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1792  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
1793 
1794  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
1795  ACT.getAssumptionCache(F), &DTWP.getDomTree(),
1796  LIWP ? &LIWP->getLoopInfo() : nullptr));
1797 
1798  return false;
1799 }
1800 
1801 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1802  AU.setPreservesAll();
1803  AU.addRequired<AssumptionCacheTracker>();
1804  AU.addRequired<DominatorTreeWrapperPass>();
1805  AU.addRequired<TargetLibraryInfoWrapperPass>();
1806 }
1807 
1808 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1809  return BasicAAResult(
1810  F.getParent()->getDataLayout(),
1811  P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
1812  P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1813 }