LLVM 6.0.0svn
BasicAliasAnalysis.cpp
//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));
/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // 1st parameter, then before llvm::getObjectSize() is called to get the
  // size of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
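  // For example, an i8 global with 8-byte alignment has a rounded object size
  // of 8 here, so an 8-byte access to it is not reported as exceeding the
  // object (illustrative example, not from the original source).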
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// reports whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
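///
/// Illustrative example (not from the original source): for
///   %y = mul i64 %x, 4
///   %z = add i64 %y, 16
/// analyzing %z yields Scale = 4, Offset = 16, and returns %x.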
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
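        // For example, if %x is known to be a multiple of 4, then
        // (%x | 3) == (%x + 3) because the low two bits of %x are zero
        // (illustrative, not from the original source).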
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// To ensure a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32b programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
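/// For example, with PointerSize == 32, an Offset of 4294967295 (0xFFFFFFFF)
/// is sign-extended back to -1, matching the two's complement wrap-around a
/// 32-bit machine would produce (illustrative, not from the original source).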
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
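///
/// Illustrative example (not from the original source): for
///   %p = getelementptr [10 x i32], [10 x i32]* %A, i64 0, i64 %i
/// the decomposition is Base = %A, a constant offset of 0, and a single
/// variable index {%i, Scale = 4}.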
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
    DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
    DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V))
        if (const Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }

      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (CS.onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod-only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return ModRefInfo::NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));

      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      Result = ModRefInfo::ModRef;
      break;
    }

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result))
      return Result;
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fall back to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<MemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
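///
/// Illustrative example (not from the original source): given
///   %struct = type { i32, i32 }
///   %a = getelementptr %struct, %struct* %p, i64 0, i32 0
///   %b = getelementptr %struct, %struct* %p, i64 0, i32 1
/// the two GEPs address disjoint fields of the same struct, so 4-byte
/// accesses through them cannot alias.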
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
             GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other indices
  // might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo *random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
    const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
    uint64_t ObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
    return false;

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset
  // from the base, then use it. If the GEP has variable indices, we're in
  // a bit more trouble: we can't count on the constant offsets that come
  // from non-struct sources, since these can be "rewound" by a negative
  // variable offset. So use only offsets that came from structs.
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  if (DecompGEP.VarIndices.empty())
    GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                                    const AAMDNodes &V1AAInfo, const Value *V2,
                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                    const Value *UnderlyingV1,
                                    const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
            GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, MemoryLocation::UnknownSize,
                               V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias GEP base pointer, conservatively returns MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done
      // through a pointer value associated with an address range of the
      // memory access, otherwise the behavior is undefined."
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...').
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
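    // Worked example (illustrative, not from the original source): comparing
    // &A[i][1] against &A[42][0] over i32 A[][2], the only variable scale is
    // 8, so Modulo == 8. The constant offset difference is 4 - 336 == -332,
    // which is 4 mod 8, and a 4-byte access at offset 4 mod 8 cannot overlap
    // a 4-byte access at offset 0 mod 8.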
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, uint64_t V2Size,
                                       const AAMDNodes &V2AAInfo,
                                       const Value *UnderV2) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                 SISize, SIAAInfo, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
                 UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}
1429 
1430 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1431 /// another.
1432 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
1433  const AAMDNodes &PNAAInfo, const Value *V2,
1434  uint64_t V2Size, const AAMDNodes &V2AAInfo,
1435  const Value *UnderV2) {
1436  // Track phi nodes we have visited. We use this information when we determine
1437  // value equivalence.
1438  VisitedPhiBBs.insert(PN->getParent());
1439 
1440  // If the values are PHIs in the same block, we can do a more precise
1441  // as well as efficient check: just check for aliases between the values
1442  // on corresponding edges.
1443  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1444  if (PN2->getParent() == PN->getParent()) {
1445  LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1446  MemoryLocation(V2, V2Size, V2AAInfo));
1447  if (PN > V2)
1448  std::swap(Locs.first, Locs.second);
1449  // Analyse the PHIs' inputs under the assumption that the PHIs are
1450  // NoAlias.
1451  // If the PHIs are May/MustAlias there must be (recursively) an input
1452  // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1453  // there must be an operation on the PHIs within the PHIs' value cycle
1454  // that causes a MayAlias.
1455  // Pretend the phis do not alias.
1456  AliasResult Alias = NoAlias;
1457  assert(AliasCache.count(Locs) &&
1458  "There must exist an entry for the phi node");
1459  AliasResult OrigAliasResult = AliasCache[Locs];
1460  AliasCache[Locs] = NoAlias;
1461 
1462  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1463  AliasResult ThisAlias =
1464  aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1465  PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1466  V2Size, V2AAInfo);
1467  Alias = MergeAliasResults(ThisAlias, Alias);
1468  if (Alias == MayAlias)
1469  break;
1470  }
1471 
1472  // Reset if speculation failed.
1473  if (Alias != NoAlias)
1474  AliasCache[Locs] = OrigAliasResult;
1475 
1476  return Alias;
1477  }
1478 
1479  SmallPtrSet<Value *, 4> UniqueSrc;
1480  SmallVector<Value *, 4> V1Srcs;
1481  bool isRecursive = false;
1482  for (Value *PV1 : PN->incoming_values()) {
1483  if (isa<PHINode>(PV1))
1484  // If any of the sources is itself a PHI, return MayAlias conservatively
1485  // to avoid compile time explosion. The worst possible case is if both
1486  // sides are PHI nodes, in which case this is O(m x n) time where 'm'
1487  // and 'n' are the numbers of PHI sources.
1488  return MayAlias;
1489 
1491  if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1492  // Check whether the incoming value is a GEP that advances the pointer
1493  // result of this PHI node (e.g. in a loop). If this is the case, we
1494  // would recurse and always get a MayAlias. Handle this case specially
1495  // below.
1496  if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1497  isa<ConstantInt>(PV1GEP->idx_begin())) {
1498  isRecursive = true;
1499  continue;
1500  }
1501  }
1502 
1503  if (UniqueSrc.insert(PV1).second)
1504  V1Srcs.push_back(PV1);
1505  }
1506 
1507  // If this PHI node is recursive, set the size of the accessed memory to
1508  // unknown to represent all the possible values the GEP could advance the
1509  // pointer to.
1510  if (isRecursive)
1511  PNSize = MemoryLocation::UnknownSize;
1512 
1513  AliasResult Alias =
1514  aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
1515  PNSize, PNAAInfo, UnderV2);
1516 
1517  // Early exit if the check of the first PHI source against V2 is MayAlias.
1518  // Other results are not possible.
1519  if (Alias == MayAlias)
1520  return MayAlias;
1521 
1522  // If all sources of the PHI node NoAlias or MustAlias V2, the result is
1523  // NoAlias / MustAlias. Otherwise, the result is MayAlias.
1524  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1525  Value *V = V1Srcs[i];
1526 
1527  AliasResult ThisAlias =
1528  aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
1529  Alias = MergeAliasResults(ThisAlias, Alias);
1530  if (Alias == MayAlias)
1531  break;
1532  }
1533 
1534  return Alias;
1535 }
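// The isRecursive path above targets the usual pointer-increment idiom
// (a sketch; only taken when -basicaa-recphi is enabled):
//
//   loop:
//     %p      = phi i8* [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr inbounds i8, i8* %p, i64 1
//
// The self-referencing GEP input is skipped and PNSize is widened to
// UnknownSize, so %p is compared against V2 as a pointer that may sit at any
// offset from %base.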
1536 
1537 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1538 /// array references.
1539 AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
1540  AAMDNodes V1AAInfo, const Value *V2,
1541  uint64_t V2Size, AAMDNodes V2AAInfo,
1542  const Value *O1, const Value *O2) {
1543  // If either of the memory references is empty, it doesn't matter what the
1544  // pointer values are.
1545  if (V1Size == 0 || V2Size == 0)
1546  return NoAlias;
1547 
1548  // Strip off any casts if they exist.
1549  V1 = V1->stripPointerCastsAndBarriers();
1550  V2 = V2->stripPointerCastsAndBarriers();
1551 
1552  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1553  // value for undef that aliases nothing in the program.
1554  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1555  return NoAlias;
1556 
1557  // Are we checking for alias of the same value?
1558  // Because we look 'through' phi nodes, we could look at "Value" pointers from
1559  // different iterations. We must therefore make sure that this is not the
1560  // case. The function isValueEqualInPotentialCycles ensures that this cannot
1561  // happen by looking at the visited phi nodes and making sure they cannot
1562  // reach the value.
1563  if (isValueEqualInPotentialCycles(V1, V2))
1564  return MustAlias;
1565 
1566  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1567  return NoAlias; // Scalars cannot alias each other
1568 
1569  // Figure out what objects these things are pointing to if we can.
1570  if (O1 == nullptr)
1571  O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
1572 
1573  if (O2 == nullptr)
1574  O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
1575 
1576  // Null values in the default address space don't point to any object, so they
1577  // don't alias any other pointer.
1578  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1579  if (CPN->getType()->getAddressSpace() == 0)
1580  return NoAlias;
1581  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1582  if (CPN->getType()->getAddressSpace() == 0)
1583  return NoAlias;
1584 
1585  if (O1 != O2) {
1586  // If V1/V2 point to two different objects, we know that we have no alias.
1587  if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1588  return NoAlias;
1589 
1590  // Constant pointers can't alias with non-const isIdentifiedObject objects.
1591  if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1592  (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1593  return NoAlias;
1594 
1595  // Function arguments can't alias with things that are known to be
1596  // unambiguously identified at the function level.
1597  if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1598  (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1599  return NoAlias;
1600 
1601  // If one pointer is the result of a call/invoke or load and the other is a
1602  // non-escaping local object within the same function, then we know the
1603  // object couldn't escape to a point where the call could return it.
1604  //
1605  // Note that if the pointers are in different functions, there are a
1606  // variety of complications. A call with a nocapture argument may still
1607  // temporarily store the nocapture argument's value in a temporary memory
1608  // location if that memory location doesn't escape. Or it may pass a
1609  // nocapture value to other functions as long as they don't capture it.
1610  if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
1611  return NoAlias;
1612  if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
1613  return NoAlias;
1614  }
1615 
1616  // If the size of one access is larger than the entire object on the other
1617  // side, then we know such behavior is undefined and can assume no alias.
1618  if ((V1Size != MemoryLocation::UnknownSize &&
1619  isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
1620  (V2Size != MemoryLocation::UnknownSize &&
1621  isObjectSmallerThan(O1, V2Size, DL, TLI)))
1622  return NoAlias;
1623 
1624  // Check the cache before climbing up use-def chains. This also terminates
1625  // otherwise infinitely recursive queries.
1626  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1627  MemoryLocation(V2, V2Size, V2AAInfo));
1628  if (V1 > V2)
1629  std::swap(Locs.first, Locs.second);
1630  std::pair<AliasCacheTy::iterator, bool> Pair =
1631  AliasCache.insert(std::make_pair(Locs, MayAlias));
1632  if (!Pair.second)
1633  return Pair.first->second;
1634 
1635  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1636  // GEP can't simplify, we don't even look at the PHI cases.
1637  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1638  std::swap(V1, V2);
1639  std::swap(V1Size, V2Size);
1640  std::swap(O1, O2);
1641  std::swap(V1AAInfo, V2AAInfo);
1642  }
1643  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1644  AliasResult Result =
1645  aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
1646  if (Result != MayAlias)
1647  return AliasCache[Locs] = Result;
1648  }
1649 
1650  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1651  std::swap(V1, V2);
1652  std::swap(O1, O2);
1653  std::swap(V1Size, V2Size);
1654  std::swap(V1AAInfo, V2AAInfo);
1655  }
1656  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1657  AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
1658  V2, V2Size, V2AAInfo, O2);
1659  if (Result != MayAlias)
1660  return AliasCache[Locs] = Result;
1661  }
1662 
1663  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1664  std::swap(V1, V2);
1665  std::swap(O1, O2);
1666  std::swap(V1Size, V2Size);
1667  std::swap(V1AAInfo, V2AAInfo);
1668  }
1669  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1670  AliasResult Result =
1671  aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
1672  if (Result != MayAlias)
1673  return AliasCache[Locs] = Result;
1674  }
1675 
1676  // If both pointers are pointing into the same object and one of them
1677  // accesses the entire object, then the accesses must overlap in some way.
1678  if (O1 == O2)
1679  if (V1Size != MemoryLocation::UnknownSize &&
1680  V2Size != MemoryLocation::UnknownSize &&
1681  (isObjectSize(O1, V1Size, DL, TLI) ||
1682  isObjectSize(O2, V2Size, DL, TLI)))
1683  return AliasCache[Locs] = PartialAlias;
1684 
1685  // Recurse back into the best AA results we have, potentially with refined
1686  // memory locations. We have already ensured that BasicAA has a MayAlias
1687  // cache result for these, so any recursion back into BasicAA won't loop.
1688  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
1689  return AliasCache[Locs] = Result;
1690 }
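// Dispatch sketch (illustrative): the swaps above canonicalize each query so
// the more structured value becomes V1 before delegating:
//
//   alias(phi, gep)    -> aliasGEP(gep, ..., phi)     // GEPs first,
//   alias(select, phi) -> aliasPHI(phi, ..., select)  // then PHIs,
//   alias(x, select)   -> aliasSelect(select, ..., x) // then Selects.
//
// Every answer is memoized in AliasCache under the pointer-ordered LocPair;
// the provisional MayAlias entry inserted before dispatch is what terminates
// otherwise infinitely recursive queries.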
1691 
1692 /// Check whether two Values can be considered equivalent.
1693 ///
1694 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1695 /// they cannot be part of a cycle in the value graph by looking at all
1696 /// visited phi nodes and making sure that the phis cannot reach the value.
1697 /// We have to do this because we are looking through phi nodes (that is,
1698 /// we say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1699 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1700  const Value *V2) {
1701  if (V != V2)
1702  return false;
1703 
1704  const Instruction *Inst = dyn_cast<Instruction>(V);
1705  if (!Inst)
1706  return true;
1707 
1708  if (VisitedPhiBBs.empty())
1709  return true;
1710 
1711  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1712  return false;
1713 
1714  // Make sure that the visited phis cannot reach the Value. This ensures that
1715  // the Values cannot come from different iterations of a potential cycle the
1716  // phi nodes could be involved in.
1717  for (auto *P : VisitedPhiBBs)
1718  if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
1719  return false;
1720 
1721  return true;
1722 }
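// Why pointer equality alone is not enough here (a sketch): in
//
//   loop:
//     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
//     %q = getelementptr i32, i32* %p, i64 %i
//
// the single Value %q denotes a different address on every trip around the
// loop, so two occurrences of %q reached through a visited phi may only be
// treated as equal when no visited phi block can reach %q's definition.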
1723 
1724 /// Computes the symbolic difference between two de-composed GEPs.
1725 ///
1726 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1727 /// instructions GEP1 and GEP2 which have common base pointers.
1728 void BasicAAResult::GetIndexDifference(
1729  SmallVectorImpl<VariableGEPIndex> &Dest,
1730  const SmallVectorImpl<VariableGEPIndex> &Src) {
1731  if (Src.empty())
1732  return;
1733 
1734  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1735  const Value *V = Src[i].V;
1736  unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1737  int64_t Scale = Src[i].Scale;
1738 
1739  // Find V in Dest. This is N^2, but GEPs almost never have more than a
1740  // few variable indices.
1741  for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1742  if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1743  Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1744  continue;
1745 
1746  // If we found it, subtract off Scale V's from the entry in Dest. If it
1747  // goes to zero, remove the entry.
1748  if (Dest[j].Scale != Scale)
1749  Dest[j].Scale -= Scale;
1750  else
1751  Dest.erase(Dest.begin() + j);
1752  Scale = 0;
1753  break;
1754  }
1755 
1756  // If we didn't consume this entry, add it to the end of the Dest list.
1757  if (Scale) {
1758  VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1759  Dest.push_back(Entry);
1760  }
1761  }
1762 }
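// A worked example (sketch): with Dest == { %x*4 } and Src == { %x*4, %y*2 },
// the matching %x entries cancel and the Dest entry is erased, while the
// unmatched %y entry is appended negated, leaving Dest == { %y*-2 } -- the
// symbolic difference of the two GEPs' variable parts.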
1763 
1764 bool BasicAAResult::constantOffsetHeuristic(
1765  const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
1766  uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
1767  DominatorTree *DT) {
1768  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
1769  V2Size == MemoryLocation::UnknownSize)
1770  return false;
1771 
1772  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1773 
1774  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1775  Var0.Scale != -Var1.Scale)
1776  return false;
1777 
1778  unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1779 
1780  // We'll strip off the Extensions of Var0 and Var1 and do another round
1781  // of GetLinearExpression decomposition. For example, if Var0 is
1782  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.
1783 
1784  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1785  V1Offset(Width, 0);
1786  bool NSW = true, NUW = true;
1787  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1788  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1789  V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1790  NSW = true;
1791  NUW = true;
1792  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1793  V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1794 
1795  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1796  V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1797  return false;
1798 
1799  // We have a hit - Var0 and Var1 only differ by a constant offset!
1800 
1801  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1802  // Var1 is possible to calculate, but we're just interested in the absolute
1803  // minimum difference between the two. The minimum distance may occur due to
1804  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1805  // the minimum distance between %i and %i + 5 is 3.
1806  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1807  MinDiff = APIntOps::umin(MinDiff, Wrapped);
1808  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
1809 
1810  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1811  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1812  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1813  // V2Size can fit in the MinDiffBytes gap.
1814  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
1815  V2Size + std::abs(BaseOffset) <= MinDiffBytes;
1816 }
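// A numeric sketch of the wrap-aware distance above: for 3-bit indices %i and
// %i + 5 with |Scale| == 1, V0Offset - V1Offset == -5 == 3 (mod 8), so
// MinDiff == umin(3, 5) == 3 and MinDiffBytes == 3. With BaseOffset == 0 the
// accesses are NoAlias only when both V1Size and V2Size fit in those 3 bytes.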
1817 
1818 //===----------------------------------------------------------------------===//
1819 // BasicAliasAnalysis Pass
1820 //===----------------------------------------------------------------------===//
1821 
1822 AnalysisKey BasicAA::Key;
1823 
1824 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1825  return BasicAAResult(F.getParent()->getDataLayout(),
1826  AM.getResult<TargetLibraryAnalysis>(F),
1827  AM.getResult<AssumptionAnalysis>(F),
1828  &AM.getResult<DominatorTreeAnalysis>(F),
1829  AM.getCachedResult<LoopAnalysis>(F));
1830 }
1831 
1832 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1833  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1834 }
1835 
1836 char BasicAAWrapperPass::ID = 0;
1837 
1838 void BasicAAWrapperPass::anchor() {}
1839 
1840 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
1841  "Basic Alias Analysis (stateless AA impl)", true, true)
1842 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1843 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1844 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1845 INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
1846  "Basic Alias Analysis (stateless AA impl)", true, true)
1847 
1848 FunctionPass *llvm::createBasicAAWrapperPass() {
1849  return new BasicAAWrapperPass();
1850 }
1851 
1852 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1853  auto &ACT = getAnalysis<AssumptionCacheTracker>();
1854  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1855  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1856  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
1857 
1858  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
1859  ACT.getAssumptionCache(F), &DTWP.getDomTree(),
1860  LIWP ? &LIWP->getLoopInfo() : nullptr));
1861 
1862  return false;
1863 }
1864 
1865 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1866  AU.setPreservesAll();
1867  AU.addRequired<AssumptionCacheTracker>();
1868  AU.addRequired<DominatorTreeWrapperPass>();
1869  AU.addRequired<TargetLibraryInfoWrapperPass>();
1870 }
1871 
1872 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1873  return BasicAAResult(
1874  F.getParent()->getDataLayout(),
1875  P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
1876  P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1877 }