LLVM 6.0.0svn
BasicAliasAnalysis.cpp
//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));
/// SearchLimitReached / SearchTimes shows how often the limit to
/// decompose GEPs is reached. It will affect the precision of basic
/// alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}
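
// For illustration (a hypothetical snippet, not part of the original source):
// %a below is a non-escaping local object, while %b escapes through the
// assumed external call @take_ptr, so the capture check above rejects it:
//
//   %a = alloca i32                  ; address never leaves the function
//   %b = alloca i32
//   call void @take_ptr(i32* %b)     ; PointerMayBeCaptured(%b, ...) is true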

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // 1st parameter, then before llvm::getObjectSize() is called to get the
  // size of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts and returns V as a Value*,
/// and returns whether we looked through any sign or zero extends. The
/// incoming Value is known to have IntegerType, and it may already be sign or
/// zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {

      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}
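
// For illustration (a hypothetical input, not part of the original source):
// calling GetLinearExpression on %i below walks through the mul and the add
// and yields V = %x, Scale = 4, Offset = 20, since (%x + 5) * 4 == 4*%x + 20,
// with NSW remaining true because both operations carry the nsw flag:
//
//   %a = add nsw i32 %x, 5
//   %i = mul nsw i32 %a, 4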

/// To ensure a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32b programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}
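
// For illustration (assumed values, not part of the original source): with a
// 32-bit pointer, adjustToPointerSize(0xFFFFFFFF, 32) shifts the offset up by
// 32 bits and arithmetic-shifts it back down, sign-extending the low 32 bits
// and returning -1, which models two's complement wrap-around at 32 bits.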

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V))
        if (const Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }

      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}
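
// For illustration (a hypothetical GEP, not part of the original source):
// given %struct.S = type { i32, i32 } (8 bytes), decomposing
//
//   %p = getelementptr inbounds %struct.S, %struct.S* %base, i64 %i, i32 1
//
// produces roughly Base = %base, StructOffset = 4 (field 1), OtherOffset = 0,
// and one VarIndex { V = %i, Scale = 8 }, i.e. %p == %base + 8*%i + 4.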

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e. Mod only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {

  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return MRI_Mod;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return MRI_Ref;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
    return MRI_NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements; always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return MRI_NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = MRI_NoModRef;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));

      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
        Result = static_cast<ModRefInfo>(Result | MRI_Ref);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
        Result = static_cast<ModRefInfo>(Result | MRI_Mod);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      Result = MRI_ModRef;
      break;
    }

    // Early return if we improved mod ref information.
    if (Result != MRI_ModRef)
      return Result;
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return MRI_NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<MemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return MRI_Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return MRI_Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = MRI_NoModRef;
    if (SrcAA != NoAlias)
      rv = static_cast<ModRefInfo>(rv | MRI_Ref);
    if (DestAA != NoAlias)
      rv = static_cast<ModRefInfo>(rv | MRI_Mod);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return MRI_NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return MRI_Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation would cause the second store to be ignored (based on
  // the rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return MRI_Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return MRI_NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special-case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return getModRefBehavior(CS2) & MRI_Mod ? MRI_Ref : MRI_NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return getModRefBehavior(CS1) & MRI_Mod ? MRI_Mod : MRI_NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {

  assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
             GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the
    // array/pointer that GEP2 indexes into must either precisely overlap or
    // be disjoint. Because they cannot partially overlap and because fields
    // in an array cannot overlap, if we can prove the final indices are
    // different between GEP1 and GEP2, we can conclude GEP1 and GEP2 don't
    // alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}
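
// For illustration (a hypothetical example, not part of the original source):
// with %struct.S = type { i32, i32 } and 4-byte accesses, the struct-field
// rule above proves %f0 and %f1 below do not alias, because the constant last
// indices 0 and 1 select non-overlapping fields of the same struct:
//
//   %f0 = getelementptr %struct.S, %struct.S* %s, i64 0, i32 0
//   %f1 = getelementptr %struct.S, %struct.S* %s, i64 0, i32 1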

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, then the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      uint64_t ObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
    return false;

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset
  // from the base, then use it. If the GEP has variable indices, we're in
  // a bit more trouble: we can't count on the constant offsets that come
  // from non-struct sources, since these can be "rewound" by a negative
  // variable offset. So use only offsets that came from structs.
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  if (DecompGEP.VarIndices.empty())
    GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                                    const AAMDNodes &V1AAInfo, const Value *V2,
                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                    const Value *UnderlyingV1,
                                    const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias)
      return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
            GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, V2Size,
                               V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias)
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done through
      // a pointer value associated with an address range of the memory access,
      // otherwise the behavior is undefined.".
      return R;

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |->V1Size        |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...').
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

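    // (Added descriptive note: the expression below clears all but the lowest
    // set bit of Modulo, leaving a power of two that divides every scale, so
    // the address difference is meaningful modulo that stride.)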
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, uint64_t V2Size,
                                       const AAMDNodes &V2AAInfo,
                                       const Value *UnderV2) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                 SISize, SIAAInfo, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
                 UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}
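
// For illustration (a hypothetical example, not part of the original source):
// because the two selects below share the condition %c, the arm-wise check
// applies: the result merges aliasCheck(%a, %x) and aliasCheck(%b, %y), so if
// both arm pairs are NoAlias, %p and %q are NoAlias as well:
//
//   %p = select i1 %c, i32* %a, i32* %b
//   %q = select i1 %c, i32* %x, i32* %y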
1400 
1401 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1402 /// another.
1403 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
1404  const AAMDNodes &PNAAInfo, const Value *V2,
1405  uint64_t V2Size, const AAMDNodes &V2AAInfo,
1406  const Value *UnderV2) {
1407  // Track phi nodes we have visited. We use this information when we determine
1408  // value equivalence.
1409  VisitedPhiBBs.insert(PN->getParent());
1410 
1411  // If the values are PHIs in the same block, we can do a more precise
1412  // as well as efficient check: just check for aliases between the values
1413  // on corresponding edges.
1414  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1415  if (PN2->getParent() == PN->getParent()) {
1416  LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1417  MemoryLocation(V2, V2Size, V2AAInfo));
1418  if (PN > V2)
1419  std::swap(Locs.first, Locs.second);
1420  // Analyse the PHIs' inputs under the assumption that the PHIs are
1421  // NoAlias.
1422  // If the PHIs are May/MustAlias there must be (recursively) an input
1423  // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1424  // there must be an operation on the PHIs within the PHIs' value cycle
1425  // that causes a MayAlias.
1426  // Pretend the phis do not alias.
1427  AliasResult Alias = NoAlias;
1428  assert(AliasCache.count(Locs) &&
1429  "There must exist an entry for the phi node");
1430  AliasResult OrigAliasResult = AliasCache[Locs];
1431  AliasCache[Locs] = NoAlias;
1432 
1433  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1434  AliasResult ThisAlias =
1435  aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1436  PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1437  V2Size, V2AAInfo);
1438  Alias = MergeAliasResults(ThisAlias, Alias);
1439  if (Alias == MayAlias)
1440  break;
1441  }
1442 
1443  // Reset if speculation failed.
1444  if (Alias != NoAlias)
1445  AliasCache[Locs] = OrigAliasResult;
1446 
1447  return Alias;
1448  }
1449 
1450  SmallPtrSet<Value *, 4> UniqueSrc;
1451  SmallVector<Value *, 4> V1Srcs;
1452  bool isRecursive = false;
1453  for (Value *PV1 : PN->incoming_values()) {
1454  if (isa<PHINode>(PV1))
1455  // If any source is itself a PHI, return MayAlias conservatively to
1456  // avoid compile-time explosion. The worst case is when both sides are
1457  // PHI nodes, in which case this is O(m x n) time where 'm' and 'n' are
1458  // the number of PHI sources.
1459  return MayAlias;
1460 
1460 
1461  if (EnableRecPhiAnalysis)
1462  if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1463  // Check whether the incoming value is a GEP that advances the pointer
1464  // result of this PHI node (e.g. in a loop). If this is the case, we
1465  // would recurse and always get a MayAlias. Handle this case specially
1466  // below.
1467  if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1468  isa<ConstantInt>(PV1GEP->idx_begin())) {
1469  isRecursive = true;
1470  continue;
1471  }
1472  }
1473 
1474  if (UniqueSrc.insert(PV1).second)
1475  V1Srcs.push_back(PV1);
1476  }
1477 
1478  // If this PHI node is recursive, set the size of the accessed memory to
1479  // unknown to represent all the possible values the GEP could advance the
1480  // pointer to.
1481  if (isRecursive)
1482  PNSize = MemoryLocation::UnknownSize;
1483 
1484  AliasResult Alias =
1485  aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
1486  PNSize, PNAAInfo, UnderV2);
1487 
1488  // Early exit if the check of the first PHI source against V2 is MayAlias;
1489  // merging any further result with MayAlias still yields MayAlias.
1490  if (Alias == MayAlias)
1491  return MayAlias;
1492 
1493  // If all sources of the PHI node NoAlias or MustAlias V2, then we return
1494  // NoAlias / MustAlias. Otherwise, we return MayAlias.
1495  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1496  Value *V = V1Srcs[i];
1497 
1498  AliasResult ThisAlias =
1499  aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
1500  Alias = MergeAliasResults(ThisAlias, Alias);
1501  if (Alias == MayAlias)
1502  break;
1503  }
1504 
1505  return Alias;
1506 }
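The recursive-PHI special case corresponds to a pointer advanced by a constant stride in a loop. In the invented example below, the IR for p is a PHI whose back-edge input is a GEP of that same PHI with a single constant index, so isRecursive is set and PNSize widens to UnknownSize to cover every address the pointer may visit:

#include <cstdint>

int64_t sum(const int64_t *p, int n) {
  int64_t s = 0;
  for (int i = 0; i < n; ++i)
    s += *p++; // IR: %p.phi = phi i64* [ %p, %entry ], [ %p.next, %loop ]
               //     %p.next = getelementptr i64, i64* %p.phi, i64 1
  return s;
}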
1507 
1508 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1509 /// array references.
1510 AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
1511  AAMDNodes V1AAInfo, const Value *V2,
1512  uint64_t V2Size, AAMDNodes V2AAInfo,
1513  const Value *O1, const Value *O2) {
1514  // If either of the memory references is empty, it doesn't matter what the
1515  // pointer values are.
1516  if (V1Size == 0 || V2Size == 0)
1517  return NoAlias;
1518 
1519  // Strip off any casts if they exist.
1520  V1 = V1->stripPointerCastsAndBarriers();
1521  V2 = V2->stripPointerCastsAndBarriers();
1522 
1523  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1524  // value for undef that aliases nothing in the program.
1525  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1526  return NoAlias;
1527 
1528  // Are we checking for alias of the same value?
1529  // Because we look 'through' phi nodes, we could look at "Value" pointers from
1530  // different iterations. We must therefore make sure that this is not the
1531  // case. The function isValueEqualInPotentialCycles ensures that this cannot
1532  // happen by looking at the visited phi nodes and making sure they cannot
1533  // reach the value.
1534  if (isValueEqualInPotentialCycles(V1, V2))
1535  return MustAlias;
1536 
1537  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1538  return NoAlias; // Scalars cannot alias each other
1539 
1540  // Figure out what objects these things are pointing to if we can.
1541  if (O1 == nullptr)
1542  O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
1543 
1544  if (O2 == nullptr)
1545  O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
1546 
1547  // Null values in the default address space don't point to any object, so they
1548  // don't alias any other pointer.
1549  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1550  if (CPN->getType()->getAddressSpace() == 0)
1551  return NoAlias;
1552  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1553  if (CPN->getType()->getAddressSpace() == 0)
1554  return NoAlias;
1555 
1556  if (O1 != O2) {
1557  // If V1/V2 point to two different objects, we know that we have no alias.
1558  if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1559  return NoAlias;
1560 
1561  // Constant pointers can't alias with non-constant identified objects.
1562  if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1563  (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1564  return NoAlias;
1565 
1566  // Function arguments can't alias with things that are known to be
1567  // unambiguously identified at the function level.
1568  if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1569  (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1570  return NoAlias;
1571 
1572  // Most objects can't alias null.
1573  if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
1574  (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
1575  return NoAlias;
1576 
1577  // If one pointer is the result of a call/invoke or load and the other is a
1578  // non-escaping local object within the same function, then we know the
1579  // object couldn't escape to a point where the call could return it.
1580  //
1581  // Note that if the pointers are in different functions, there are a
1582  // variety of complications. A call with a nocapture argument may still
1583  // temporarily store the nocapture argument's value in a temporary memory
1584  // location if that memory location doesn't escape. Or it may pass a
1585  // nocapture value to other functions as long as they don't capture it.
1586  if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
1587  return NoAlias;
1588  if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
1589  return NoAlias;
1590  }
1591 
1592  // If the size of one access is larger than the entire object on the other
1593  // side, then we know such behavior is undefined and can assume no alias.
1594  if ((V1Size != MemoryLocation::UnknownSize &&
1595  isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
1596  (V2Size != MemoryLocation::UnknownSize &&
1597  isObjectSmallerThan(O1, V2Size, DL, TLI)))
1598  return NoAlias;
1599 
1600  // Check the cache before climbing up use-def chains. This also terminates
1601  // otherwise infinitely recursive queries.
1602  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1603  MemoryLocation(V2, V2Size, V2AAInfo));
1604  if (V1 > V2)
1605  std::swap(Locs.first, Locs.second);
1606  std::pair<AliasCacheTy::iterator, bool> Pair =
1607  AliasCache.insert(std::make_pair(Locs, MayAlias));
1608  if (!Pair.second)
1609  return Pair.first->second;
1610 
1611  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1612  // GEP can't simplify, we don't even look at the PHI cases.
1613  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1614  std::swap(V1, V2);
1615  std::swap(V1Size, V2Size);
1616  std::swap(O1, O2);
1617  std::swap(V1AAInfo, V2AAInfo);
1618  }
1619  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1620  AliasResult Result =
1621  aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
1622  if (Result != MayAlias)
1623  return AliasCache[Locs] = Result;
1624  }
1625 
1626  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1627  std::swap(V1, V2);
1628  std::swap(O1, O2);
1629  std::swap(V1Size, V2Size);
1630  std::swap(V1AAInfo, V2AAInfo);
1631  }
1632  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1633  AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
1634  V2, V2Size, V2AAInfo, O2);
1635  if (Result != MayAlias)
1636  return AliasCache[Locs] = Result;
1637  }
1638 
1639  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1640  std::swap(V1, V2);
1641  std::swap(O1, O2);
1642  std::swap(V1Size, V2Size);
1643  std::swap(V1AAInfo, V2AAInfo);
1644  }
1645  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1646  AliasResult Result =
1647  aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
1648  if (Result != MayAlias)
1649  return AliasCache[Locs] = Result;
1650  }
1651 
1652  // If both pointers are pointing into the same object and one of them
1653  // accesses the entire object, then the accesses must overlap in some way.
1654  if (O1 == O2)
1655  if ((V1Size != MemoryLocation::UnknownSize &&
1656  isObjectSize(O1, V1Size, DL, TLI)) ||
1657  (V2Size != MemoryLocation::UnknownSize &&
1658  isObjectSize(O2, V2Size, DL, TLI)))
1659  return AliasCache[Locs] = PartialAlias;
1660 
1661  // Recurse back into the best AA results we have, potentially with refined
1662  // memory locations. We have already ensured that BasicAA has a MayAlias
1663  // cache result for these, so any recursion back into BasicAA won't loop.
1664  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
1665  return AliasCache[Locs] = Result;
1666 }
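The first O1 != O2 rule is the classic case: two distinct identified objects, such as two allocas, can never overlap. An illustrative (invented) C++ query:

#include <cassert>

int main() {
  int A[4], B[4];             // two allocas: distinct identified objects
  int *p = &A[1], *q = &B[2]; // interior pointers into each object
  assert(p != q);             // BasicAA would answer NoAlias for (p, q)
}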
1667 
1668 /// Check whether two Values can be considered equivalent.
1669 ///
1670 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1671 /// they cannot be part of a cycle in the value graph by looking at all
1672 /// visited phi nodes and making sure that the phis cannot reach the value. We
1673 /// have to do this because we are looking through phi nodes (that is, we say
1674 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1675 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1676  const Value *V2) {
1677  if (V != V2)
1678  return false;
1679 
1680  const Instruction *Inst = dyn_cast<Instruction>(V);
1681  if (!Inst)
1682  return true;
1683 
1684  if (VisitedPhiBBs.empty())
1685  return true;
1686 
1687  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1688  return false;
1689 
1690  // Make sure that the visited phis cannot reach the Value. This ensures that
1691  // the Values cannot come from different iterations of a potential cycle the
1692  // phi nodes could be involved in.
1693  for (auto *P : VisitedPhiBBs)
1694  if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
1695  return false;
1696 
1697  return true;
1698 }
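The subtlety this function guards against is that a single Value can denote different runtime addresses on different trips around a cycle, so pointer identity alone does not imply the accesses coincide. An invented illustration:

#include <cassert>

int main() {
  int buf[2] = {0, 0};
  int *p = &buf[0]; // in IR, the loop-carried "p" is one PHI node
  int *prev = nullptr;
  for (int i = 0; i < 2; ++i) {
    if (prev)
      assert(prev != p); // same Value, different address per iteration
    prev = p;
    p = p + 1; // the cycle advances the pointer
  }
}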
1699 
1700 /// Computes the symbolic difference between two de-composed GEPs.
1701 ///
1702 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1703 /// instructions GEP1 and GEP2 which have common base pointers.
1704 void BasicAAResult::GetIndexDifference(
1705  SmallVectorImpl<VariableGEPIndex> &Dest,
1706  const SmallVectorImpl<VariableGEPIndex> &Src) {
1707  if (Src.empty())
1708  return;
1709 
1710  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1711  const Value *V = Src[i].V;
1712  unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1713  int64_t Scale = Src[i].Scale;
1714 
1715  // Find V in Dest. This is N^2, but GEP expressions almost never have
1716  // more than a few variable indices.
1717  for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1718  if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1719  Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1720  continue;
1721 
1722  // If we found it, subtract off Scale V's from the entry in Dest. If it
1723  // goes to zero, remove the entry.
1724  if (Dest[j].Scale != Scale)
1725  Dest[j].Scale -= Scale;
1726  else
1727  Dest.erase(Dest.begin() + j);
1728  Scale = 0;
1729  break;
1730  }
1731 
1732  // If we didn't consume this entry, add it to the end of the Dest list.
1733  if (Scale) {
1734  VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1735  Dest.push_back(Entry);
1736  }
1737  }
1738 }
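Worked example: with Dest = {4*x, 2*y} and Src = {4*x}, the 4*x terms cancel exactly and the difference is {2*y}. A simplified, compilable sketch (the stand-in Index struct drops the real struct's extension bits):

#include <cassert>
#include <cstdint>
#include <vector>

struct Index { const void *V; int64_t Scale; }; // simplified VariableGEPIndex

static void indexDifference(std::vector<Index> &Dest,
                            const std::vector<Index> &Src) {
  for (const Index &S : Src) {
    bool Consumed = false;
    for (size_t j = 0; j != Dest.size(); ++j) {
      if (Dest[j].V != S.V)
        continue;
      if (Dest[j].Scale != S.Scale)
        Dest[j].Scale -= S.Scale;     // partial cancellation
      else
        Dest.erase(Dest.begin() + j); // exact cancellation: drop the term
      Consumed = true;
      break;
    }
    if (!Consumed)
      Dest.push_back({S.V, -S.Scale}); // unmatched Src terms enter negated
  }
}

int main() {
  int x = 0, y = 0;
  std::vector<Index> Dest = {{&x, 4}, {&y, 2}};
  indexDifference(Dest, {{&x, 4}});
  assert(Dest.size() == 1 && Dest[0].V == &y && Dest[0].Scale == 2);
}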
1739 
1740 bool BasicAAResult::constantOffsetHeuristic(
1741  const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
1742  uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
1743  DominatorTree *DT) {
1744  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
1745  V2Size == MemoryLocation::UnknownSize)
1746  return false;
1747 
1748  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1749 
1750  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1751  Var0.Scale != -Var1.Scale)
1752  return false;
1753 
1754  unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1755 
1756  // We'll strip off the extensions of Var0 and Var1 and do another round
1757  // of GetLinearExpression decomposition. In the example above, if Var0
1758  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1759 
1760  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1761  V1Offset(Width, 0);
1762  bool NSW = true, NUW = true;
1763  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1764  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1765  V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1766  NSW = true;
1767  NUW = true;
1768  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1769  V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1770 
1771  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1772  V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1773  return false;
1774 
1775  // We have a hit - Var0 and Var1 only differ by a constant offset!
1776 
1777  // If we've been sext'ed then zext'd, the maximum difference between Var0
1778  // and Var1 can be calculated, but we're just interested in the absolute
1779  // minimum difference between the two. The minimum distance may occur due to
1780  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1781  // the minimum distance between %i and %i + 5 is 3.
1782  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1783  MinDiff = APIntOps::umin(MinDiff, Wrapped);
1784  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
1785 
1786  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1787  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1788  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1789  // V2Size can fit in the MinDiffBytes gap.
1790  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
1791  V2Size + std::abs(BaseOffset) <= MinDiffBytes;
1792 }
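The wrapping-distance arithmetic can be checked by hand at 3 bits, matching the "add i3 %i, 5" comment above: the raw offset difference is 5, but modulo 8 the two pointers may be as few as min(5, 8 - 5) = 3 units apart. A small verification:

#include <cassert>

int main() {
  const unsigned Width = 3, Mod = 1u << Width; // i3 arithmetic, modulo 8
  unsigned Diff = 5 % Mod;                     // V0Offset - V1Offset
  unsigned Wrapped = (Mod - Diff) % Mod;       // -Diff, modulo 2^Width
  unsigned MinDiff = Diff < Wrapped ? Diff : Wrapped;
  assert(MinDiff == 3); // the minimum distance between %i and %i + 5
}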
1793 
1794 //===----------------------------------------------------------------------===//
1795 // BasicAliasAnalysis Pass
1796 //===----------------------------------------------------------------------===//
1797 
1798 AnalysisKey BasicAA::Key;
1799 
1800 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1801  return BasicAAResult(F.getParent()->getDataLayout(),
1802  AM.getResult<TargetLibraryAnalysis>(F),
1803  AM.getResult<AssumptionAnalysis>(F),
1804  &AM.getResult<DominatorTreeAnalysis>(F),
1805  AM.getCachedResult<LoopAnalysis>(F));
1806 }
1807 
1808 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1809  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1810 }
1811 
1812 char BasicAAWrapperPass::ID = 0;
1813 void BasicAAWrapperPass::anchor() {}
1814 
1816  "Basic Alias Analysis (stateless AA impl)", true, true)
1821  "Basic Alias Analysis (stateless AA impl)", true, true)
1822 
1823 FunctionPass *llvm::createBasicAAWrapperPass() {
1824  return new BasicAAWrapperPass();
1825 }
1826 
1827 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1828  auto &ACT = getAnalysis<AssumptionCacheTracker>();
1829  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1830  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1831  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
1832 
1833  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
1834  ACT.getAssumptionCache(F), &DTWP.getDomTree(),
1835  LIWP ? &LIWP->getLoopInfo() : nullptr));
1836 
1837  return false;
1838 }
1839 
1840 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1841  AU.setPreservesAll();
1842  AU.addRequired<AssumptionCacheTracker>();
1843  AU.addRequired<DominatorTreeWrapperPass>();
1844  AU.addRequired<TargetLibraryInfoWrapperPass>();
1845 }
1846 
1847 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1848  return BasicAAResult(
1849  F.getParent()->getDataLayout(),
1850  P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
1851  P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1852 }
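For reference, a hedged sketch of querying this result through the new pass manager; "MyPass" and the 4-byte query sizes are invented for illustration, and real code would pull the pointers from actual instructions:

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

struct MyPass : PassInfoMixin<MyPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    BasicAAResult &BAA = AM.getResult<BasicAA>(F);
    // Given two pointer Values P1 and P2 with known 4-byte accesses:
    //   AliasResult R = BAA.alias(MemoryLocation(P1, 4),
    //                             MemoryLocation(P2, 4));
    (void)BAA; // placeholder: no instructions are inspected in this sketch
    return PreservedAnalyses::all();
  }
};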