//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));
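// A hidden cl::opt: off by default, but settable from the command line (e.g.
// `opt -basicaa-recphi`, an illustrative invocation).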

/// SearchLimitReached / SearchTimes show how often the limit to
/// decompose GEPs is reached. It will affect the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
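/// For example, an alloca whose address is never stored to memory and never
/// passed to a capturing call is a non-escaping local object; once its
/// address reaches an unknown callee, PointerMayBeCaptured reports a capture.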
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (ImmutableCallSite(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
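///
/// For example, given the IR
///   %a = shl nsw i32 %x, 2
///   %b = add nsw i32 %a, 12
/// decomposing %b returns V = %x with Scale = 4 and Offset = 12 (an
/// illustrative sketch; the result widths depend on the outermost caller).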
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32b programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
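/// For example, with PointerSize == 32, an offset of 0xFFFFFFFF is shifted up
/// by 32 bits and arithmetically shifted back down, yielding -1, i.e. the
/// 32-bit two's complement interpretation of the value (illustrative
/// arithmetic).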
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
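///
/// As an illustrative sketch (assuming a 64-bit target and a hypothetical
/// type %struct.S = type { i32, [8 x i32] }), the expression
///   getelementptr inbounds %struct.S, %struct.S* %base, i64 0, i32 1, i64 %i
/// decomposes into Base = %base, StructOffset = 4 (field 1), OtherOffset = 0,
/// and a single variable index {%i, Scale = 4}.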
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed with
        // the attributes, but have properties like returning aliasing pointer.
        // Because some analyses may assume that a nocaptured pointer is not
        // returned from some special intrinsic (because the function would have
        // to be marked with the returns attribute), it is crucial to use this
        // function because it should be in sync with CaptureTracking. Not using
        // it may cause weird miscompilations where two aliasing pointers are
        // assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }

      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // All GEP math happens in the width of the pointer type, so we can
      // truncate the value to 64 bits, as we don't currently handle pointers
      // larger than 64 bits and we would crash later.
      // TODO: Make `Scale` an APInt to avoid this problem.
      if (IndexScale.getBitWidth() > 64)
        IndexScale = IndexScale.sextOrTrunc(64);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
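/// For example, a pointer into a global declared `constant` qualifies, as
/// does (with OrLocal set) a pointer into an alloca; anything that cannot be
/// shown constant or local defers to AAResultBase::pointsToConstantMemory.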
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (CS.onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
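  // The bitwise & above acts as a meet on the FunctionModRefBehavior lattice;
  // e.g. a call site that is both readonly and argmemonly combines
  // FMRB_OnlyReadsMemory with FMRB_OnlyAccessesArgumentPointees into
  // FMRB_OnlyReadsArgumentPointees.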

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e. Mod only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, lets use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, lets use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
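///
/// As an illustrative sketch (using a hypothetical type
/// %struct.Pair = type { i32, i32 }), given
///   %a = getelementptr inbounds %struct.Pair, %struct.Pair* %p, i64 %i, i32 0
///   %b = getelementptr inbounds %struct.Pair, %struct.Pair* %p, i64 %j, i32 1
/// with 4-byte accesses, the constant final indices select non-overlapping
/// fields of the same struct, so the rules below return NoAlias even though
/// %i and %j are unknown.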
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other indices
  // might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset
  // from the base, then use it. If the GEP has variable indices,
  // we can't get exact GEP offset to identify pointer alias. So return
  // false in that case.
  if (!DecompGEP.VarIndices.empty())
    return false;
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
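///
/// As an illustrative sketch of the offset-based reasoning below: if both
/// pointers decompose to the same base with constant offsets 0 and 8 and the
/// first access is 4 bytes wide, the accessed ranges [0,4) and [8,12) are
/// disjoint, which is the kind of fact the remaining checks exploit.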
1221 : AliasResult
1222 13524178 : BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
1223 : const AAMDNodes &V1AAInfo, const Value *V2,
1224 : LocationSize V2Size, const AAMDNodes &V2AAInfo,
1225 : const Value *UnderlyingV1, const Value *UnderlyingV2) {
1226 : DecomposedGEP DecompGEP1, DecompGEP2;
1227 : bool GEP1MaxLookupReached =
1228 13524178 : DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
1229 : bool GEP2MaxLookupReached =
1230 13524178 : DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
1231 :
1232 13524178 : int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
1233 13524178 : int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
1234 :
1235 : assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1236 : "DecomposeGEPExpression returned a result different from "
1237 : "GetUnderlyingObject");
1238 :
1239 : // If the GEP's offset relative to its base is such that the base would
1240 : // fall below the start of the object underlying V2, then the GEP and V2
1241 : // cannot alias.
1242 27044033 : if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1243 13519855 : isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
1244 : return NoAlias;
1245 : // If we have two gep instructions with must-alias or not-alias'ing base
1246 : // pointers, figure out if the indexes to the GEP tell us anything about the
1247 : // derived pointer.
1248 : if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
1249 : // Check for the GEP base being at a negative offset, this time in the other
1250 : // direction.
1251 7824427 : if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1252 3910747 : isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
1253 : return NoAlias;
1254 : // Do the base pointers alias?
1255 : AliasResult BaseAlias =
1256 1216236 : aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
1257 : UnderlyingV2, LocationSize::unknown(), AAMDNodes());
1258 :
1259 : // Check for geps of non-aliasing underlying pointers where the offsets are
1260 : // identical.
1261 608118 : if ((BaseAlias == MayAlias) && V1Size == V2Size) {
1262 : // Do the base pointers alias assuming type and size.
1263 235054 : AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
1264 : UnderlyingV2, V2Size, V2AAInfo);
1265 235054 : if (PreciseBaseAlias == NoAlias) {
1266 : // See if the computed offset from the common pointer tells us about the
1267 : // relation of the resulting pointer.
1268 : // If the max search depth is reached the result is undefined
1269 41323 : if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1270 : return MayAlias;
1271 :
1272 : // Same offsets.
1273 53945 : if (GEP1BaseOffset == GEP2BaseOffset &&
1274 13288 : DecompGEP1.VarIndices == DecompGEP2.VarIndices)
1275 : return NoAlias;
1276 : }
1277 : }
1278 :
1279 :     // If we get a No or May, then return it immediately; no amount of
1280 :     // analysis will improve this situation.
1281 594592 : if (BaseAlias != MustAlias) {
1282 : assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
1283 : return BaseAlias;
1284 : }
1285 :
1286 : // Otherwise, we have a MustAlias. Since the base pointers alias each other
1287 : // exactly, see if the computed offset from the common pointer tells us
1288 : // about the relation of the resulting pointer.
1289 : // If we know the two GEPs are based off of the exact same pointer (and not
1290 : // just the same underlying object), see if that tells us anything about
1291 : // the resulting pointers.
1292 150189 : if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
1293 287284 : GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
1294 : GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
1295 135421 : AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
1296 : // If we couldn't find anything interesting, don't abandon just yet.
1297 135421 : if (R != MayAlias)
1298 : return R;
1299 : }
1300 :
1301 : // If the max search depth is reached, the result is undefined
1302 129637 : if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1303 : return MayAlias;
1304 :
1305 : // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1306 : // symbolic difference.
1307 129247 : GEP1BaseOffset -= GEP2BaseOffset;
1308 129247 : GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1309 :
1310 : } else {
1311 :     // Check to see if these two pointers are related by the getelementptr
1312 :     // instruction. If one pointer is a GEP of the other with a non-zero
1313 :     // offset, we know they cannot alias.
1314 :
1315 : // If both accesses are unknown size, we can't do anything useful here.
1316 4496229 : if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
1317 : return MayAlias;
1318 :
1319 : AliasResult R =
1320 5833594 : aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(), V2,
1321 : LocationSize::unknown(), V2AAInfo, nullptr, UnderlyingV2);
1322 2916797 : if (R != MustAlias) {
1323 :       // If V2 may alias the GEP base pointer, conservatively return MayAlias.
1324 :       // If V2 is known not to alias the GEP base pointer, then the two values
1325 :       // cannot alias per GEP semantics: "Any memory access must be done through
1326 :       // a pointer value associated with an address range of the memory access,
1327 :       // otherwise the behavior is undefined."
1328 : assert(R == NoAlias || R == MayAlias);
1329 : return R;
1330 : }
1331 :
1332 :     // If the max search depth is reached, the result is undefined
1333 56022 : if (GEP1MaxLookupReached)
1334 : return MayAlias;
1335 : }
1336 :
1337 :   // In the two-GEP case, if there is no difference between the offsets of the
1338 :   // computed pointers, the resulting pointers are a must alias. This
1339 :   // happens when we have two lexically identical GEPs (for example).
1340 :   //
1341 :   // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
1342 :   // must-aliases the GEP, the end result is a must alias as well.
1343 185175 : if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
1344 : return MustAlias;
1345 :
1346 : // If there is a constant difference between the pointers, but the difference
1347 : // is less than the size of the associated memory object, then we know
1348 : // that the objects are partially overlapping. If the difference is
1349 : // greater, we know they do not overlap.
1350 182609 : if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
1351 165053 : if (GEP1BaseOffset >= 0) {
1352 71281 : if (V2Size != LocationSize::unknown()) {
1353 67716 : if ((uint64_t)GEP1BaseOffset < V2Size.getValue())
1354 : return PartialAlias;
1355 63484 : return NoAlias;
1356 : }
1357 : } else {
1358 : // We have the situation where:
1359 :       // +                +
1360 :       // | BaseOffset     |
1361 :       // ---------------->|
1362 :       // |-->V1Size       |-------> V2Size
1363 :       // GEP1             V2
1364 :       // We need to know that V2Size is not unknown; otherwise we might have
1365 :       // stripped a gep with a negative index ('gep <ptr>, -1, ...').
1366 93772 : if (V1Size != LocationSize::unknown() &&
1367 : V2Size != LocationSize::unknown()) {
1368 126080 : if (-(uint64_t)GEP1BaseOffset < V1Size.getValue())
1369 : return PartialAlias;
1370 62643 : return NoAlias;
1371 : }
1372 : }
1373 : }
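// Worked example (comment only): with GEP1BaseOffset == 4 and V2Size == 8,
// GEP1 starts 4 bytes into V2's 8-byte access, so the code above returns
// PartialAlias; with GEP1BaseOffset == 8 the constant gap is at least
// V2Size, the accesses cannot overlap, and the result is NoAlias.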
1374 :
1375 51853 : if (!DecompGEP1.VarIndices.empty()) {
1376 : uint64_t Modulo = 0;
1377 : bool AllPositive = true;
1378 41377 : for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1379 :
1380 : // Try to distinguish something like &A[i][1] against &A[42][0].
1381 : // Grab the least significant bit set in any of the scales. We
1382 : // don't need std::abs here (even if the scale's negative) as we'll
1383 :       // only keep the lowest set bit of Modulo later.
1384 23821 : Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;
1385 :
1386 23821 : if (AllPositive) {
1387 : // If the Value could change between cycles, then any reasoning about
1388 : // the Value this cycle may not hold in the next cycle. We'll just
1389 : // give up if we can't determine conditions that hold for every cycle:
1390 19367 : const Value *V = DecompGEP1.VarIndices[i].V;
1391 :
1392 38734 : KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
1393 : bool SignKnownZero = Known.isNonNegative();
1394 : bool SignKnownOne = Known.isNegative();
1395 :
1396 : // Zero-extension widens the variable, and so forces the sign
1397 : // bit to zero.
1398 19367 : bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1399 19367 : SignKnownZero |= IsZExt;
1400 19367 : SignKnownOne &= !IsZExt;
1401 :
1402 :         // If the variable's sign bit is known to be zero, then we know the
1403 :         // value is non-negative, regardless of whether it is used as signed
1404 :         // or unsigned.
1405 19367 : int64_t Scale = DecompGEP1.VarIndices[i].Scale;
1406 : AllPositive =
1407 19367 : (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
1408 : }
1409 : }
1410 :
1411 17556 : Modulo = Modulo ^ (Modulo & (Modulo - 1));
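// (Worked example, comment only: the line above keeps only the lowest set
// bit. If the accumulated scales are 8 and 12, Modulo is 0b1100 beforehand
// and 0b0100 afterwards, so each address is congruent to its constant
// offset modulo 4.)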
1412 :
1413 : // We can compute the difference between the two addresses
1414 : // mod Modulo. Check whether that difference guarantees that the
1415 : // two locations do not alias.
1416 17556 : uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
1417 17198 : if (V1Size != LocationSize::unknown() &&
1418 34391 : V2Size != LocationSize::unknown() && ModOffset >= V2Size.getValue() &&
1419 1714 : V1Size.getValue() <= Modulo - ModOffset)
1420 : return NoAlias;
1421 :
1422 : // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
1423 : // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
1424 : // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
1425 283 : if (AllPositive && GEP1BaseOffset > 0 &&
1426 16275 : V2Size != LocationSize::unknown() &&
1427 : V2Size.getValue() <= (uint64_t)GEP1BaseOffset)
1428 : return NoAlias;
1429 :
1430 15722 : if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1431 15722 : GEP1BaseOffset, &AC, DT))
1432 90 : return NoAlias;
1433 : }
1434 :
1435 : // Statically, we can see that the base objects are the same, but the
1436 : // pointers have dynamic offsets which we can't resolve. And none of our
1437 : // little tricks above worked.
1438 : return MayAlias;
1439 : }
1440 :
1441 : static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1442 : // If the results agree, take it.
1443 3931726 : if (A == B)
1444 : return A;
1445 : // A mix of PartialAlias and MustAlias is PartialAlias.
1446 100353 : if ((A == PartialAlias && B == MustAlias) ||
1447 100353 : (B == PartialAlias && A == MustAlias))
1448 : return PartialAlias;
1449 : // Otherwise, we don't know anything.
1450 : return MayAlias;
1451 : }
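// For example, MergeAliasResults(NoAlias, NoAlias) == NoAlias and
// MergeAliasResults(PartialAlias, MustAlias) == PartialAlias, but
// MergeAliasResults(NoAlias, MustAlias) == MayAlias: two precise results
// that disagree tell us nothing definite about the merged value.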
1452 :
1453 : /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1454 : /// against another.
1455 12673 : AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
1456 : LocationSize SISize,
1457 : const AAMDNodes &SIAAInfo,
1458 : const Value *V2, LocationSize V2Size,
1459 : const AAMDNodes &V2AAInfo,
1460 : const Value *UnderV2) {
1461 : // If the values are Selects with the same condition, we can do a more precise
1462 : // check: just check for aliases between the values on corresponding arms.
1463 : if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1464 720 : if (SI->getCondition() == SI2->getCondition()) {
1465 546 : AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
1466 : SI2->getTrueValue(), V2Size, V2AAInfo);
1467 546 : if (Alias == MayAlias)
1468 : return MayAlias;
1469 : AliasResult ThisAlias =
1470 263 : aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
1471 : SI2->getFalseValue(), V2Size, V2AAInfo);
1472 : return MergeAliasResults(ThisAlias, Alias);
1473 : }
1474 :
1475 :   // If both arms of the Select node NoAlias or MustAlias V2, then return
1476 :   // NoAlias / MustAlias. Otherwise, return MayAlias.
1477 : AliasResult Alias =
1478 12127 : aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
1479 : SISize, SIAAInfo, UnderV2);
1480 12127 : if (Alias == MayAlias)
1481 : return MayAlias;
1482 :
1483 : AliasResult ThisAlias =
1484 7734 : aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
1485 : UnderV2);
1486 : return MergeAliasResults(ThisAlias, Alias);
1487 : }
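// Illustrative IR for the same-condition case above (comment only, with
// assumed names):
//   %p = select i1 %c, i8* %a, i8* %b
//   %q = select i1 %c, i8* %x, i8* %y
// Both selects take the same arm on any given execution, so it suffices to
// merge the arm-wise results alias(%a, %x) and alias(%b, %y).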
1488 :
1489 : /// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
1490 : /// another.
1491 5224332 : AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1492 : const AAMDNodes &PNAAInfo, const Value *V2,
1493 : LocationSize V2Size,
1494 : const AAMDNodes &V2AAInfo,
1495 : const Value *UnderV2) {
1496 : // Track phi nodes we have visited. We use this information when we determine
1497 : // value equivalence.
1498 5224332 : VisitedPhiBBs.insert(PN->getParent());
1499 :
1500 :   // If the values are PHIs in the same block, we can do a more precise
1501 :   // and more efficient check: just check for aliases between the values
1502 :   // on corresponding edges.
1503 : if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1504 119371 : if (PN2->getParent() == PN->getParent()) {
1505 : LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1506 : MemoryLocation(V2, V2Size, V2AAInfo));
1507 69986 : if (PN > V2)
1508 : std::swap(Locs.first, Locs.second);
1509 : // Analyse the PHIs' inputs under the assumption that the PHIs are
1510 : // NoAlias.
1511 :       // If the PHIs are May/MustAlias, there must be (recursively) an input
1512 : // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1513 : // there must be an operation on the PHIs within the PHIs' value cycle
1514 : // that causes a MayAlias.
1515 : // Pretend the phis do not alias.
1516 : AliasResult Alias = NoAlias;
1517 : assert(AliasCache.count(Locs) &&
1518 : "There must exist an entry for the phi node");
1519 69986 : AliasResult OrigAliasResult = AliasCache[Locs];
1520 69986 : AliasCache[Locs] = NoAlias;
1521 :
1522 109690 : for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1523 : AliasResult ThisAlias =
1524 101205 : aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1525 101205 : PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1526 : V2Size, V2AAInfo);
1527 : Alias = MergeAliasResults(ThisAlias, Alias);
1528 39704 : if (Alias == MayAlias)
1529 : break;
1530 : }
1531 :
1532 : // Reset if speculation failed.
1533 69986 : if (Alias != NoAlias)
1534 61501 : AliasCache[Locs] = OrigAliasResult;
1535 :
1536 : return Alias;
1537 : }
1538 :
1539 : SmallVector<Value *, 4> V1Srcs;
1540 : bool isRecursive = false;
1541 5154346 : if (PV) {
1542 : // If we have PhiValues then use it to get the underlying phi values.
1543 3226648 : const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1544 :     // If we have more phi values than the search depth then return MayAlias
1545 :     // conservatively to avoid compile-time explosion. The worst possible
1546 :     // case is if both sides are PHI nodes, in which case this is O(m x n)
1547 :     // time, where 'm' and 'n' are the numbers of PHI sources.
1548 6453296 : if (PhiValueSet.size() > MaxLookupSearchDepth)
1549 : return MayAlias;
1550 : // Add the values to V1Srcs
1551 9855515 : for (Value *PV1 : PhiValueSet) {
1552 6635533 : if (EnableRecPhiAnalysis) {
1553 : if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1554 : // Check whether the incoming value is a GEP that advances the pointer
1555 : // result of this PHI node (e.g. in a loop). If this is the case, we
1556 : // would recurse and always get a MayAlias. Handle this case specially
1557 : // below.
1558 0 : if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1559 : isa<ConstantInt>(PV1GEP->idx_begin())) {
1560 : isRecursive = true;
1561 : continue;
1562 : }
1563 : }
1564 : }
1565 6635533 : V1Srcs.push_back(PV1);
1566 : }
1567 : } else {
1568 :     // If we don't have PhiValues then just look at the operands of the phi itself.
1569 :     // FIXME: Remove this once we can guarantee that we always have PhiValues.
1570 : SmallPtrSet<Value *, 4> UniqueSrc;
1571 6035224 : for (Value *PV1 : PN->incoming_values()) {
1572 : if (isa<PHINode>(PV1))
1573 :         // If any source is itself a PHI, return MayAlias conservatively
1574 :         // to avoid compile-time explosion. The worst possible case is if
1575 :         // both sides are PHI nodes, in which case this is O(m x n) time,
1576 :         // where 'm' and 'n' are the numbers of PHI sources.
1577 47618 : return MayAlias;
1578 :
1579 4107526 : if (EnableRecPhiAnalysis)
1580 : if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1581 : // Check whether the incoming value is a GEP that advances the pointer
1582 : // result of this PHI node (e.g. in a loop). If this is the case, we
1583 : // would recurse and always get a MayAlias. Handle this case specially
1584 : // below.
1585 9 : if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1586 : isa<ConstantInt>(PV1GEP->idx_begin())) {
1587 : isRecursive = true;
1588 : continue;
1589 : }
1590 : }
1591 :
1592 4107517 : if (UniqueSrc.insert(PV1).second)
1593 4097810 : V1Srcs.push_back(PV1);
1594 : }
1595 : }
1596 :
1597 :   // If V1Srcs is empty then the phi has no underlying non-phi value.
1598 :   // This should only be possible in blocks unreachable from the entry
1599 :   // block, but return MayAlias just in case.
1600 5100062 : if (V1Srcs.empty())
1601 : return MayAlias;
1602 :
1603 : // If this PHI node is recursive, set the size of the accessed memory to
1604 : // unknown to represent all the possible values the GEP could advance the
1605 : // pointer to.
1606 5100062 : if (isRecursive)
1607 : PNSize = LocationSize::unknown();
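// Illustrative IR for the recursive case (comment only, with assumed names):
//   loop:
//     %p      = phi i8* [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr inbounds i8, i8* %p, i64 4
// The incoming GEP advances the phi's own result on every iteration, so the
// phi is treated as covering an unknown-sized range rather than recursing
// into %p.next.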
1608 :
1609 : AliasResult Alias =
1610 5100062 : aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
1611 : PNSize, PNAAInfo, UnderV2);
1612 :
1613 : // Early exit if the check of the first PHI source against V2 is MayAlias.
1614 : // Other results are not possible.
1615 5100062 : if (Alias == MayAlias)
1616 : return MayAlias;
1617 :
1618 :   // If all sources of the PHI node NoAlias or MustAlias V2, then return
1619 :   // NoAlias / MustAlias. Otherwise, return MayAlias.
1620 7212564 : for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1621 3822524 : Value *V = V1Srcs[i];
1622 :
1623 : AliasResult ThisAlias =
1624 3822524 : aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
1625 : Alias = MergeAliasResults(ThisAlias, Alias);
1626 3789214 : if (Alias == MayAlias)
1627 : break;
1628 : }
1629 :
1630 : return Alias;
1631 : }
1632 :
1633 : /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1634 : /// array references.
1635 46666407 : AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1636 : AAMDNodes V1AAInfo, const Value *V2,
1637 : LocationSize V2Size, AAMDNodes V2AAInfo,
1638 : const Value *O1, const Value *O2) {
1639 : // If either of the memory references is empty, it doesn't matter what the
1640 : // pointer values are.
1641 46666407 : if (V1Size == 0 || V2Size == 0)
1642 : return NoAlias;
1643 :
1644 : // Strip off any casts if they exist.
1645 46666388 : V1 = V1->stripPointerCastsAndInvariantGroups();
1646 46666388 : V2 = V2->stripPointerCastsAndInvariantGroups();
1647 :
1648 : // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1649 : // value for undef that aliases nothing in the program.
1650 46666388 : if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1651 : return NoAlias;
1652 :
1653 : // Are we checking for alias of the same value?
1654 : // Because we look 'through' phi nodes, we could look at "Value" pointers from
1655 : // different iterations. We must therefore make sure that this is not the
1656 : // case. The function isValueEqualInPotentialCycles ensures that this cannot
1657 : // happen by looking at the visited phi nodes and making sure they cannot
1658 : // reach the value.
1659 46664114 : if (isValueEqualInPotentialCycles(V1, V2))
1660 : return MustAlias;
1661 :
1662 91178824 : if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1663 : return NoAlias; // Scalars cannot alias each other
1664 :
1665 : // Figure out what objects these things are pointing to if we can.
1666 45589340 : if (O1 == nullptr)
1667 36659287 : O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
1668 :
1669 45589340 : if (O2 == nullptr)
1670 42727075 : O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
1671 :
1672 : // Null values in the default address space don't point to any object, so they
1673 : // don't alias any other pointer.
1674 : if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1675 1114 : if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1676 : return NoAlias;
1677 : if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1678 19127 : if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1679 : return NoAlias;
1680 :
1681 45569133 : if (O1 != O2) {
1682 : // If V1/V2 point to two different objects, we know that we have no alias.
1683 35625101 : if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1684 : return NoAlias;
1685 :
1686 :     // Constant pointers can't alias non-constant isIdentifiedObject objects.
1687 8771167 : if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1688 3576409 : (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1689 1261 : return NoAlias;
1690 :
1691 : // Function arguments can't alias with things that are known to be
1692 : // unambigously identified at the function level.
1693 8769906 : if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1694 937006 : (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1695 95047 : return NoAlias;
1696 :
1697 : // If one pointer is the result of a call/invoke or load and the other is a
1698 : // non-escaping local object within the same function, then we know the
1699 : // object couldn't escape to a point where the call could return it.
1700 : //
1701 : // Note that if the pointers are in different functions, there are a
1702 : // variety of complications. A call with a nocapture argument may still
1703 :     // temporarily store the nocapture argument's value in a temporary memory
1704 : // location if that memory location doesn't escape. Or it may pass a
1705 : // nocapture value to other functions as long as they don't capture it.
1706 8674859 : if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
1707 : return NoAlias;
1708 8671036 : if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
1709 : return NoAlias;
1710 : }
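// Illustrative IR for the first rule above (comment only, with assumed
// names): two distinct allocas
//   %a = alloca i32
//   %b = alloca i32
// are both identified objects, so pointers derived from %a and %b are
// NoAlias.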
1711 :
1712 : // If the size of one access is larger than the entire object on the other
1713 : // side, then we know such behavior is undefined and can assume no alias.
1714 18608542 : bool NullIsValidLocation = NullPointerIsDefined(&F);
1715 27058474 : if ((V1Size.isPrecise() && isObjectSmallerThan(O2, V1Size.getValue(), DL, TLI,
1716 32068709 : NullIsValidLocation)) ||
1717 26902570 : (V2Size.isPrecise() && isObjectSmallerThan(O1, V2Size.getValue(), DL, TLI,
1718 : NullIsValidLocation)))
1719 115466 : return NoAlias;
1720 :
1721 : // Check the cache before climbing up use-def chains. This also terminates
1722 : // otherwise infinitely recursive queries.
1723 : LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1724 : MemoryLocation(V2, V2Size, V2AAInfo));
1725 18493076 : if (V1 > V2)
1726 : std::swap(Locs.first, Locs.second);
1727 : std::pair<AliasCacheTy::iterator, bool> Pair =
1728 18493076 : AliasCache.insert(std::make_pair(Locs, MayAlias));
1729 18493076 : if (!Pair.second)
1730 107911 : return Pair.first->second;
1731 :
1732 : // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1733 : // GEP can't simplify, we don't even look at the PHI cases.
1734 : if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1735 : std::swap(V1, V2);
1736 : std::swap(V1Size, V2Size);
1737 : std::swap(O1, O2);
1738 : std::swap(V1AAInfo, V2AAInfo);
1739 : }
1740 : if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1741 : AliasResult Result =
1742 13524178 : aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
1743 13524178 : if (Result != MayAlias)
1744 9394349 : return AliasCache[Locs] = Result;
1745 : }
1746 :
1747 : if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1748 : std::swap(V1, V2);
1749 : std::swap(O1, O2);
1750 : std::swap(V1Size, V2Size);
1751 : std::swap(V1AAInfo, V2AAInfo);
1752 : }
1753 : if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1754 5224332 : AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
1755 : V2, V2Size, V2AAInfo, O2);
1756 5224332 : if (Result != MayAlias)
1757 3398525 : return AliasCache[Locs] = Result;
1758 : }
1759 :
1760 : if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1761 : std::swap(V1, V2);
1762 : std::swap(O1, O2);
1763 : std::swap(V1Size, V2Size);
1764 : std::swap(V1AAInfo, V2AAInfo);
1765 : }
1766 : if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1767 : AliasResult Result =
1768 12673 : aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
1769 12673 : if (Result != MayAlias)
1770 2456 : return AliasCache[Locs] = Result;
1771 : }
1772 :
1773 : // If both pointers are pointing into the same object and one of them
1774 : // accesses the entire object, then the accesses must overlap in some way.
1775 5589835 : if (O1 == O2)
1776 1597300 : if (V1Size.isPrecise() && V2Size.isPrecise() &&
1777 60716 : (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1778 40472 : isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1779 119 : return AliasCache[Locs] = PartialAlias;
1780 :
1781 : // Recurse back into the best AA results we have, potentially with refined
1782 : // memory locations. We have already ensured that BasicAA has a MayAlias
1783 : // cache result for these, so any recursion back into BasicAA won't loop.
1784 11179432 : AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
1785 5589716 : return AliasCache[Locs] = Result;
1786 : }
1787 :
1788 : /// Check whether two Values can be considered equivalent.
1789 : ///
1790 : /// In addition to pointer equivalence of \p V and \p V2 this checks whether
1791 : /// they cannot be part of a cycle in the value graph by looking at all
1792 : /// visited phi nodes and making sure that the phis cannot reach the value. We
1793 : /// have to do this because we are looking through phi nodes (that is, we say
1794 : /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1795 46711138 : bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1796 : const Value *V2) {
1797 46711138 : if (V != V2)
1798 : return false;
1799 :
1800 : const Instruction *Inst = dyn_cast<Instruction>(V);
1801 : if (!Inst)
1802 : return true;
1803 :
1804 227126 : if (VisitedPhiBBs.empty())
1805 : return true;
1806 :
1807 28202 : if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1808 : return false;
1809 :
1810 : // Make sure that the visited phis cannot reach the Value. This ensures that
1811 : // the Values cannot come from different iterations of a potential cycle the
1812 : // phi nodes could be involved in.
1813 38756 : for (auto *P : VisitedPhiBBs)
1814 59730 : if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
1815 19311 : return false;
1816 :
1817 8891 : return true;
1818 : }
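// Illustrative IR (comment only, with assumed names): in
//   loop:
//     %p = phi i8* [ %a, %entry ], [ %q, %loop ]
//     %q = getelementptr i8, i8* %p, i64 4
// the Value %q denotes a different address on every iteration, so once phis
// have been looked through, pointer identity alone is not sufficient; the
// reachability check above rejects exactly this situation.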
1819 :
1820 : /// Computes the symbolic difference between two decomposed GEPs.
1821 : ///
1822 : /// Dest and Src are the variable indices from two decomposed GetElementPtr
1823 : /// instructions GEP1 and GEP2 which have common base pointers.
1824 129247 : void BasicAAResult::GetIndexDifference(
1825 : SmallVectorImpl<VariableGEPIndex> &Dest,
1826 : const SmallVectorImpl<VariableGEPIndex> &Src) {
1827 129247 : if (Src.empty())
1828 : return;
1829 :
1830 88096 : for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1831 44642 : const Value *V = Src[i].V;
1832 44642 : unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1833 44642 : int64_t Scale = Src[i].Scale;
1834 :
1835 : // Find V in Dest. This is N^2, but pointer indices almost never have more
1836 :     // than a few variable indices.
1837 51022 : for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1838 85586 : if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1839 42793 : Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1840 : continue;
1841 :
1842 : // If we found it, subtract off Scale V's from the entry in Dest. If it
1843 : // goes to zero, remove the entry.
1844 36413 : if (Dest[j].Scale != Scale)
1845 619 : Dest[j].Scale -= Scale;
1846 : else
1847 35794 : Dest.erase(Dest.begin() + j);
1848 : Scale = 0;
1849 : break;
1850 : }
1851 :
1852 : // If we didn't consume this entry, add it to the end of the Dest list.
1853 8229 : if (Scale) {
1854 8229 : VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1855 8229 : Dest.push_back(Entry);
1856 : }
1857 : }
1858 : }
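// Worked example (comment only): with Dest == {4 * %i} and
// Src == {4 * %i, 2 * %j}, the %i entries cancel and a negated copy of the
// %j entry is appended, leaving Dest == {-2 * %j}, i.e. the symbolic
// difference (4 * %i) - (4 * %i + 2 * %j).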
1859 :
1860 15722 : bool BasicAAResult::constantOffsetHeuristic(
1861 : const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1862 : LocationSize MaybeV1Size, LocationSize MaybeV2Size, int64_t BaseOffset,
1863 : AssumptionCache *AC, DominatorTree *DT) {
1864 15722 : if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
1865 : MaybeV2Size == LocationSize::unknown())
1866 : return false;
1867 :
1868 : const uint64_t V1Size = MaybeV1Size.getValue();
1869 : const uint64_t V2Size = MaybeV2Size.getValue();
1870 :
1871 : const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1872 :
1873 4851 : if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1874 4753 : Var0.Scale != -Var1.Scale)
1875 : return false;
1876 :
1877 4232 : unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1878 :
1879 : // We'll strip off the Extensions of Var0 and Var1 and do another round
1880 :   // of GetLinearExpression decomposition. For example, if Var0
1881 :   // is zext(%x + 1) we should get V0 == %x and V0Offset == 1.
1882 :
1883 : APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1884 : V1Offset(Width, 0);
1885 4232 : bool NSW = true, NUW = true;
1886 4232 : unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1887 4232 : const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1888 : V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1889 4232 : NSW = true;
1890 4232 : NUW = true;
1891 4232 : const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1892 : V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1893 :
1894 4231 : if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1895 8463 : V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1896 4104 : return false;
1897 :
1898 : // We have a hit - Var0 and Var1 only differ by a constant offset!
1899 :
1900 :   // If we've been sext'ed then zext'd, the maximum difference between Var0
1901 :   // and Var1 can still be computed, but we're only interested in the absolute
1902 :   // minimum difference between the two. The minimum distance may occur due to
1903 : // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1904 : // the minimum distance between %i and %i + 5 is 3.
1905 256 : APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1906 128 : MinDiff = APIntOps::umin(MinDiff, Wrapped);
1907 256 : uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
1908 :
1909 : // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1910 : // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1911 : // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1912 : // V2Size can fit in the MinDiffBytes gap.
1913 128 : return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
1914 90 : V2Size + std::abs(BaseOffset) <= MinDiffBytes;
1915 : }
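// Worked example (comment only, with assumed values): Var0 == zext(%x + 1),
// Var1 == zext(%x), scales +4 and -4, BaseOffset == 0. Stripping the
// extensions gives V0 == V1 == %x with constant offsets 1 and 0, so
// MinDiff == 1 and MinDiffBytes == 4: the two pointers always sit exactly
// 4 bytes apart (modulo wrapping), and two accesses of at most 4 bytes each
// can never overlap.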
1916 :
1917 : //===----------------------------------------------------------------------===//
1918 : // BasicAliasAnalysis Pass
1919 : //===----------------------------------------------------------------------===//
1920 :
1921 : AnalysisKey BasicAA::Key;
1922 :
1923 802 : BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1924 : return BasicAAResult(F.getParent()->getDataLayout(),
1925 : F,
1926 : AM.getResult<TargetLibraryAnalysis>(F),
1927 : AM.getResult<AssumptionAnalysis>(F),
1928 : &AM.getResult<DominatorTreeAnalysis>(F),
1929 : AM.getCachedResult<LoopAnalysis>(F),
1930 802 : AM.getCachedResult<PhiValuesAnalysis>(F));
1931 : }
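// Minimal usage sketch (assumed, not part of this file): querying the result
// through a FunctionAnalysisManager in which BasicAA and the analyses its
// run method requires (TargetLibraryAnalysis, AssumptionAnalysis,
// DominatorTreeAnalysis) have been registered:
//   BasicAAResult &BAR = FAM.getResult<BasicAA>(F);
//   AliasResult R = BAR.alias(MemoryLocation(P1, LocationSize::precise(4)),
//                             MemoryLocation(P2, LocationSize::precise(4)));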
1932 :
1933 230852 : BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1934 115426 : initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1935 115426 : }
1936 :
1937 : char BasicAAWrapperPass::ID = 0;
1938 :
1939 0 : void BasicAAWrapperPass::anchor() {}
1940 :
1941 85402 : INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
1942 : "Basic Alias Analysis (stateless AA impl)", false, true)
1943 85402 : INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1944 85402 : INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1945 85402 : INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1946 884007 : INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
1947 : "Basic Alias Analysis (stateless AA impl)", false, true)
1948 :
1949 27453 : FunctionPass *llvm::createBasicAAWrapperPass() {
1950 27453 : return new BasicAAWrapperPass();
1951 : }
1952 :
1953 1589360 : bool BasicAAWrapperPass::runOnFunction(Function &F) {
1954 1589360 : auto &ACT = getAnalysis<AssumptionCacheTracker>();
1955 1589360 : auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1956 1589360 : auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1957 1589360 : auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
1958 1589360 : auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1959 :
1960 1589360 : Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, TLIWP.getTLI(),
1961 1589360 : ACT.getAssumptionCache(F), &DTWP.getDomTree(),
1962 1589360 : LIWP ? &LIWP->getLoopInfo() : nullptr,
1963 1693854 : PVWP ? &PVWP->getResult() : nullptr));
1964 :
1965 1589360 : return false;
1966 : }
1967 :
1968 113872 : void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1969 : AU.setPreservesAll();
1970 : AU.addRequired<AssumptionCacheTracker>();
1971 : AU.addRequired<DominatorTreeWrapperPass>();
1972 : AU.addRequired<TargetLibraryInfoWrapperPass>();
1973 : AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1974 113872 : }
1975 :
1976 1451366 : BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1977 : return BasicAAResult(
1978 : F.getParent()->getDataLayout(),
1979 : F,
1980 1451366 : P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
1981 2902732 : P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1982 : }
|