File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Analysis/BasicAliasAnalysis.cpp
Warning: line 398, column 9: Value stored to 'NUW' is never read
1 | //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the primary stateless implementation of the |
10 | // Alias Analysis interface that implements identities (two different |
11 | // globals cannot alias, etc), but does no stateful analysis. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #include "llvm/Analysis/BasicAliasAnalysis.h" |
16 | #include "llvm/ADT/APInt.h" |
17 | #include "llvm/ADT/ScopeExit.h" |
18 | #include "llvm/ADT/SmallPtrSet.h" |
19 | #include "llvm/ADT/SmallVector.h" |
20 | #include "llvm/ADT/Statistic.h" |
21 | #include "llvm/Analysis/AliasAnalysis.h" |
22 | #include "llvm/Analysis/AssumptionCache.h" |
23 | #include "llvm/Analysis/CFG.h" |
24 | #include "llvm/Analysis/CaptureTracking.h" |
25 | #include "llvm/Analysis/MemoryBuiltins.h" |
26 | #include "llvm/Analysis/MemoryLocation.h" |
27 | #include "llvm/Analysis/PhiValues.h" |
28 | #include "llvm/Analysis/TargetLibraryInfo.h" |
29 | #include "llvm/Analysis/ValueTracking.h" |
30 | #include "llvm/IR/Argument.h" |
31 | #include "llvm/IR/Attributes.h" |
32 | #include "llvm/IR/Constant.h" |
33 | #include "llvm/IR/ConstantRange.h" |
34 | #include "llvm/IR/Constants.h" |
35 | #include "llvm/IR/DataLayout.h" |
36 | #include "llvm/IR/DerivedTypes.h" |
37 | #include "llvm/IR/Dominators.h" |
38 | #include "llvm/IR/Function.h" |
39 | #include "llvm/IR/GetElementPtrTypeIterator.h" |
40 | #include "llvm/IR/GlobalAlias.h" |
41 | #include "llvm/IR/GlobalVariable.h" |
42 | #include "llvm/IR/InstrTypes.h" |
43 | #include "llvm/IR/Instruction.h" |
44 | #include "llvm/IR/Instructions.h" |
45 | #include "llvm/IR/IntrinsicInst.h" |
46 | #include "llvm/IR/Intrinsics.h" |
47 | #include "llvm/IR/Operator.h" |
48 | #include "llvm/IR/Type.h" |
49 | #include "llvm/IR/User.h" |
50 | #include "llvm/IR/Value.h" |
51 | #include "llvm/InitializePasses.h" |
52 | #include "llvm/Pass.h" |
53 | #include "llvm/Support/Casting.h" |
54 | #include "llvm/Support/CommandLine.h" |
55 | #include "llvm/Support/Compiler.h" |
56 | #include "llvm/Support/KnownBits.h" |
57 | #include <cassert> |
58 | #include <cstdint> |
59 | #include <cstdlib> |
60 | #include <utility> |
61 | |
62 | #define DEBUG_TYPE "basicaa"
63 | |
64 | using namespace llvm; |
65 | |
66 | /// Enable analysis of recursive PHI nodes. |
67 | static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden, |
68 | cl::init(true)); |
69 | |
70 | /// SearchLimitReached / SearchTimes show how often the limit to
71 | /// decompose GEPs is reached. This affects the precision of basic
72 | /// alias analysis.
73 | STATISTIC(SearchLimitReached, "Number of times the limit to "
74 | "decompose GEPs is reached");
75 | STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
76 | |
77 | /// Cutoff after which to stop analysing a set of phi nodes potentially involved |
78 | /// in a cycle. Because we are analysing 'through' phi nodes, we need to be |
79 | /// careful with value equivalence. We use reachability to make sure a value |
80 | /// cannot be involved in a cycle. |
81 | const unsigned MaxNumPhiBBsValueReachabilityCheck = 20; |
82 | |
83 | // The max limit of the search depth in DecomposeGEPExpression() and |
84 | // getUnderlyingObject(). |
85 | static const unsigned MaxLookupSearchDepth = 6; |
86 | |
87 | bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA, |
88 | FunctionAnalysisManager::Invalidator &Inv) { |
89 | // We don't care if this analysis itself is preserved, it has no state. But |
90 | // we need to check that the analyses it depends on have been. Note that we |
91 | // may be created without handles to some analyses and in that case don't |
92 | // depend on them. |
93 | if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) || |
94 | (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) || |
95 | (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA))) |
96 | return true; |
97 | |
98 | // Otherwise this analysis result remains valid. |
99 | return false; |
100 | } |
101 | |
102 | //===----------------------------------------------------------------------===// |
103 | // Useful predicates |
104 | //===----------------------------------------------------------------------===// |
105 | |
106 | /// Returns true if the pointer is one which would have been considered an |
107 | /// escape by isNonEscapingLocalObject. |
108 | static bool isEscapeSource(const Value *V) { |
109 | if (isa<CallBase>(V)) |
110 | return true; |
111 | |
112 | // The load case works because isNonEscapingLocalObject considers all |
113 | // stores to be escapes (it passes true for the StoreCaptures argument |
114 | // to PointerMayBeCaptured). |
115 | if (isa<LoadInst>(V)) |
116 | return true; |
117 | |
118 | // The inttoptr case works because isNonEscapingLocalObject considers all |
119 | // means of converting or equating a pointer to an int (ptrtoint, ptr store |
120 | // which could be followed by an integer load, ptr<->int compare) as |
121 | // escaping, and objects located at well-known addresses via platform-specific |
122 | // means cannot be considered non-escaping local objects. |
123 | if (isa<IntToPtrInst>(V)) |
124 | return true; |
125 | |
126 | return false; |
127 | } |
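// (Editor's note, not in the original source) Why loads count as escape
// sources: if an alloca's address is stored to memory, a later load may
// reproduce that address. isNonEscapingLocalObject treats every store as
// a capture, so such an alloca is never classified as non-escaping, and
// treating the load as an escape source here stays sound.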
128 | |
129 | /// Returns the size of the object specified by V or UnknownSize if unknown. |
130 | static uint64_t getObjectSize(const Value *V, const DataLayout &DL, |
131 | const TargetLibraryInfo &TLI, |
132 | bool NullIsValidLoc, |
133 | bool RoundToAlign = false) { |
134 | uint64_t Size; |
135 | ObjectSizeOpts Opts; |
136 | Opts.RoundToAlign = RoundToAlign; |
137 | Opts.NullIsUnknownSize = NullIsValidLoc; |
138 | if (getObjectSize(V, Size, DL, &TLI, Opts)) |
139 | return Size; |
140 | return MemoryLocation::UnknownSize; |
141 | } |
142 | |
143 | /// Returns true if we can prove that the object specified by V is smaller than |
144 | /// Size. |
145 | static bool isObjectSmallerThan(const Value *V, uint64_t Size, |
146 | const DataLayout &DL, |
147 | const TargetLibraryInfo &TLI, |
148 | bool NullIsValidLoc) { |
149 | // Note that the meaning of "object" is slightly different in the
150 | // following contexts: |
151 | // c1: llvm::getObjectSize() |
152 | // c2: llvm.objectsize() intrinsic |
153 | // c3: isObjectSmallerThan() |
154 | // c1 and c2 share the same meaning; however, the meaning of "object" in c3 |
155 | // refers to the "entire object". |
156 | // |
157 | // Consider this example: |
158 | // char *p = (char*)malloc(100) |
159 | // char *q = p+80; |
160 | // |
161 | // In the context of c1 and c2, the "object" pointed by q refers to the |
162 | // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20. |
163 | // |
164 | // However, in the context of c3, the "object" refers to the chunk of memory
165 | // being allocated. So, the "object" has 100 bytes, and q points into the
166 | // middle of the "object". If q is passed to isObjectSmallerThan() as the 1st
167 | // parameter, then before llvm::getObjectSize() is called to get the size of
168 | // the entire object, we should:
169 | //  - either rewind the pointer q to the base-address of the object in
170 | //    question (in this case rewind to p), or
171 | //  - just give up. It is up to the caller to make sure the pointer is
172 | //    pointing to the base address of the object.
173 | //
174 | // We go for the 2nd option for simplicity.
175 | if (!isIdentifiedObject(V)) |
176 | return false; |
177 | |
178 | // This function needs to use the aligned object size because we allow |
179 | // reads a bit past the end given sufficient alignment. |
180 | uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc, |
181 | /*RoundToAlign*/ true); |
182 | |
183 | return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size; |
184 | } |
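// (Editor's illustration, hypothetical values) With RoundToAlign, an
// "alloca i32, align 8" is treated as 8 bytes, so
// isObjectSmallerThan(V, 6, ...) returns false: a 6-byte access is not
// provably out of bounds, matching the aligned-object-size note above.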
185 | |
186 | /// Return the minimal extent from \p V to the end of the underlying object, |
187 | /// assuming the result is used in an aliasing query. E.g., we do use the query |
188 | /// location size and the fact that null pointers cannot alias here. |
189 | static uint64_t getMinimalExtentFrom(const Value &V, |
190 | const LocationSize &LocSize, |
191 | const DataLayout &DL, |
192 | bool NullIsValidLoc) { |
193 | // If we have dereferenceability information we know a lower bound for the |
194 | // extent as accesses for a lower offset would be valid. We need to exclude |
195 | // the "or null" part if null is a valid pointer. We can ignore frees, as an |
196 | // access after free would be undefined behavior. |
197 | bool CanBeNull, CanBeFreed; |
198 | uint64_t DerefBytes = |
199 | V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed); |
200 | DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes; |
201 | // If queried with a precise location size, we assume that location size is
202 | // accessed, and thus valid.
203 | if (LocSize.isPrecise()) |
204 | DerefBytes = std::max(DerefBytes, LocSize.getValue()); |
205 | return DerefBytes; |
206 | } |
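// (Editor's illustration, hypothetical IR) For an argument declared as
//   void @f(i8* dereferenceable(64) %p)
// a query on %p with a precise LocationSize of 16 returns
// max(64, 16) = 64: accesses below the dereferenceable extent must be
// valid, and the queried 16 bytes are assumed to be accessed.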
207 | |
208 | /// Returns true if we can prove that the object specified by V has size Size. |
209 | static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL, |
210 | const TargetLibraryInfo &TLI, bool NullIsValidLoc) { |
211 | uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc); |
212 | return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size; |
213 | } |
214 | |
215 | //===----------------------------------------------------------------------===// |
216 | // CaptureInfo implementations |
217 | //===----------------------------------------------------------------------===// |
218 | |
219 | CaptureInfo::~CaptureInfo() = default; |
220 | |
221 | bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object, |
222 | const Instruction *I) { |
223 | return isNonEscapingLocalObject(Object, &IsCapturedCache); |
224 | } |
225 | |
226 | bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object, |
227 | const Instruction *I) { |
228 | if (!isIdentifiedFunctionLocal(Object)) |
229 | return false; |
230 | |
231 | auto Iter = EarliestEscapes.insert({Object, nullptr}); |
232 | if (Iter.second) { |
233 | Instruction *EarliestCapture = FindEarliestCapture( |
234 | Object, *const_cast<Function *>(I->getFunction()), |
235 | /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT, EphValues); |
236 | if (EarliestCapture) { |
237 | auto Ins = Inst2Obj.insert({EarliestCapture, {}}); |
238 | Ins.first->second.push_back(Object); |
239 | } |
240 | Iter.first->second = EarliestCapture; |
241 | } |
242 | |
243 | // No capturing instruction. |
244 | if (!Iter.first->second) |
245 | return true; |
246 | |
247 | return I != Iter.first->second && |
248 | !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI); |
249 | } |
250 | |
251 | void EarliestEscapeInfo::removeInstruction(Instruction *I) { |
252 | auto Iter = Inst2Obj.find(I); |
253 | if (Iter != Inst2Obj.end()) { |
254 | for (const Value *Obj : Iter->second) |
255 | EarliestEscapes.erase(Obj); |
256 | Inst2Obj.erase(I); |
257 | } |
258 | } |
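// (Editor's note) The insert-then-fill idiom in isNotCapturedBeforeOrAt
// caches one FindEarliestCapture query per object: Iter.second is true
// only on the first lookup, and the Inst2Obj reverse map lets
// removeInstruction above drop stale entries when the capturing
// instruction is erased.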
259 | |
260 | //===----------------------------------------------------------------------===// |
261 | // GetElementPtr Instruction Decomposition and Analysis |
262 | //===----------------------------------------------------------------------===// |
263 | |
264 | namespace { |
265 | /// Represents zext(sext(trunc(V))). |
266 | struct CastedValue { |
267 | const Value *V; |
268 | unsigned ZExtBits = 0; |
269 | unsigned SExtBits = 0; |
270 | unsigned TruncBits = 0; |
271 | |
272 | explicit CastedValue(const Value *V) : V(V) {} |
273 | explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits, |
274 | unsigned TruncBits) |
275 | : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {} |
276 | |
277 | unsigned getBitWidth() const { |
278 | return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits + |
279 | SExtBits; |
280 | } |
281 | |
282 | CastedValue withValue(const Value *NewV) const { |
283 | return CastedValue(NewV, ZExtBits, SExtBits, TruncBits); |
284 | } |
285 | |
286 | /// Replace V with zext(NewV) |
287 | CastedValue withZExtOfValue(const Value *NewV) const { |
288 | unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() - |
289 | NewV->getType()->getPrimitiveSizeInBits(); |
290 | if (ExtendBy <= TruncBits) |
291 | return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy); |
292 | |
293 | // zext(sext(zext(NewV))) == zext(zext(zext(NewV))) |
294 | ExtendBy -= TruncBits; |
295 | return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0); |
296 | } |
297 | |
298 | /// Replace V with sext(NewV) |
299 | CastedValue withSExtOfValue(const Value *NewV) const { |
300 | unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() - |
301 | NewV->getType()->getPrimitiveSizeInBits(); |
302 | if (ExtendBy <= TruncBits) |
303 | return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy); |
304 | |
305 | // zext(sext(sext(NewV))) |
306 | ExtendBy -= TruncBits; |
307 | return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0); |
308 | } |
309 | |
310 | APInt evaluateWith(APInt N) const { |
311 | assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
312 | "Incompatible bit width");
313 | if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits); |
314 | if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits); |
315 | if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits); |
316 | return N; |
317 | } |
318 | |
319 | ConstantRange evaluateWith(ConstantRange N) const { |
320 | assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
321 | "Incompatible bit width");
322 | if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits); |
323 | if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits); |
324 | if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits); |
325 | return N; |
326 | } |
327 | |
328 | bool canDistributeOver(bool NUW, bool NSW) const { |
329 | // zext(x op<nuw> y) == zext(x) op<nuw> zext(y) |
330 | // sext(x op<nsw> y) == sext(x) op<nsw> sext(y) |
331 | // trunc(x op y) == trunc(x) op trunc(y) |
332 | return (!ZExtBits || NUW) && (!SExtBits || NSW); |
333 | } |
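// (Editor's counterexample, i8, not in the original source) zext does
// not distribute over a plain add: zext(add i8 255, 1) to i16 is 0,
// while add i16 (zext 255), (zext 1) is 256. A nuw flag rules out the
// wrap, making the two forms agree.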
334 | |
335 | bool hasSameCastsAs(const CastedValue &Other) const { |
336 | return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits && |
337 | TruncBits == Other.TruncBits; |
338 | } |
339 | }; |
340 | |
341 | /// Represents zext(sext(trunc(V))) * Scale + Offset. |
342 | struct LinearExpression { |
343 | CastedValue Val; |
344 | APInt Scale; |
345 | APInt Offset; |
346 | |
347 | /// True if all operations in this expression are NSW. |
348 | bool IsNSW; |
349 | |
350 | LinearExpression(const CastedValue &Val, const APInt &Scale, |
351 | const APInt &Offset, bool IsNSW) |
352 | : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {} |
353 | |
354 | LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) { |
355 | unsigned BitWidth = Val.getBitWidth(); |
356 | Scale = APInt(BitWidth, 1); |
357 | Offset = APInt(BitWidth, 0); |
358 | } |
359 | |
360 | LinearExpression mul(const APInt &Other, bool MulIsNSW) const { |
361 | // The check for zero offset is necessary, because generally |
362 | // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z). |
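// (Editor's counterexample, i8) X=100, Y=-50, Z=2: X+Y=50 and 50*2=100
// are both NSW, yet distributing needs X*Z=200, which signed-overflows.
// Hence the Offset.isZero() check below before trusting MulIsNSW.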
363 | bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero())); |
364 | return LinearExpression(Val, Scale * Other, Offset * Other, NSW); |
365 | } |
366 | }; |
367 | } |
368 | |
369 | /// Analyzes the specified value as a linear expression: "A*V + B", where A and |
370 | /// B are constant integers. |
371 | static LinearExpression GetLinearExpression( |
372 | const CastedValue &Val, const DataLayout &DL, unsigned Depth, |
373 | AssumptionCache *AC, DominatorTree *DT) { |
374 | // Limit our recursion depth. |
375 | if (Depth == 6) |
376 | return Val; |
377 | |
378 | if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V)) |
379 | return LinearExpression(Val, APInt(Val.getBitWidth(), 0), |
380 | Val.evaluateWith(Const->getValue()), true); |
381 | |
382 | if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) { |
383 | if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) { |
384 | APInt RHS = Val.evaluateWith(RHSC->getValue()); |
385 | // The only non-OBO case we deal with is or, and only limited to the |
386 | // case where it is both nuw and nsw. |
387 | bool NUW = true, NSW = true; |
388 | if (isa<OverflowingBinaryOperator>(BOp)) { |
389 | NUW &= BOp->hasNoUnsignedWrap(); |
390 | NSW &= BOp->hasNoSignedWrap(); |
391 | } |
392 | if (!Val.canDistributeOver(NUW, NSW)) |
393 | return Val; |
394 | |
395 | // While we can distribute over trunc, we cannot preserve nowrap flags |
396 | // in that case. |
397 | if (Val.TruncBits) |
398 | NUW = NSW = false; |
Value stored to 'NUW' is never read
399 | |
400 | LinearExpression E(Val); |
401 | switch (BOp->getOpcode()) { |
402 | default: |
403 | // We don't understand this instruction, so we can't decompose it any |
404 | // further. |
405 | return Val; |
406 | case Instruction::Or: |
407 | // X|C == X+C if all the bits in C are unset in X. Otherwise we can't |
408 | // analyze it. |
409 | if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC, |
410 | BOp, DT)) |
411 | return Val; |
412 | |
413 | LLVM_FALLTHROUGH;
414 | case Instruction::Add: { |
415 | E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, |
416 | Depth + 1, AC, DT); |
417 | E.Offset += RHS; |
418 | E.IsNSW &= NSW; |
419 | break; |
420 | } |
421 | case Instruction::Sub: { |
422 | E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, |
423 | Depth + 1, AC, DT); |
424 | E.Offset -= RHS; |
425 | E.IsNSW &= NSW; |
426 | break; |
427 | } |
428 | case Instruction::Mul: |
429 | E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, |
430 | Depth + 1, AC, DT) |
431 | .mul(RHS, NSW); |
432 | break; |
433 | case Instruction::Shl: |
434 | // We're trying to linearize an expression of the kind: |
435 | // shl i8 -128, 36 |
436 | // where the shift count exceeds the bitwidth of the type. |
437 | // We can't decompose this further (the expression would return |
438 | // a poison value). |
439 | if (RHS.getLimitedValue() > Val.getBitWidth()) |
440 | return Val; |
441 | |
442 | E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, |
443 | Depth + 1, AC, DT); |
444 | E.Offset <<= RHS.getLimitedValue(); |
445 | E.Scale <<= RHS.getLimitedValue(); |
446 | E.IsNSW &= NSW; |
447 | break; |
448 | } |
449 | return E; |
450 | } |
451 | } |
452 | |
453 | if (isa<ZExtInst>(Val.V)) |
454 | return GetLinearExpression( |
455 | Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)), |
456 | DL, Depth + 1, AC, DT); |
457 | |
458 | if (isa<SExtInst>(Val.V)) |
459 | return GetLinearExpression( |
460 | Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)), |
461 | DL, Depth + 1, AC, DT); |
462 | |
463 | return Val; |
464 | } |
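// (Editor's illustration, hypothetical IR) For
//   %a = shl nsw i64 %x, 3
//   %b = add nsw i64 %a, 4
// GetLinearExpression on %b yields Val = %x, Scale = 8, Offset = 4,
// i.e. the linear form 8*%x + 4, with IsNSW carried over from the flags.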
465 | |
466 | /// Ensure a pointer offset fits in an integer of size IndexSize (in bits)
467 | /// when that size is smaller than the maximum index size. This matters, in
468 | /// particular, for 32-bit pointers with negative indices that rely on two's
469 | /// complement wrap-arounds for precise alias information when the maximum
470 | /// index size is 64 bits.
471 | static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) { |
472 | assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
473 | unsigned ShiftBits = Offset.getBitWidth() - IndexSize; |
474 | return (Offset << ShiftBits).ashr(ShiftBits); |
475 | } |
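// (Editor's worked example) With a 64-bit offset and IndexSize = 32,
// Offset = 0x00000000FFFFFFFF shifts left by 32 and arithmetic-shifts
// back to all ones, i.e. -1: the 32-bit two's complement wrap-around
// becomes explicit in the wider type.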
476 | |
477 | namespace { |
478 | // A linear transformation of a Value; this class represents |
479 | // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale. |
480 | struct VariableGEPIndex { |
481 | CastedValue Val; |
482 | APInt Scale; |
483 | |
484 | // Context instruction to use when querying information about this index. |
485 | const Instruction *CxtI; |
486 | |
487 | /// True if all operations in this expression are NSW. |
488 | bool IsNSW; |
489 | |
490 | void dump() const { |
491 | print(dbgs()); |
492 | dbgs() << "\n"; |
493 | } |
494 | void print(raw_ostream &OS) const { |
495 | OS << "(V=" << Val.V->getName() |
496 | << ", zextbits=" << Val.ZExtBits |
497 | << ", sextbits=" << Val.SExtBits |
498 | << ", truncbits=" << Val.TruncBits |
499 | << ", scale=" << Scale << ")"; |
500 | } |
501 | }; |
502 | } |
503 | |
504 | // Represents the internal structure of a GEP, decomposed into a base pointer, |
505 | // constant offsets, and variable scaled indices. |
506 | struct BasicAAResult::DecomposedGEP { |
507 | // Base pointer of the GEP |
508 | const Value *Base; |
509 | // Total constant offset from base. |
510 | APInt Offset; |
511 | // Scaled variable (non-constant) indices. |
512 | SmallVector<VariableGEPIndex, 4> VarIndices; |
513 | // Are all operations inbounds GEPs or non-indexing operations? |
514 | // (None iff expression doesn't involve any geps) |
515 | Optional<bool> InBounds; |
516 | |
517 | void dump() const { |
518 | print(dbgs()); |
519 | dbgs() << "\n"; |
520 | } |
521 | void print(raw_ostream &OS) const { |
522 | OS << "(DecomposedGEP Base=" << Base->getName() |
523 | << ", Offset=" << Offset |
524 | << ", VarIndices=["; |
525 | for (size_t i = 0; i < VarIndices.size(); i++) { |
526 | if (i != 0) |
527 | OS << ", "; |
528 | VarIndices[i].print(OS); |
529 | } |
530 | OS << "])"; |
531 | } |
532 | }; |
533 | |
534 | |
535 | /// If V is a symbolic pointer expression, decompose it into a base pointer |
536 | /// with a constant offset and a number of scaled symbolic offsets. |
537 | /// |
538 | /// The scaled symbolic offsets (represented by pairs of a Value* and a scale |
539 | /// in the VarIndices vector) are Value*'s that are known to be scaled by the |
540 | /// specified amount, but which may have other unrepresented high bits. As |
541 | /// such, the gep cannot necessarily be reconstructed from its decomposed form. |
542 | BasicAAResult::DecomposedGEP |
543 | BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL, |
544 | AssumptionCache *AC, DominatorTree *DT) { |
545 | // Limit recursion depth to limit compile time in crazy cases. |
546 | unsigned MaxLookup = MaxLookupSearchDepth; |
547 | SearchTimes++; |
548 | const Instruction *CxtI = dyn_cast<Instruction>(V); |
549 | |
550 | unsigned MaxIndexSize = DL.getMaxIndexSizeInBits(); |
551 | DecomposedGEP Decomposed; |
552 | Decomposed.Offset = APInt(MaxIndexSize, 0); |
553 | do { |
554 | // See if this is a bitcast or GEP. |
555 | const Operator *Op = dyn_cast<Operator>(V); |
556 | if (!Op) { |
557 | // The only non-operator case we can handle is a GlobalAlias.
558 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { |
559 | if (!GA->isInterposable()) { |
560 | V = GA->getAliasee(); |
561 | continue; |
562 | } |
563 | } |
564 | Decomposed.Base = V; |
565 | return Decomposed; |
566 | } |
567 | |
568 | if (Op->getOpcode() == Instruction::BitCast || |
569 | Op->getOpcode() == Instruction::AddrSpaceCast) { |
570 | V = Op->getOperand(0); |
571 | continue; |
572 | } |
573 | |
574 | const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op); |
575 | if (!GEPOp) { |
576 | if (const auto *PHI = dyn_cast<PHINode>(V)) { |
577 | // Look through single-arg phi nodes created by LCSSA. |
578 | if (PHI->getNumIncomingValues() == 1) { |
579 | V = PHI->getIncomingValue(0); |
580 | continue; |
581 | } |
582 | } else if (const auto *Call = dyn_cast<CallBase>(V)) { |
583 | // CaptureTracking can know about special capturing properties of some
584 | // intrinsics like launder.invariant.group that can't be expressed with
585 | // the attributes but that, for example, return an aliasing pointer.
586 | // Because some analyses may assume that a nocapture pointer is not
587 | // returned from certain special intrinsics (the function would have to
588 | // be marked with the returned attribute), it is crucial to use this
589 | // helper so that we stay in sync with CaptureTracking. Not using it may
590 | // cause weird miscompilations where 2 aliasing pointers are assumed
591 | // not to alias.
592 | if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) { |
593 | V = RP; |
594 | continue; |
595 | } |
596 | } |
597 | |
598 | Decomposed.Base = V; |
599 | return Decomposed; |
600 | } |
601 | |
602 | // Track whether we've seen at least one in bounds gep, and if so, whether |
603 | // all geps parsed were in bounds. |
604 | if (Decomposed.InBounds == None) |
605 | Decomposed.InBounds = GEPOp->isInBounds(); |
606 | else if (!GEPOp->isInBounds()) |
607 | Decomposed.InBounds = false; |
608 | |
609 | assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
610 | |
611 | // Don't attempt to analyze GEPs if index scale is not a compile-time |
612 | // constant. |
613 | if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) { |
614 | Decomposed.Base = V; |
615 | return Decomposed; |
616 | } |
617 | |
618 | unsigned AS = GEPOp->getPointerAddressSpace(); |
619 | // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices. |
620 | gep_type_iterator GTI = gep_type_begin(GEPOp); |
621 | unsigned IndexSize = DL.getIndexSizeInBits(AS); |
622 | // Assume all GEP operands are constants until proven otherwise. |
623 | bool GepHasConstantOffset = true; |
624 | for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end(); |
625 | I != E; ++I, ++GTI) { |
626 | const Value *Index = *I; |
627 | // Compute the (potentially symbolic) offset in bytes for this index. |
628 | if (StructType *STy = GTI.getStructTypeOrNull()) { |
629 | // For a struct, add the member offset. |
630 | unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); |
631 | if (FieldNo == 0) |
632 | continue; |
633 | |
634 | Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo); |
635 | continue; |
636 | } |
637 | |
638 | // For an array/pointer, add the element offset, explicitly scaled. |
639 | if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) { |
640 | if (CIdx->isZero()) |
641 | continue; |
642 | Decomposed.Offset += |
643 | DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() * |
644 | CIdx->getValue().sextOrTrunc(MaxIndexSize); |
645 | continue; |
646 | } |
647 | |
648 | GepHasConstantOffset = false; |
649 | |
650 | // If the integer type is smaller than the index size, it is implicitly |
651 | // sign extended or truncated to index size. |
652 | unsigned Width = Index->getType()->getIntegerBitWidth(); |
653 | unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0; |
654 | unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0; |
655 | LinearExpression LE = GetLinearExpression( |
656 | CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT); |
657 | |
658 | // Scale by the type size. |
659 | unsigned TypeSize = |
660 | DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize(); |
661 | LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds()); |
662 | Decomposed.Offset += LE.Offset.sextOrSelf(MaxIndexSize); |
663 | APInt Scale = LE.Scale.sextOrSelf(MaxIndexSize); |
664 | |
665 | // If we already had an occurrence of this index variable, merge this |
666 | // scale into it. For example, we want to handle: |
667 | // A[x][x] -> x*16 + x*4 -> x*20 |
668 | // This also ensures that 'x' only appears in the index list once. |
669 | for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) { |
670 | if (Decomposed.VarIndices[i].Val.V == LE.Val.V && |
671 | Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) { |
672 | Scale += Decomposed.VarIndices[i].Scale; |
673 | Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i); |
674 | break; |
675 | } |
676 | } |
677 | |
678 | // Make sure that we have a scale that makes sense for this target's |
679 | // index size. |
680 | Scale = adjustToIndexSize(Scale, IndexSize); |
681 | |
682 | if (!!Scale) { |
683 | VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW}; |
684 | Decomposed.VarIndices.push_back(Entry); |
685 | } |
686 | } |
687 | |
688 | // Take care of wrap-arounds |
689 | if (GepHasConstantOffset) |
690 | Decomposed.Offset = adjustToIndexSize(Decomposed.Offset, IndexSize); |
691 | |
692 | // Analyze the base pointer next. |
693 | V = GEPOp->getOperand(0); |
694 | } while (--MaxLookup); |
695 | |
696 | // If the chain of expressions is too deep, just return early. |
697 | Decomposed.Base = V; |
698 | SearchLimitReached++; |
699 | return Decomposed; |
700 | } |
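// (Editor's illustration, hypothetical IR) For
//   %p = getelementptr inbounds [10 x i32], [10 x i32]* %b, i64 0, i64 %i
// the decomposition is roughly Base = %b, Offset = 0, and a single
// VariableGEPIndex {V = %i, Scale = 4}: the address %b + 4*%i, with
// InBounds = true.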
701 | |
702 | /// Returns whether the given pointer value points to memory that is local to |
703 | /// the function, with global constants being considered local to all |
704 | /// functions. |
705 | bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc, |
706 | AAQueryInfo &AAQI, bool OrLocal) { |
707 | assert(Visited.empty() && "Visited must be cleared after use!");
708 | |
709 | unsigned MaxLookup = 8; |
710 | SmallVector<const Value *, 16> Worklist; |
711 | Worklist.push_back(Loc.Ptr); |
712 | do { |
713 | const Value *V = getUnderlyingObject(Worklist.pop_back_val()); |
714 | if (!Visited.insert(V).second) { |
715 | Visited.clear(); |
716 | return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); |
717 | } |
718 | |
719 | // An alloca instruction defines local memory. |
720 | if (OrLocal && isa<AllocaInst>(V)) |
721 | continue; |
722 | |
723 | // A global constant counts as local memory for our purposes. |
724 | if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) { |
725 | // Note: this doesn't require GV to be "ODR" because it isn't legal for a |
726 | // global to be marked constant in some modules and non-constant in |
727 | // others. GV may even be a declaration, not a definition. |
728 | if (!GV->isConstant()) { |
729 | Visited.clear(); |
730 | return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); |
731 | } |
732 | continue; |
733 | } |
734 | |
735 | // If both select values point to local memory, then so does the select. |
736 | if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { |
737 | Worklist.push_back(SI->getTrueValue()); |
738 | Worklist.push_back(SI->getFalseValue()); |
739 | continue; |
740 | } |
741 | |
742 | // If all values incoming to a phi node point to local memory, then so does |
743 | // the phi. |
744 | if (const PHINode *PN = dyn_cast<PHINode>(V)) { |
745 | // Don't bother inspecting phi nodes with many operands. |
746 | if (PN->getNumIncomingValues() > MaxLookup) { |
747 | Visited.clear(); |
748 | return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); |
749 | } |
750 | append_range(Worklist, PN->incoming_values()); |
751 | continue; |
752 | } |
753 | |
754 | // Otherwise be conservative. |
755 | Visited.clear(); |
756 | return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); |
757 | } while (!Worklist.empty() && --MaxLookup); |
758 | |
759 | Visited.clear(); |
760 | return Worklist.empty(); |
761 | } |
762 | |
763 | static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) { |
764 | const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call); |
765 | return II && II->getIntrinsicID() == IID; |
766 | } |
767 | |
768 | /// Returns the behavior when calling the given call site. |
769 | FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) { |
770 | if (Call->doesNotAccessMemory()) |
771 | // Can't do better than this. |
772 | return FMRB_DoesNotAccessMemory; |
773 | |
774 | FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior; |
775 | |
776 | // If the callsite knows it only reads memory, don't return worse |
777 | // than that. |
778 | if (Call->onlyReadsMemory()) |
779 | Min = FMRB_OnlyReadsMemory; |
780 | else if (Call->onlyWritesMemory()) |
781 | Min = FMRB_OnlyWritesMemory; |
782 | |
783 | if (Call->onlyAccessesArgMemory()) |
784 | Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees); |
785 | else if (Call->onlyAccessesInaccessibleMemory()) |
786 | Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem); |
787 | else if (Call->onlyAccessesInaccessibleMemOrArgMem()) |
788 | Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem); |
789 | |
790 | // If the call has operand bundles then aliasing attributes from the function |
791 | // it calls do not directly apply to the call. This can be made more precise |
792 | // in the future. |
793 | if (!Call->hasOperandBundles()) |
794 | if (const Function *F = Call->getCalledFunction()) |
795 | Min = |
796 | FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F)); |
797 | |
798 | return Min; |
799 | } |
800 | |
801 | /// Returns the behavior when calling the given function. For use when the call |
802 | /// site is not known. |
803 | FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) { |
804 | // If the function declares it doesn't access memory, we can't do better. |
805 | if (F->doesNotAccessMemory()) |
806 | return FMRB_DoesNotAccessMemory; |
807 | |
808 | FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior; |
809 | |
810 | // If the function declares it only reads memory, go with that. |
811 | if (F->onlyReadsMemory()) |
812 | Min = FMRB_OnlyReadsMemory; |
813 | else if (F->onlyWritesMemory()) |
814 | Min = FMRB_OnlyWritesMemory; |
815 | |
816 | if (F->onlyAccessesArgMemory()) |
817 | Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees); |
818 | else if (F->onlyAccessesInaccessibleMemory()) |
819 | Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem); |
820 | else if (F->onlyAccessesInaccessibleMemOrArgMem()) |
821 | Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem); |
822 | |
823 | return Min; |
824 | } |
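// (Editor's note) FunctionModRefBehavior values are bitmasks, so the
// 'Min &' steps above compose as set intersection: a function that is
// both readonly and argmemonly narrows to exactly "only reads
// argument-pointed-to memory".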
825 | |
826 | /// Returns true if this is a writeonly (i.e Mod only) parameter. |
827 | static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx, |
828 | const TargetLibraryInfo &TLI) { |
829 | if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly)) |
830 | return true; |
831 | |
832 | // We can bound the aliasing properties of memset_pattern16 just as we can |
833 | // for memcpy/memset. This is particularly important because the |
834 | // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16 |
835 | // whenever possible. |
836 | // FIXME Consider handling this in InferFunctionAttr.cpp together with other |
837 | // attributes. |
838 | LibFunc F; |
839 | if (Call->getCalledFunction() && |
840 | TLI.getLibFunc(*Call->getCalledFunction(), F) && |
841 | F == LibFunc_memset_pattern16 && TLI.has(F)) |
842 | if (ArgIdx == 0) |
843 | return true; |
844 | |
845 | // TODO: memset_pattern4, memset_pattern8 |
846 | // TODO: _chk variants |
847 | // TODO: strcmp, strcpy |
848 | |
849 | return false; |
850 | } |
851 | |
852 | ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call, |
853 | unsigned ArgIdx) { |
854 | // Checking for known builtin intrinsics and target library functions. |
855 | if (isWriteOnlyParam(Call, ArgIdx, TLI)) |
856 | return ModRefInfo::Mod; |
857 | |
858 | if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly)) |
859 | return ModRefInfo::Ref; |
860 | |
861 | if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone)) |
862 | return ModRefInfo::NoModRef; |
863 | |
864 | return AAResultBase::getArgModRefInfo(Call, ArgIdx); |
865 | } |
866 | |
867 | #ifndef NDEBUG |
868 | static const Function *getParent(const Value *V) { |
869 | if (const Instruction *inst = dyn_cast<Instruction>(V)) { |
870 | if (!inst->getParent()) |
871 | return nullptr; |
872 | return inst->getParent()->getParent(); |
873 | } |
874 | |
875 | if (const Argument *arg = dyn_cast<Argument>(V)) |
876 | return arg->getParent(); |
877 | |
878 | return nullptr; |
879 | } |
880 | |
881 | static bool notDifferentParent(const Value *O1, const Value *O2) { |
882 | |
883 | const Function *F1 = getParent(O1); |
884 | const Function *F2 = getParent(O2); |
885 | |
886 | return !F1 || !F2 || F1 == F2; |
887 | } |
888 | #endif |
889 | |
890 | AliasResult BasicAAResult::alias(const MemoryLocation &LocA, |
891 | const MemoryLocation &LocB, |
892 | AAQueryInfo &AAQI) { |
893 | assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
894 | "BasicAliasAnalysis doesn't support interprocedural queries.");
895 | return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI); |
896 | } |
897 | |
898 | /// Checks to see if the specified callsite can clobber the specified memory |
899 | /// object. |
900 | /// |
901 | /// Since we only look at local properties of this function, we really can't |
902 | /// say much about this query. We do, however, use simple "address taken" |
903 | /// analysis on local objects. |
904 | ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call, |
905 | const MemoryLocation &Loc, |
906 | AAQueryInfo &AAQI) { |
907 | assert(notDifferentParent(Call, Loc.Ptr) &&
908 | "AliasAnalysis query involving multiple functions!");
909 | |
910 | const Value *Object = getUnderlyingObject(Loc.Ptr); |
911 | |
912 | // Calls marked 'tail' cannot read or write allocas from the current frame |
913 | // because the current frame might be destroyed by the time they run. However, |
914 | // a tail call may use an alloca with byval. Calling with byval copies the |
915 | // contents of the alloca into argument registers or stack slots, so there is |
916 | // no lifetime issue. |
917 | if (isa<AllocaInst>(Object)) |
918 | if (const CallInst *CI = dyn_cast<CallInst>(Call)) |
919 | if (CI->isTailCall() && |
920 | !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal)) |
921 | return ModRefInfo::NoModRef; |
922 | |
923 | // Stack restore is able to modify unescaped dynamic allocas. Assume it may |
924 | // modify them even though the alloca is not escaped. |
925 | if (auto *AI = dyn_cast<AllocaInst>(Object)) |
926 | if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore)) |
927 | return ModRefInfo::Mod; |
928 | |
929 | // If the pointer is to a locally allocated object that does not escape, |
930 | // then the call can not mod/ref the pointer unless the call takes the pointer |
931 | // as an argument, and itself doesn't capture it. |
932 | if (!isa<Constant>(Object) && Call != Object && |
933 | AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) { |
934 | |
935 | // Optimistically assume that call doesn't touch Object and check this |
936 | // assumption in the following loop. |
937 | ModRefInfo Result = ModRefInfo::NoModRef; |
938 | bool IsMustAlias = true; |
939 | |
940 | unsigned OperandNo = 0; |
941 | for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end(); |
942 | CI != CE; ++CI, ++OperandNo) { |
943 | // Only look at the no-capture or byval pointer arguments. If this |
944 | // pointer were passed to arguments that were neither of these, then it |
945 | // couldn't be no-capture. |
946 | if (!(*CI)->getType()->isPointerTy() || |
947 | (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() && |
948 | !Call->isByValArgument(OperandNo))) |
949 | continue; |
950 | |
951 | // Call doesn't access memory through this operand, so we don't care |
952 | // if it aliases with Object. |
953 | if (Call->doesNotAccessMemory(OperandNo)) |
954 | continue; |
955 | |
956 | // If this is a no-capture pointer argument, see if we can tell that it |
957 | // is impossible to alias the pointer we're checking. |
958 | AliasResult AR = getBestAAResults().alias( |
959 | MemoryLocation::getBeforeOrAfter(*CI), |
960 | MemoryLocation::getBeforeOrAfter(Object), AAQI); |
961 | if (AR != AliasResult::MustAlias) |
962 | IsMustAlias = false; |
963 | // Operand doesn't alias 'Object', continue looking for other aliases |
964 | if (AR == AliasResult::NoAlias) |
965 | continue; |
966 | // Operand aliases 'Object', but call doesn't modify it. Strengthen
967 | // initial assumption and keep looking in case there are more aliases.
968 | if (Call->onlyReadsMemory(OperandNo)) { |
969 | Result = setRef(Result); |
970 | continue; |
971 | } |
972 | // Operand aliases 'Object' but call only writes into it. |
973 | if (Call->onlyWritesMemory(OperandNo)) { |
974 | Result = setMod(Result); |
975 | continue; |
976 | } |
977 | // This operand aliases 'Object' and call reads and writes into it. |
978 | // Setting ModRef will not yield an early return below, MustAlias is not |
979 | // used further. |
980 | Result = ModRefInfo::ModRef; |
981 | break; |
982 | } |
983 | |
984 | // No operand aliases, reset Must bit. Add below if at least one aliases |
985 | // and all aliases found are MustAlias. |
986 | if (isNoModRef(Result)) |
987 | IsMustAlias = false; |
988 | |
989 | // Early return if we improved mod ref information |
990 | if (!isModAndRefSet(Result)) { |
991 | if (isNoModRef(Result)) |
992 | return ModRefInfo::NoModRef; |
993 | return IsMustAlias ? setMust(Result) : clearMust(Result); |
994 | } |
995 | } |
996 | |
997 | // If the call is malloc/calloc like, we can assume that it doesn't |
998 | // modify any IR visible value. This is only valid because we assume these |
999 | // routines do not read values visible in the IR. TODO: Consider special |
1000 | // casing realloc and strdup routines which access only their arguments as |
1001 | // well. Or alternatively, replace all of this with inaccessiblememonly once |
1002 | // that's implemented fully. |
1003 | if (isMallocOrCallocLikeFn(Call, &TLI)) { |
1004 | // Be conservative if the accessed pointer may alias the allocation - |
1005 | // fallback to the generic handling below. |
1006 | if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc, |
1007 | AAQI) == AliasResult::NoAlias) |
1008 | return ModRefInfo::NoModRef; |
1009 | } |
1010 | |
1011 | // Ideally, there should be no need to special-case memcpy/memmove
1012 | // intrinsics here, since the general machinery (based on memory attributes)
1013 | // should already handle them just fine. Unfortunately, it doesn't, due to a
1014 | // deficiency in operand bundle support. At the moment it's not clear if the
1015 | // complexity of enhancing the general mechanism is worth it.
1016 | // TODO: Consider improving operand bundle support in the general mechanism.
1017 | if (auto *Inst = dyn_cast<AnyMemTransferInst>(Call)) { |
1018 | AliasResult SrcAA = |
1019 | getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI); |
1020 | AliasResult DestAA = |
1021 | getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI); |
1022 | // It's also possible for Loc to alias both src and dest, or neither. |
1023 | ModRefInfo rv = ModRefInfo::NoModRef; |
1024 | if (SrcAA != AliasResult::NoAlias || Call->hasReadingOperandBundles()) |
1025 | rv = setRef(rv); |
1026 | if (DestAA != AliasResult::NoAlias || Call->hasClobberingOperandBundles()) |
1027 | rv = setMod(rv); |
1028 | return rv; |
1029 | } |
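// (Editor's illustration) For memcpy(%dst, %src, %n): if Loc can alias
// only %src the result is Ref; only %dst, Mod; both, ModRef; neither,
// NoModRef (subject to the operand-bundle checks above).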
1030 | |
1031 | // Guard intrinsics are marked as arbitrarily writing so that proper control |
1032 | // dependencies are maintained but they never mod any particular memory
1033 | // location. |
1034 | // |
1035 | // *Unlike* assumes, guard intrinsics are modeled as reading memory since the |
1036 | // heap state at the point the guard is issued needs to be consistent in case |
1037 | // the guard invokes the "deopt" continuation. |
1038 | if (isIntrinsicCall(Call, Intrinsic::experimental_guard)) |
1039 | return ModRefInfo::Ref; |
1040 | // The same applies to deoptimize which is essentially a guard(false). |
1041 | if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize)) |
1042 | return ModRefInfo::Ref; |
1043 | |
1044 | // Like assumes, invariant.start intrinsics were also marked as arbitrarily |
1045 | // writing so that proper control dependencies are maintained but they never |
1046 | // mod any particular memory location visible to the IR. |
1047 | // *Unlike* assumes (which are now modeled as NoModRef), invariant.start |
1048 | // intrinsic is now modeled as reading memory. This prevents hoisting the |
1049 | // invariant.start intrinsic over stores. Consider: |
1050 | // *ptr = 40; |
1051 | // *ptr = 50; |
1052 | // invariant_start(ptr) |
1053 | // int val = *ptr; |
1054 | // print(val); |
1055 | // |
1056 | // This cannot be transformed to: |
1057 | // |
1058 | // *ptr = 40; |
1059 | // invariant_start(ptr) |
1060 | // *ptr = 50; |
1061 | // int val = *ptr; |
1062 | // print(val); |
1063 | // |
1064 | // The transformation will cause the second store to be ignored (based on |
1065 | // rules of invariant.start) and print 40, while the first program always |
1066 | // prints 50. |
1067 | if (isIntrinsicCall(Call, Intrinsic::invariant_start)) |
1068 | return ModRefInfo::Ref; |
1069 | |
1070 | // The AAResultBase base class has some smarts, lets use them. |
1071 | return AAResultBase::getModRefInfo(Call, Loc, AAQI); |
1072 | } |
1073 | |
1074 | ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1, |
1075 | const CallBase *Call2, |
1076 | AAQueryInfo &AAQI) { |
1077 | // Guard intrinsics are marked as arbitrarily writing so that proper control |
1078 | // dependencies are maintained but they never mod any particular memory
1079 | // location. |
1080 | // |
1081 | // *Unlike* assumes, guard intrinsics are modeled as reading memory since the |
1082 | // heap state at the point the guard is issued needs to be consistent in case |
1083 | // the guard invokes the "deopt" continuation. |
1084 | |
1085 | // NB! This function is *not* commutative, so we special case two |
1086 | // possibilities for guard intrinsics. |
1087 | |
1088 | if (isIntrinsicCall(Call1, Intrinsic::experimental_guard)) |
1089 | return isModSet(createModRefInfo(getModRefBehavior(Call2))) |
1090 | ? ModRefInfo::Ref |
1091 | : ModRefInfo::NoModRef; |
1092 | |
1093 | if (isIntrinsicCall(Call2, Intrinsic::experimental_guard)) |
1094 | return isModSet(createModRefInfo(getModRefBehavior(Call1))) |
1095 | ? ModRefInfo::Mod |
1096 | : ModRefInfo::NoModRef; |
1097 | |
1098 | // The AAResultBase base class has some smarts, lets use them. |
1099 | return AAResultBase::getModRefInfo(Call1, Call2, AAQI); |
1100 | } |
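// (Editor's illustration) If Call1 is a guard and Call2 is readonly,
// getModRefBehavior(Call2) carries no Mod bit, so the result is
// NoModRef: the guard only reads memory, and Call2's reads cannot
// disturb the heap state the guard depends on.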
1101 | |
1102 | /// Return true if we know V to be the base address of the corresponding
1103 | /// memory object. This implies that any address less than V must be out of
1104 | /// bounds for the underlying object. Note that just being isIdentifiedObject()
1105 | /// is not enough; for example, a negative offset from a noalias argument or
1106 | /// call can be inbounds w.r.t. the actual underlying object.
1107 | static bool isBaseOfObject(const Value *V) { |
1108 | // TODO: We can handle other cases here |
1109 | // 1) For GC languages, arguments to functions are often required to be |
1110 | // base pointers. |
1111 | // 2) Result of allocation routines are often base pointers. Leverage TLI. |
1112 | return (isa<AllocaInst>(V) || isa<GlobalVariable>(V)); |
1113 | } |
1114 | |
1115 | /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against |
1116 | /// another pointer. |
1117 | /// |
1118 | /// We know that V1 is a GEP, but we don't know anything about V2. |
1119 | /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for |
1120 | /// V2. |
1121 | AliasResult BasicAAResult::aliasGEP( |
1122 | const GEPOperator *GEP1, LocationSize V1Size, |
1123 | const Value *V2, LocationSize V2Size, |
1124 | const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) { |
1125 | if (!V1Size.hasValue() && !V2Size.hasValue()) { |
1126 | // TODO: This limitation exists for compile-time reasons. Relax it if we |
1127 | // can avoid exponential pathological cases. |
1128 | if (!isa<GEPOperator>(V2)) |
1129 | return AliasResult::MayAlias; |
1130 | |
1131 | // If both accesses have unknown size, we can only check whether the base |
1132 | // objects don't alias. |
1133 | AliasResult BaseAlias = getBestAAResults().alias( |
1134 | MemoryLocation::getBeforeOrAfter(UnderlyingV1), |
1135 | MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI); |
1136 | return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias |
1137 | : AliasResult::MayAlias; |
1138 | } |
1139 | |
1140 | DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT); |
1141 | DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT); |
1142 | |
1143 | // Bail if we were not able to decompose anything. |
1144 | if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2) |
1145 | return AliasResult::MayAlias; |
1146 | |
1147 | // Subtract the GEP2 pointer from the GEP1 pointer to find out their |
1148 | // symbolic difference. |
1149 | subtractDecomposedGEPs(DecompGEP1, DecompGEP2); |
1150 | |
1151 | // If an inbounds GEP would have to start from an out of bounds address |
1152 | // for the two to alias, then we can assume noalias. |
1153 | if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() && |
1154 | V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) && |
1155 | isBaseOfObject(DecompGEP2.Base)) |
1156 | return AliasResult::NoAlias; |
1157 | |
1158 | if (isa<GEPOperator>(V2)) { |
1159 | // Symmetric case to above. |
1160 | if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() && |
1161 | V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) && |
1162 | isBaseOfObject(DecompGEP1.Base)) |
1163 | return AliasResult::NoAlias; |
1164 | } |
1165 | |
1166 | // For GEPs with identical offsets, we can preserve the size and AAInfo |
1167 | // when performing the alias check on the underlying objects. |
1168 | if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty()) |
1169 | return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size), |
1170 | MemoryLocation(DecompGEP2.Base, V2Size), |
1171 | AAQI); |
1172 | |
1173 | // Do the base pointers alias? |
1174 | AliasResult BaseAlias = getBestAAResults().alias( |
1175 | MemoryLocation::getBeforeOrAfter(DecompGEP1.Base), |
1176 | MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI); |
1177 | |
1178 | // If we get a No or May, then return it immediately, no amount of analysis |
1179 | // will improve this situation. |
1180 | if (BaseAlias != AliasResult::MustAlias) { |
1181 | assert(BaseAlias == AliasResult::NoAlias ||
1182 | BaseAlias == AliasResult::MayAlias);
1183 | return BaseAlias; |
1184 | } |
1185 | |
1186 | // If there is a constant difference between the pointers, but the difference |
1187 | // is less than the size of the associated memory object, then we know |
1188 | // that the objects are partially overlapping. If the difference is |
1189 | // greater, we know they do not overlap. |
1190 | if (DecompGEP1.VarIndices.empty()) { |
1191 | APInt &Off = DecompGEP1.Offset; |
1192 | |
1193 | // Initialize for Off >= 0 (V2 <= GEP1) case. |
1194 | const Value *LeftPtr = V2; |
1195 | const Value *RightPtr = GEP1; |
1196 | LocationSize VLeftSize = V2Size; |
1197 | LocationSize VRightSize = V1Size; |
1198 | const bool Swapped = Off.isNegative(); |
1199 | |
1200 | if (Swapped) { |
1201 | // Swap if we have the situation where: |
1202 | // + + |
1203 | // | BaseOffset | |
1204 | // ---------------->| |
1205 | // |-->V1Size |-------> V2Size |
1206 | // GEP1 V2 |
1207 | std::swap(LeftPtr, RightPtr); |
1208 | std::swap(VLeftSize, VRightSize); |
1209 | Off = -Off; |
1210 | } |
1211 | |
1212 | if (!VLeftSize.hasValue()) |
1213 | return AliasResult::MayAlias; |
1214 | |
1215 | const uint64_t LSize = VLeftSize.getValue(); |
1216 | if (Off.ult(LSize)) { |
1217 | // Conservatively drop processing if a phi was visited and/or offset is |
1218 | // too big. |
1219 | AliasResult AR = AliasResult::PartialAlias; |
1220 | if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1221 | (Off + VRightSize.getValue()).ule(LSize)) { |
1222 | // Memory referenced by the right pointer is nested. Save the offset in
1223 | // the cache. Note that the offset was originally estimated as GEP1-V2,
1224 | // but AliasResult contains the shift that represents GEP1+Offset=V2.
1225 | AR.setOffset(-Off.getSExtValue()); |
1226 | AR.swap(Swapped); |
1227 | } |
1228 | return AR; |
1229 | } |
1230 | return AliasResult::NoAlias; |
1231 | } |
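// Worked example of the constant-offset case above (illustrative values):
// for GEP1 = Base+2 with V1Size = 4 and V2 = Base with V2Size = 8, Off = 2
// is non-negative so nothing is swapped; Off.ult(LSize = 8) holds, and since
// Off + VRightSize = 6 <= 8 the right access is nested inside the left one.
// The result is PartialAlias with offset -2, encoding GEP1 + (-2) == V2.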
1232 | |
1233 | // We need to know both access sizes for all the following heuristics.
1234 | if (!V1Size.hasValue() || !V2Size.hasValue()) |
1235 | return AliasResult::MayAlias; |
1236 | |
1237 | APInt GCD; |
1238 | ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset); |
1239 | for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) { |
1240 | const VariableGEPIndex &Index = DecompGEP1.VarIndices[i]; |
1241 | const APInt &Scale = Index.Scale; |
1242 | APInt ScaleForGCD = Scale; |
1243 | if (!Index.IsNSW) |
1244 | ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(), |
1245 | Scale.countTrailingZeros()); |
1246 | |
1247 | if (i == 0) |
1248 | GCD = ScaleForGCD.abs(); |
1249 | else |
1250 | GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs()); |
1251 | |
1252 | ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false, |
1253 | true, &AC, Index.CxtI); |
1254 | KnownBits Known = |
1255 | computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT); |
1256 | CR = CR.intersectWith( |
1257 | ConstantRange::fromKnownBits(Known, /* Signed */ true), |
1258 | ConstantRange::Signed); |
1259 | CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth()); |
1260 | |
1261 | assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1262 | "Bit widths are normalized to MaxIndexSize");
1263 | if (Index.IsNSW) |
1264 | OffsetRange = OffsetRange.add(CR.smul_sat(ConstantRange(Scale))); |
1265 | else |
1266 | OffsetRange = OffsetRange.add(CR.smul_fast(ConstantRange(Scale))); |
1267 | } |
1268 | |
1269 | // We now have accesses at two offsets from the same base: |
1270 | // 1. (...)*GCD + DecompGEP1.Offset with size V1Size |
1271 | // 2. 0 with size V2Size |
1272 | // Using arithmetic modulo GCD, the accesses are at |
1273 | // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits |
1274 | // into the range [V2Size..GCD), then we know they cannot overlap. |
1275 | APInt ModOffset = DecompGEP1.Offset.srem(GCD); |
1276 | if (ModOffset.isNegative()) |
1277 | ModOffset += GCD; // We want mod, not rem. |
1278 | if (ModOffset.uge(V2Size.getValue()) && |
1279 | (GCD - ModOffset).uge(V1Size.getValue())) |
1280 | return AliasResult::NoAlias; |
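// Worked example of the modular check above (illustrative values): for
// accesses at i*4 + 2 with V1Size = 1 against offset 0 with V2Size = 2,
// GCD = 4 and ModOffset = 2. Since ModOffset >= V2Size (2 >= 2) and
// GCD - ModOffset >= V1Size (2 >= 1), the byte ranges [2,3) mod 4 and
// [0,2) never meet, so the accesses are NoAlias.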
1281 | |
1282 | // Compute ranges of potentially accessed bytes for both accesses. If the |
1283 | // intersection is empty, there can be no overlap.
1284 | unsigned BW = OffsetRange.getBitWidth(); |
1285 | ConstantRange Range1 = OffsetRange.add( |
1286 | ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue()))); |
1287 | ConstantRange Range2 = |
1288 | ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue())); |
1289 | if (Range1.intersectWith(Range2).isEmptySet()) |
1290 | return AliasResult::NoAlias; |
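// Worked example of the range check above (illustrative values): if the
// index is known to lie in [4,8) and Scale = 4 with a zero constant offset,
// OffsetRange = [16,29); adding [0,V1Size) for V1Size = 4 gives
// Range1 = [16,32), which misses Range2 = [0,8), so NoAlias.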
1291 | |
1292 | // Try to determine the range of values for VarIndex such that |
1293 | // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex. |
1294 | Optional<APInt> MinAbsVarIndex; |
1295 | if (DecompGEP1.VarIndices.size() == 1) { |
1296 | // VarIndex = Scale*V. |
1297 | const VariableGEPIndex &Var = DecompGEP1.VarIndices[0]; |
1298 | if (Var.Val.TruncBits == 0 && |
1299 | isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) { |
1300 | // If V != 0, then abs(VarIndex) > 0. |
1301 | MinAbsVarIndex = APInt(Var.Scale.getBitWidth(), 1); |
1302 | |
1303 | // Check if abs(V*Scale) >= abs(Scale) holds in the presence of |
1304 | // potentially wrapping math. |
1305 | auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) { |
1306 | if (Var.IsNSW) |
1307 | return true; |
1308 | |
1309 | int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits(); |
1310 | // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds. |
1311 | // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a |
1312 | // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap. |
1313 | int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW; |
1314 | if (MaxScaleValueBW <= 0) |
1315 | return false; |
1316 | return Var.Scale.ule( |
1317 | APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth())); |
1318 | }; |
1319 | // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the |
1320 | // presence of potentially wrapping math. |
1321 | if (MultiplyByScaleNoWrap(Var)) { |
1322 | // If V != 0 then abs(VarIndex) >= abs(Scale). |
1323 | MinAbsVarIndex = Var.Scale.abs(); |
1324 | } |
1325 | } |
1326 | } else if (DecompGEP1.VarIndices.size() == 2) { |
1327 | // VarIndex = Scale*V0 + (-Scale)*V1. |
1328 | // If V0 != V1 then abs(VarIndex) >= abs(Scale). |
1329 | // Check that VisitedPhiBBs is empty, to avoid reasoning about |
1330 | // inequality of values across loop iterations. |
1331 | const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0]; |
1332 | const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1]; |
1333 | if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 && |
1334 | Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() && |
1335 | isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr, |
1336 | DT)) |
1337 | MinAbsVarIndex = Var0.Scale.abs(); |
1338 | } |
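// Worked example for the two-index case (illustrative values): comparing
// &p[i] and &p[j] for i32 elements yields VarIndices {4*i, -4*j}. If
// isKnownNonEqual proves i != j, then |4*i - 4*j| >= 4, so
// MinAbsVarIndex = 4.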
1339 | |
1340 | if (MinAbsVarIndex) { |
1341 | // The constant offset will have at least +/-MinAbsVarIndex added to it.
1342 | APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex; |
1343 | APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex; |
1344 | // We know that Offset <= OffsetLo || Offset >= OffsetHi |
1345 | if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) && |
1346 | OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue())) |
1347 | return AliasResult::NoAlias; |
1348 | } |
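// Continuing the example above with DecompGEP1.Offset = 0 and
// MinAbsVarIndex = 4: OffsetLo = -4 and OffsetHi = 4, so with
// V1Size = V2Size = 4 the GEP access ends at or before offset 0 or starts
// at or after offset 4, and either way misses [0,4), giving NoAlias.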
1349 | |
1350 | if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT)) |
1351 | return AliasResult::NoAlias; |
1352 | |
1353 | // Statically, we can see that the base objects are the same, but the |
1354 | // pointers have dynamic offsets which we can't resolve. And none of our |
1355 | // little tricks above worked. |
1356 | return AliasResult::MayAlias; |
1357 | } |
1358 | |
1359 | static AliasResult MergeAliasResults(AliasResult A, AliasResult B) { |
1360 | // If the results agree, take it. |
1361 | if (A == B) |
1362 | return A; |
1363 | // A mix of PartialAlias and MustAlias is PartialAlias. |
1364 | if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) || |
1365 | (B == AliasResult::PartialAlias && A == AliasResult::MustAlias)) |
1366 | return AliasResult::PartialAlias; |
1367 | // Otherwise, we don't know anything. |
1368 | return AliasResult::MayAlias; |
1369 | } |
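// For example, MergeAliasResults(AliasResult::PartialAlias,
// AliasResult::MustAlias) == AliasResult::PartialAlias, while
// MergeAliasResults(AliasResult::NoAlias, AliasResult::MustAlias) ==
// AliasResult::MayAlias.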
1370 | |
1371 | /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction |
1372 | /// against another. |
1373 | AliasResult |
1374 | BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize, |
1375 | const Value *V2, LocationSize V2Size, |
1376 | AAQueryInfo &AAQI) { |
1377 | // If the values are Selects with the same condition, we can do a more precise |
1378 | // check: just check for aliases between the values on corresponding arms. |
1379 | if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) |
1380 | if (SI->getCondition() == SI2->getCondition()) { |
1381 | AliasResult Alias = getBestAAResults().alias( |
1382 | MemoryLocation(SI->getTrueValue(), SISize), |
1383 | MemoryLocation(SI2->getTrueValue(), V2Size), AAQI); |
1384 | if (Alias == AliasResult::MayAlias) |
1385 | return AliasResult::MayAlias; |
1386 | AliasResult ThisAlias = getBestAAResults().alias( |
1387 | MemoryLocation(SI->getFalseValue(), SISize), |
1388 | MemoryLocation(SI2->getFalseValue(), V2Size), AAQI); |
1389 | return MergeAliasResults(ThisAlias, Alias); |
1390 | } |
1391 | |
1392 | // If both arms of the Select node NoAlias or MustAlias V2, then return
1393 | // NoAlias / MustAlias. Otherwise, return MayAlias.
1394 | AliasResult Alias = getBestAAResults().alias( |
1395 | MemoryLocation(V2, V2Size), |
1396 | MemoryLocation(SI->getTrueValue(), SISize), AAQI); |
1397 | if (Alias == AliasResult::MayAlias) |
1398 | return AliasResult::MayAlias; |
1399 | |
1400 | AliasResult ThisAlias = getBestAAResults().alias( |
1401 | MemoryLocation(V2, V2Size), |
1402 | MemoryLocation(SI->getFalseValue(), SISize), AAQI); |
1403 | return MergeAliasResults(ThisAlias, Alias); |
1404 | } |
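// For example, with %s1 = select i1 %c, i32* %a, i32* %b and
// %s2 = select i1 %c, i32* %x, i32* %y (illustrative IR, same condition
// %c), both selects take corresponding arms at run time, so NoAlias for
// both the %a/%x and %b/%y pairs proves NoAlias for the selects.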
1405 | |
1406 | /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against |
1407 | /// another. |
1408 | AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize, |
1409 | const Value *V2, LocationSize V2Size, |
1410 | AAQueryInfo &AAQI) { |
1411 | if (!PN->getNumIncomingValues()) |
1412 | return AliasResult::NoAlias; |
1413 | // If the values are PHIs in the same block, we can do a more precise |
1414 | // as well as efficient check: just check for aliases between the values |
1415 | // on corresponding edges. |
1416 | if (const PHINode *PN2 = dyn_cast<PHINode>(V2)) |
1417 | if (PN2->getParent() == PN->getParent()) { |
1418 | Optional<AliasResult> Alias; |
1419 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { |
1420 | AliasResult ThisAlias = getBestAAResults().alias( |
1421 | MemoryLocation(PN->getIncomingValue(i), PNSize), |
1422 | MemoryLocation( |
1423 | PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size), |
1424 | AAQI); |
1425 | if (Alias) |
1426 | *Alias = MergeAliasResults(*Alias, ThisAlias); |
1427 | else |
1428 | Alias = ThisAlias; |
1429 | if (*Alias == AliasResult::MayAlias) |
1430 | break; |
1431 | } |
1432 | return *Alias; |
1433 | } |
1434 | |
1435 | SmallVector<Value *, 4> V1Srcs; |
1436 | // If a phi operand recurses back to the phi, we can still determine NoAlias |
1437 | // if we don't alias the underlying objects of the other phi operands, as we |
1438 | // know that the recursive phi needs to be based on them in some way. |
1439 | bool isRecursive = false; |
1440 | auto CheckForRecPhi = [&](Value *PV) { |
1441 | if (!EnableRecPhiAnalysis) |
1442 | return false; |
1443 | if (getUnderlyingObject(PV) == PN) { |
1444 | isRecursive = true; |
1445 | return true; |
1446 | } |
1447 | return false; |
1448 | }; |
1449 | |
1450 | if (PV) { |
1451 | // If we have PhiValues then use it to get the underlying phi values. |
1452 | const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN); |
1453 | // If we have more phi values than the search depth then return MayAlias |
1454 | // conservatively to avoid compile time explosion. The worst possible case |
1455 | is if both sides are PHI nodes, in which case this is O(m x n) time,
1456 | // where 'm' and 'n' are the number of PHI sources. |
1457 | if (PhiValueSet.size() > MaxLookupSearchDepth) |
1458 | return AliasResult::MayAlias; |
1459 | // Add the values to V1Srcs |
1460 | for (Value *PV1 : PhiValueSet) { |
1461 | if (CheckForRecPhi(PV1)) |
1462 | continue; |
1463 | V1Srcs.push_back(PV1); |
1464 | } |
1465 | } else { |
1466 | // If we don't have PhiValues, just look at the operands of the phi itself.
1467 | // FIXME: Remove this once we can guarantee that we have PhiInfo always |
1468 | SmallPtrSet<Value *, 4> UniqueSrc; |
1469 | Value *OnePhi = nullptr; |
1470 | for (Value *PV1 : PN->incoming_values()) { |
1471 | if (isa<PHINode>(PV1)) { |
1472 | if (OnePhi && OnePhi != PV1) { |
1473 | // To control potential compile time explosion, we choose to be |
1474 | conservative when we have more than one Phi input. It is important
1475 | // that we handle the single phi case as that lets us handle LCSSA |
1476 | // phi nodes and (combined with the recursive phi handling) simple |
1477 | // pointer induction variable patterns. |
1478 | return AliasResult::MayAlias; |
1479 | } |
1480 | OnePhi = PV1; |
1481 | } |
1482 | |
1483 | if (CheckForRecPhi(PV1)) |
1484 | continue; |
1485 | |
1486 | if (UniqueSrc.insert(PV1).second) |
1487 | V1Srcs.push_back(PV1); |
1488 | } |
1489 | |
1490 | if (OnePhi && UniqueSrc.size() > 1) |
1491 | // Out of an abundance of caution, allow only the trivial lcssa and |
1492 | // recursive phi cases. |
1493 | return AliasResult::MayAlias; |
1494 | } |
1495 | |
1496 | // If V1Srcs is empty then that means that the phi has no underlying non-phi |
1497 | // value. This should only be possible in blocks unreachable from the entry |
1498 | // block, but return MayAlias just in case. |
1499 | if (V1Srcs.empty()) |
1500 | return AliasResult::MayAlias; |
1501 | |
1502 | // If this PHI node is recursive, indicate that the pointer may be moved |
1503 | // across iterations. We can only prove NoAlias if different underlying |
1504 | // objects are involved. |
1505 | if (isRecursive) |
1506 | PNSize = LocationSize::beforeOrAfterPointer(); |
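// For example, for %p = phi [ %base, %entry ], [ %p.next, %loop ] with
// %p.next = getelementptr i8, i8* %p, i64 4 (illustrative IR), %p strides
// through the object across iterations, so the access extent relative to
// %base is unknown in both directions.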
1507 | |
1508 | // In the recursive alias queries below, we may compare values from two |
1509 | // different loop iterations. Keep track of visited phi blocks, which will |
1510 | // be used when determining value equivalence. |
1511 | bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second; |
1512 | auto _ = make_scope_exit([&]() { |
1513 | if (BlockInserted) |
1514 | VisitedPhiBBs.erase(PN->getParent()); |
1515 | }); |
1516 | |
1517 | // If we inserted a block into VisitedPhiBBs, alias analysis results that |
1518 | // have been cached earlier may no longer be valid. Perform recursive queries |
1519 | // with a new AAQueryInfo. |
1520 | AAQueryInfo NewAAQI = AAQI.withEmptyCache(); |
1521 | AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI; |
1522 | |
1523 | AliasResult Alias = getBestAAResults().alias( |
1524 | MemoryLocation(V2, V2Size), |
1525 | MemoryLocation(V1Srcs[0], PNSize), *UseAAQI); |
1526 | |
1527 | // Early exit if the check of the first PHI source against V2 is MayAlias. |
1528 | // Other results are not possible. |
1529 | if (Alias == AliasResult::MayAlias) |
1530 | return AliasResult::MayAlias; |
1531 | // With recursive phis we cannot guarantee that MustAlias/PartialAlias will |
1532 | // remain valid for all elements, so we conservatively return MayAlias.
1533 | if (isRecursive && Alias != AliasResult::NoAlias) |
1534 | return AliasResult::MayAlias; |
1535 | |
1536 | // If all sources of the PHI node NoAlias or MustAlias V2, then return
1537 | // NoAlias / MustAlias. Otherwise, return MayAlias.
1538 | for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) { |
1539 | Value *V = V1Srcs[i]; |
1540 | |
1541 | AliasResult ThisAlias = getBestAAResults().alias( |
1542 | MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI); |
1543 | Alias = MergeAliasResults(ThisAlias, Alias); |
1544 | if (Alias == AliasResult::MayAlias) |
1545 | break; |
1546 | } |
1547 | |
1548 | return Alias; |
1549 | } |
1550 | |
1551 | /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as |
1552 | /// array references. |
1553 | AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size, |
1554 | const Value *V2, LocationSize V2Size, |
1555 | AAQueryInfo &AAQI) { |
1556 | // If either of the memory references is empty, it doesn't matter what the |
1557 | // pointer values are. |
1558 | if (V1Size.isZero() || V2Size.isZero()) |
1559 | return AliasResult::NoAlias; |
1560 | |
1561 | // Strip off any casts if they exist. |
1562 | V1 = V1->stripPointerCastsForAliasAnalysis(); |
1563 | V2 = V2->stripPointerCastsForAliasAnalysis(); |
1564 | |
1565 | // If V1 or V2 is undef, the result is NoAlias because we can always pick a |
1566 | // value for undef that aliases nothing in the program. |
1567 | if (isa<UndefValue>(V1) || isa<UndefValue>(V2)) |
1568 | return AliasResult::NoAlias; |
1569 | |
1570 | // Are we checking for alias of the same value? |
1571 | // Because we look 'through' phi nodes, we could look at "Value" pointers from |
1572 | // different iterations. We must therefore make sure that this is not the |
1573 | // case. The function isValueEqualInPotentialCycles ensures that this cannot |
1574 | // happen by looking at the visited phi nodes and making sure they cannot |
1575 | // reach the value. |
1576 | if (isValueEqualInPotentialCycles(V1, V2)) |
1577 | return AliasResult::MustAlias; |
1578 | |
1579 | if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy()) |
1580 | return AliasResult::NoAlias; // Scalars cannot alias each other |
1581 | |
1582 | // Figure out what objects these things are pointing to if we can. |
1583 | const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth); |
1584 | const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth); |
1585 | |
1586 | // Null values in the default address space don't point to any object, so they |
1587 | // don't alias any other pointer. |
1588 | if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1)) |
1589 | if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace())) |
1590 | return AliasResult::NoAlias; |
1591 | if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2)) |
1592 | if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace())) |
1593 | return AliasResult::NoAlias; |
1594 | |
1595 | if (O1 != O2) { |
1596 | // If V1/V2 point to two different objects, we know that we have no alias. |
1597 | if (isIdentifiedObject(O1) && isIdentifiedObject(O2)) |
1598 | return AliasResult::NoAlias; |
1599 | |
1600 | // Constant pointers can't alias with non-const isIdentifiedObject objects. |
1601 | if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) || |
1602 | (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1))) |
1603 | return AliasResult::NoAlias; |
1604 | |
1605 | // Function arguments can't alias with things that are known to be |
1606 | // unambiguously identified at the function level.
1607 | if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) || |
1608 | (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1))) |
1609 | return AliasResult::NoAlias; |
1610 | |
1611 | // If one pointer is the result of a call/invoke or load and the other is a |
1612 | // non-escaping local object within the same function, then we know the |
1613 | // object couldn't escape to a point where the call could return it. |
1614 | // |
1615 | // Note that if the pointers are in different functions, there are a |
1616 | // variety of complications. A call with a nocapture argument may still |
1617 | // temporarily store the nocapture argument's value in a temporary memory
1618 | // location if that memory location doesn't escape. Or it may pass a |
1619 | // nocapture value to other functions as long as they don't capture it. |
1620 | if (isEscapeSource(O1) && |
1621 | AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1))) |
1622 | return AliasResult::NoAlias; |
1623 | if (isEscapeSource(O2) && |
1624 | AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2))) |
1625 | return AliasResult::NoAlias; |
1626 | } |
1627 | |
1628 | // If the size of one access is larger than the entire object on the other |
1629 | // side, then we know such behavior is undefined and can assume no alias. |
1630 | bool NullIsValidLocation = NullPointerIsDefined(&F); |
1631 | if ((isObjectSmallerThan( |
1632 | O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL, |
1633 | TLI, NullIsValidLocation)) || |
1634 | (isObjectSmallerThan( |
1635 | O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL, |
1636 | TLI, NullIsValidLocation))) |
1637 | return AliasResult::NoAlias; |
1638 | |
1639 | // If one of the accesses may be before the accessed pointer, canonicalize
1640 | // this by using unknown after-pointer sizes for both accesses. This is
1641 | // equivalent, because regardless of which pointer is lower, one of them
1642 | // will always come after the other, as long as the underlying objects aren't
1643 | // disjoint. We do this so that the rest of BasicAA does not have to deal |
1644 | // with accesses before the base pointer, and to improve cache utilization by |
1645 | // merging equivalent states. |
1646 | if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) { |
1647 | V1Size = LocationSize::afterPointer(); |
1648 | V2Size = LocationSize::afterPointer(); |
1649 | } |
1650 | |
1651 | // FIXME: If this depth limit is hit, then we may cache sub-optimal results |
1652 | // for recursive queries. For this reason, this limit is chosen to be large |
1653 | // enough to be very rarely hit, while still being small enough to avoid |
1654 | // stack overflows. |
1655 | if (AAQI.Depth >= 512) |
1656 | return AliasResult::MayAlias; |
1657 | |
1658 | // Check the cache before climbing up use-def chains. This also terminates |
1659 | // otherwise infinitely recursive queries. |
1660 | AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size}); |
1661 | const bool Swapped = V1 > V2; |
1662 | if (Swapped) |
1663 | std::swap(Locs.first, Locs.second); |
1664 | const auto &Pair = AAQI.AliasCache.try_emplace( |
1665 | Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0}); |
1666 | if (!Pair.second) { |
1667 | auto &Entry = Pair.first->second; |
1668 | if (!Entry.isDefinitive()) { |
1669 | // Remember that we used an assumption. |
1670 | ++Entry.NumAssumptionUses; |
1671 | ++AAQI.NumAssumptionUses; |
1672 | } |
1673 | // Cache contains sorted {V1,V2} pairs but we should return original order. |
1674 | auto Result = Entry.Result; |
1675 | Result.swap(Swapped); |
1676 | return Result; |
1677 | } |
1678 | |
1679 | int OrigNumAssumptionUses = AAQI.NumAssumptionUses; |
1680 | unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size(); |
1681 | AliasResult Result = |
1682 | aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2); |
1683 | |
1684 | auto It = AAQI.AliasCache.find(Locs); |
1685 | assert(It != AAQI.AliasCache.end() && "Must be in cache");
1686 | auto &Entry = It->second; |
1687 | |
1688 | // Check whether a NoAlias assumption has been used, but disproven. |
1689 | bool AssumptionDisproven = |
1690 | Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias; |
1691 | if (AssumptionDisproven) |
1692 | Result = AliasResult::MayAlias; |
1693 | |
1694 | // This is a definitive result now, when considered as a root query. |
1695 | AAQI.NumAssumptionUses -= Entry.NumAssumptionUses; |
1696 | Entry.Result = Result; |
1697 | // Cache contains sorted {V1,V2} pairs. |
1698 | Entry.Result.swap(Swapped); |
1699 | Entry.NumAssumptionUses = -1; |
1700 | |
1701 | // If the assumption has been disproven, remove any results that may have |
1702 | // been based on this assumption. Do this after the Entry updates above to |
1703 | // avoid iterator invalidation. |
1704 | if (AssumptionDisproven) |
1705 | while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults) |
1706 | AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val()); |
1707 | |
1708 | // The result may still be based on assumptions higher up in the chain. |
1709 | // Remember it, so it can be purged from the cache later. |
1710 | if (OrigNumAssumptionUses != AAQI.NumAssumptionUses && |
1711 | Result != AliasResult::MayAlias) |
1712 | AAQI.AssumptionBasedResults.push_back(Locs); |
1713 | return Result; |
1714 | } |
1715 | |
1716 | AliasResult BasicAAResult::aliasCheckRecursive( |
1717 | const Value *V1, LocationSize V1Size, |
1718 | const Value *V2, LocationSize V2Size, |
1719 | AAQueryInfo &AAQI, const Value *O1, const Value *O2) { |
1720 | if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) { |
1721 | AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI); |
1722 | if (Result != AliasResult::MayAlias) |
1723 | return Result; |
1724 | } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) { |
1725 | AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI); |
1726 | Result.swap(); |
1727 | if (Result != AliasResult::MayAlias) |
1728 | return Result; |
1729 | } |
1730 | |
1731 | if (const PHINode *PN = dyn_cast<PHINode>(V1)) { |
1732 | AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI); |
1733 | if (Result != AliasResult::MayAlias) |
1734 | return Result; |
1735 | } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) { |
1736 | AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI); |
1737 | Result.swap(); |
1738 | if (Result != AliasResult::MayAlias) |
1739 | return Result; |
1740 | } |
1741 | |
1742 | if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) { |
1743 | AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI); |
1744 | if (Result != AliasResult::MayAlias) |
1745 | return Result; |
1746 | } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) { |
1747 | AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI); |
1748 | Result.swap(); |
1749 | if (Result != AliasResult::MayAlias) |
1750 | return Result; |
1751 | } |
1752 | |
1753 | // If both pointers are pointing into the same object and one of them |
1754 | // accesses the entire object, then the accesses must overlap in some way. |
1755 | if (O1 == O2) { |
1756 | bool NullIsValidLocation = NullPointerIsDefined(&F); |
1757 | if (V1Size.isPrecise() && V2Size.isPrecise() && |
1758 | (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) || |
1759 | isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) |
1760 | return AliasResult::PartialAlias; |
1761 | } |
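// For example, if O1 == O2 is a 16-byte alloca and one access precisely
// covers all 16 bytes (illustrative sizes), any nonzero-size access through
// the other pointer into the same object must overlap it, hence
// PartialAlias.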
1762 | |
1763 | return AliasResult::MayAlias; |
1764 | } |
1765 | |
1766 | /// Check whether two Values can be considered equivalent. |
1767 | /// |
1768 | /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1769 | /// they cannot be part of a cycle in the value graph by looking at all
1770 | /// visited phi nodes and making sure that the phis cannot reach the value. We
1771 | /// have to do this because we are looking through phi nodes (that is, we say
1772 | /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1773 | bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V, |
1774 | const Value *V2) { |
1775 | if (V != V2) |
1776 | return false; |
1777 | |
1778 | const Instruction *Inst = dyn_cast<Instruction>(V); |
1779 | if (!Inst) |
1780 | return true; |
1781 | |
1782 | if (VisitedPhiBBs.empty()) |
1783 | return true; |
1784 | |
1785 | if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck) |
1786 | return false; |
1787 | |
1788 | // Make sure that the visited phis cannot reach the Value. This ensures that |
1789 | // the Values cannot come from different iterations of a potential cycle the |
1790 | // phi nodes could be involved in. |
1791 | for (auto *P : VisitedPhiBBs) |
1792 | if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT)) |
1793 | return false; |
1794 | |
1795 | return true; |
1796 | } |
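// For example, with %q = phi [ %a, %entry ], [ %q.inc, %latch ] and
// %q.inc = getelementptr i8, i8* %q, i64 1 (illustrative IR), %q.inc is
// reachable from the visited phi block, so two syntactic occurrences of
// %q.inc may belong to different iterations and are not treated as equal.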
1797 | |
1798 | /// Computes the symbolic difference between two de-composed GEPs. |
1799 | void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP, |
1800 | const DecomposedGEP &SrcGEP) { |
1801 | DestGEP.Offset -= SrcGEP.Offset; |
1802 | for (const VariableGEPIndex &Src : SrcGEP.VarIndices) { |
1803 | // Find V in Dest. This is N^2, but pointer indices almost never have more |
1804 | // than a few variable indexes. |
1805 | bool Found = false; |
1806 | for (auto I : enumerate(DestGEP.VarIndices)) { |
1807 | VariableGEPIndex &Dest = I.value(); |
1808 | if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) || |
1809 | !Dest.Val.hasSameCastsAs(Src.Val)) |
1810 | continue; |
1811 | |
1812 | // If we found it, subtract off Scale V's from the entry in Dest. If it |
1813 | // goes to zero, remove the entry. |
1814 | if (Dest.Scale != Src.Scale) { |
1815 | Dest.Scale -= Src.Scale; |
1816 | Dest.IsNSW = false; |
1817 | } else { |
1818 | DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index()); |
1819 | } |
1820 | Found = true; |
1821 | break; |
1822 | } |
1823 | |
1824 | // If we didn't consume this entry, add it to the end of the Dest list. |
1825 | if (!Found) { |
1826 | VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW}; |
1827 | DestGEP.VarIndices.push_back(Entry); |
1828 | } |
1829 | } |
1830 | } |
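// For example, subtracting the decomposed GEP {4*i + 8} from {4*i + 2*j}
// (illustrative indices) cancels the matching 4*i terms and subtracts the
// constant offsets, leaving {2*j - 8}.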
1831 | |
1832 | bool BasicAAResult::constantOffsetHeuristic( |
1833 | const DecomposedGEP &GEP, LocationSize MaybeV1Size, |
1834 | LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) { |
1835 | if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() || |
1836 | !MaybeV2Size.hasValue()) |
1837 | return false; |
1838 | |
1839 | const uint64_t V1Size = MaybeV1Size.getValue(); |
1840 | const uint64_t V2Size = MaybeV2Size.getValue(); |
1841 | |
1842 | const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1]; |
1843 | |
1844 | if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) || |
1845 | Var0.Scale != -Var1.Scale || |
1846 | Var0.Val.V->getType() != Var1.Val.V->getType()) |
1847 | return false; |
1848 | |
1849 | // We'll strip off the Extensions of Var0 and Var1 and do another round |
1850 | // of GetLinearExpression decomposition. In the example above, if Var0 |
1851 | // is zext(%x + 1) we should get V1 == %x and V1Offset == 1. |
1852 | |
1853 | LinearExpression E0 = |
1854 | GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT); |
1855 | LinearExpression E1 = |
1856 | GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT); |
1857 | if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) || |
1858 | !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V)) |
1859 | return false; |
1860 | |
1861 | // We have a hit - Var0 and Var1 only differ by a constant offset! |
1862 | |
1863 | // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1864 | // Var1 is possible to calculate, but we're just interested in the absolute |
1865 | // minimum difference between the two. The minimum distance may occur due to |
1866 | // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so |
1867 | // the minimum distance between %i and %i + 5 is 3. |
1868 | APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff; |
1869 | MinDiff = APIntOps::umin(MinDiff, Wrapped); |
1870 | APInt MinDiffBytes = |
1871 | MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs(); |
1872 | |
1873 | // We can't definitely say whether GEP1 is before or after V2 due to wrapping |
1874 | // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other |
1875 | // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and |
1876 | // V2Size can fit in the MinDiffBytes gap. |
1877 | return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) && |
1878 | MinDiffBytes.uge(V2Size + GEP.Offset.abs()); |
1879 | } |
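// Worked example (illustrative values): for 4-byte accesses through p[x+1]
// and p[x], the variable indices decompose to (%x + 1) and %x with scales
// 4 and -4; E0.Offset - E1.Offset == 1, so MinDiffBytes == 4, which covers
// both access sizes when GEP.Offset == 0 and proves NoAlias.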
1880 | |
1881 | //===----------------------------------------------------------------------===// |
1882 | // BasicAliasAnalysis Pass |
1883 | //===----------------------------------------------------------------------===// |
1884 | |
1885 | AnalysisKey BasicAA::Key; |
1886 | |
1887 | BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) { |
1888 | auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); |
1889 | auto &AC = AM.getResult<AssumptionAnalysis>(F); |
1890 | auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); |
1891 | auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F); |
1892 | return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV); |
1893 | } |
1894 | |
1895 | BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) { |
1896 | initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry()); |
1897 | } |
1898 | |
1899 | char BasicAAWrapperPass::ID = 0; |
1900 | |
1901 | void BasicAAWrapperPass::anchor() {} |
1902 | |
1903 | INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1904 | "Basic Alias Analysis (stateless AA impl)", true, true)
1905 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1906 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1907 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1908 | INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1909 | INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1910 | "Basic Alias Analysis (stateless AA impl)", true, true)
1911 | |
1912 | FunctionPass *llvm::createBasicAAWrapperPass() { |
1913 | return new BasicAAWrapperPass(); |
1914 | } |
1915 | |
1916 | bool BasicAAWrapperPass::runOnFunction(Function &F) { |
1917 | auto &ACT = getAnalysis<AssumptionCacheTracker>(); |
1918 | auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>(); |
1919 | auto &DTWP = getAnalysis<DominatorTreeWrapperPass>(); |
1920 | auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>(); |
1921 | |
1922 | Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, |
1923 | TLIWP.getTLI(F), ACT.getAssumptionCache(F), |
1924 | &DTWP.getDomTree(), |
1925 | PVWP ? &PVWP->getResult() : nullptr)); |
1926 | |
1927 | return false; |
1928 | } |
1929 | |
1930 | void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { |
1931 | AU.setPreservesAll(); |
1932 | AU.addRequiredTransitive<AssumptionCacheTracker>(); |
1933 | AU.addRequiredTransitive<DominatorTreeWrapperPass>(); |
1934 | AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); |
1935 | AU.addUsedIfAvailable<PhiValuesWrapperPass>(); |
1936 | } |
1937 | |
1938 | BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) { |
1939 | return BasicAAResult( |
1940 | F.getParent()->getDataLayout(), F, |
1941 | P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), |
1942 | P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F)); |
1943 | } |