BasicAliasAnalysis.cpp
1//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the primary stateless implementation of the
10// Alias Analysis interface that implements identities (two different
11// globals cannot alias, etc), but does no stateful analysis.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ScopeExit.h"
20#include "llvm/ADT/Statistic.h"
23#include "llvm/Analysis/CFG.h"
29#include "llvm/IR/Argument.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
39#include "llvm/IR/GlobalAlias.h"
41#include "llvm/IR/InstrTypes.h"
42#include "llvm/IR/Instruction.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/Operator.h"
47#include "llvm/IR/Type.h"
48#include "llvm/IR/User.h"
49#include "llvm/IR/Value.h"
51#include "llvm/Pass.h"
57#include <cassert>
58#include <cstdint>
59#include <cstdlib>
60#include <optional>
61#include <utility>
62
63#define DEBUG_TYPE "basicaa"
64
65using namespace llvm;
66
67/// Enable analysis of recursive PHI nodes.
68static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
69                                          cl::init(true));
70
71static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
72 cl::Hidden, cl::init(true));
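// For reference (illustrative, not taken from this file), a separate-storage
// hint in IR looks roughly like:
//   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
// It asserts that %p and %q point into disjoint allocations; the
// separate-storage handling in aliasCheck() below turns such hints into
// NoAlias results.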
73
74/// SearchLimitReached / SearchTimes show how often the limit to
75/// decompose GEPs is reached. It affects the precision
76/// of basic alias analysis.
77STATISTIC(SearchLimitReached, "Number of times the limit to "
78 "decompose GEPs is reached");
79STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
80
81// The max limit of the search depth in DecomposeGEPExpression() and
82// getUnderlyingObject().
83static const unsigned MaxLookupSearchDepth = 6;
84
85bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
86                               FunctionAnalysisManager::Invalidator &Inv) {
87 // We don't care if this analysis itself is preserved, it has no state. But
88 // we need to check that the analyses it depends on have been. Note that we
89 // may be created without handles to some analyses and in that case don't
90 // depend on them.
91 if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
92 (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
93 return true;
94
95 // Otherwise this analysis result remains valid.
96 return false;
97}
98
99//===----------------------------------------------------------------------===//
100// Useful predicates
101//===----------------------------------------------------------------------===//
102
103/// Returns the size of the object specified by V or UnknownSize if unknown.
104static std::optional<TypeSize> getObjectSize(const Value *V,
105 const DataLayout &DL,
106 const TargetLibraryInfo &TLI,
107 bool NullIsValidLoc,
108 bool RoundToAlign = false) {
109 uint64_t Size;
110 ObjectSizeOpts Opts;
111 Opts.RoundToAlign = RoundToAlign;
112 Opts.NullIsUnknownSize = NullIsValidLoc;
113 if (getObjectSize(V, Size, DL, &TLI, Opts))
114 return TypeSize::getFixed(Size);
115 return std::nullopt;
116}
117
118/// Returns true if we can prove that the object specified by V is smaller than
119/// Size.
120static bool isObjectSmallerThan(const Value *V, TypeSize Size,
121                                const DataLayout &DL,
122 const TargetLibraryInfo &TLI,
123 bool NullIsValidLoc) {
124 // Note that the meanings of the "object" are slightly different in the
125 // following contexts:
126 // c1: llvm::getObjectSize()
127 // c2: llvm.objectsize() intrinsic
128 // c3: isObjectSmallerThan()
129 // c1 and c2 share the same meaning; however, the meaning of "object" in c3
130 // refers to the "entire object".
131 //
132 // Consider this example:
133 // char *p = (char*)malloc(100)
134 // char *q = p+80;
135 //
136 // In the context of c1 and c2, the "object" pointed by q refers to the
137 // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
138 //
139 // However, in the context of c3, the "object" refers to the chunk of memory
140 // being allocated. So, the "object" has 100 bytes, and q points to the middle
141 // of the "object". If q is passed to isObjectSmallerThan() as the 1st
142 // parameter, then before llvm::getObjectSize() is called to get the size of the
143 // entire object, we should:
144 //   - either rewind the pointer q to the base address of the object in
145 //     question (in this case rewind to p), or
146 //   - just give up. It is up to the caller to make sure the pointer points
147 //     to the base address of the object.
148 //
149 // We go for the 2nd option for simplicity.
150 if (!isIdentifiedObject(V))
151 return false;
152
153 // This function needs to use the aligned object size because we allow
154 // reads a bit past the end given sufficient alignment.
155 std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
156 /*RoundToAlign*/ true);
157
158 return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
159}
160
161/// Return the minimal extent from \p V to the end of the underlying object,
162/// assuming the result is used in an aliasing query. E.g., we do use the query
163/// location size and the fact that null pointers cannot alias here.
164static TypeSize getMinimalExtentFrom(const Value &V,
165                                     const LocationSize &LocSize,
166 const DataLayout &DL,
167 bool NullIsValidLoc) {
168 // If we have dereferenceability information we know a lower bound for the
169 // extent as accesses for a lower offset would be valid. We need to exclude
170 // the "or null" part if null is a valid pointer. We can ignore frees, as an
171 // access after free would be undefined behavior.
172 bool CanBeNull, CanBeFreed;
173 uint64_t DerefBytes =
174 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
175 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
176 // If queried with a precise location size, we assume that location size to be
177 // accessed, thus valid.
178 if (LocSize.isPrecise())
179 DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
180 return TypeSize::getFixed(DerefBytes);
181}
182
183/// Returns true if we can prove that the object specified by V has size Size.
184static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
185 const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
186 std::optional<TypeSize> ObjectSize =
187 getObjectSize(V, DL, TLI, NullIsValidLoc);
188 return ObjectSize && *ObjectSize == Size;
189}
190
191/// Return true if both V1 and V2 are VScale
192static bool areBothVScale(const Value *V1, const Value *V2) {
193  return PatternMatch::match(V1, PatternMatch::m_VScale()) &&
194         PatternMatch::match(V2, PatternMatch::m_VScale());
195}
196
197//===----------------------------------------------------------------------===//
198// CaptureInfo implementations
199//===----------------------------------------------------------------------===//
200
201CaptureInfo::~CaptureInfo() = default;
202
203bool SimpleCaptureInfo::isNotCapturedBefore(const Value *Object,
204                                            const Instruction *I, bool OrAt) {
205 return isNonEscapingLocalObject(Object, &IsCapturedCache);
206}
207
208static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
209 const LoopInfo *LI) {
210 BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
211  SmallVector<BasicBlock *> Succs(successors(BB));
212  return Succs.empty() ||
213 !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
214}
215
216bool EarliestEscapeInfo::isNotCapturedBefore(const Value *Object,
217                                             const Instruction *I, bool OrAt) {
218 if (!isIdentifiedFunctionLocal(Object))
219 return false;
220
221 auto Iter = EarliestEscapes.insert({Object, nullptr});
222 if (Iter.second) {
223 Instruction *EarliestCapture = FindEarliestCapture(
224 Object, *const_cast<Function *>(DT.getRoot()->getParent()),
225 /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
226 if (EarliestCapture) {
227 auto Ins = Inst2Obj.insert({EarliestCapture, {}});
228 Ins.first->second.push_back(Object);
229 }
230 Iter.first->second = EarliestCapture;
231 }
232
233 // No capturing instruction.
234 if (!Iter.first->second)
235 return true;
236
237 // No context instruction means any use is capturing.
238 if (!I)
239 return false;
240
241 if (I == Iter.first->second) {
242 if (OrAt)
243 return false;
244 return isNotInCycle(I, &DT, LI);
245 }
246
247 return !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, LI);
248}
249
250void EarliestEscapeInfo::removeInstruction(Instruction *I) {
251  auto Iter = Inst2Obj.find(I);
252 if (Iter != Inst2Obj.end()) {
253 for (const Value *Obj : Iter->second)
254 EarliestEscapes.erase(Obj);
255 Inst2Obj.erase(I);
256 }
257}
258
259//===----------------------------------------------------------------------===//
260// GetElementPtr Instruction Decomposition and Analysis
261//===----------------------------------------------------------------------===//
262
263namespace {
264/// Represents zext(sext(trunc(V))).
265struct CastedValue {
266 const Value *V;
267 unsigned ZExtBits = 0;
268 unsigned SExtBits = 0;
269 unsigned TruncBits = 0;
270
271 explicit CastedValue(const Value *V) : V(V) {}
272 explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
273 unsigned TruncBits)
274 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
275
276 unsigned getBitWidth() const {
277 return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
278 SExtBits;
279 }
280
281 CastedValue withValue(const Value *NewV) const {
282 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
283 }
284
285 /// Replace V with zext(NewV)
286 CastedValue withZExtOfValue(const Value *NewV) const {
287 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
288                        NewV->getType()->getPrimitiveSizeInBits();
289    if (ExtendBy <= TruncBits)
290 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
291
292 // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
293 ExtendBy -= TruncBits;
294 return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
295 }
296
297 /// Replace V with sext(NewV)
298 CastedValue withSExtOfValue(const Value *NewV) const {
299 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
300                        NewV->getType()->getPrimitiveSizeInBits();
301    if (ExtendBy <= TruncBits)
302 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
303
304 // zext(sext(sext(NewV)))
305 ExtendBy -= TruncBits;
306 return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
307 }
308
309 APInt evaluateWith(APInt N) const {
310 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
311 "Incompatible bit width");
312 if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
313 if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
314 if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
315 return N;
316 }
317
318 ConstantRange evaluateWith(ConstantRange N) const {
319 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
320 "Incompatible bit width");
321 if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
322 if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
323 if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
324 return N;
325 }
326
327 bool canDistributeOver(bool NUW, bool NSW) const {
328 // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
329 // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
330 // trunc(x op y) == trunc(x) op trunc(y)
331 return (!ZExtBits || NUW) && (!SExtBits || NSW);
332 }
333
334 bool hasSameCastsAs(const CastedValue &Other) const {
335 return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
336 TruncBits == Other.TruncBits;
337 }
338};
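// A small illustration (assuming an i32 value %x): the IR sequence
//   %t = trunc i32 %x to i16
//   %e = zext i16 %t to i64
// would be represented as CastedValue(%x, /*ZExtBits=*/48, /*SExtBits=*/0,
// /*TruncBits=*/16), and getBitWidth() gives 32 - 16 + 48 + 0 = 64.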
339
340/// Represents zext(sext(trunc(V))) * Scale + Offset.
341struct LinearExpression {
342 CastedValue Val;
343 APInt Scale;
344  APInt Offset;
345
346 /// True if all operations in this expression are NSW.
347 bool IsNSW;
348
349 LinearExpression(const CastedValue &Val, const APInt &Scale,
350 const APInt &Offset, bool IsNSW)
351 : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
352
353 LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
354 unsigned BitWidth = Val.getBitWidth();
355 Scale = APInt(BitWidth, 1);
356 Offset = APInt(BitWidth, 0);
357 }
358
359 LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
360 // The check for zero offset is necessary, because generally
361 // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
362 bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
363 return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
364 }
365};
366}
367
368/// Analyzes the specified value as a linear expression: "A*V + B", where A and
369/// B are constant integers.
370static LinearExpression GetLinearExpression(
371 const CastedValue &Val, const DataLayout &DL, unsigned Depth,
372    AssumptionCache *AC, DominatorTree *DT) {
373  // Limit our recursion depth.
374 if (Depth == 6)
375 return Val;
376
377 if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
378 return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
379 Val.evaluateWith(Const->getValue()), true);
380
381 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
382 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
383 APInt RHS = Val.evaluateWith(RHSC->getValue());
384 // The only non-OBO case we deal with is or, and only limited to the
385 // case where it is both nuw and nsw.
386 bool NUW = true, NSW = true;
387 if (isa<OverflowingBinaryOperator>(BOp)) {
388 NUW &= BOp->hasNoUnsignedWrap();
389 NSW &= BOp->hasNoSignedWrap();
390 }
391 if (!Val.canDistributeOver(NUW, NSW))
392 return Val;
393
394 // While we can distribute over trunc, we cannot preserve nowrap flags
395 // in that case.
396 if (Val.TruncBits)
397 NUW = NSW = false;
398
399 LinearExpression E(Val);
400 switch (BOp->getOpcode()) {
401 default:
402 // We don't understand this instruction, so we can't decompose it any
403 // further.
404 return Val;
405 case Instruction::Or:
406 // X|C == X+C if it is disjoint. Otherwise we can't analyze it.
407 if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
408 return Val;
409
410 [[fallthrough]];
411 case Instruction::Add: {
412 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
413 Depth + 1, AC, DT);
414 E.Offset += RHS;
415 E.IsNSW &= NSW;
416 break;
417 }
418 case Instruction::Sub: {
419 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
420 Depth + 1, AC, DT);
421 E.Offset -= RHS;
422 E.IsNSW &= NSW;
423 break;
424 }
425 case Instruction::Mul:
426 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
427 Depth + 1, AC, DT)
428 .mul(RHS, NSW);
429 break;
430 case Instruction::Shl:
431 // We're trying to linearize an expression of the kind:
432 // shl i8 -128, 36
433 // where the shift count exceeds the bitwidth of the type.
434 // We can't decompose this further (the expression would return
435 // a poison value).
436 if (RHS.getLimitedValue() > Val.getBitWidth())
437 return Val;
438
439 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
440 Depth + 1, AC, DT);
441 E.Offset <<= RHS.getLimitedValue();
442 E.Scale <<= RHS.getLimitedValue();
443 E.IsNSW &= NSW;
444 break;
445 }
446 return E;
447 }
448 }
449
450 if (isa<ZExtInst>(Val.V))
451 return GetLinearExpression(
452 Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
453 DL, Depth + 1, AC, DT);
454
455 if (isa<SExtInst>(Val.V))
456 return GetLinearExpression(
457 Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
458 DL, Depth + 1, AC, DT);
459
460 return Val;
461}
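// A worked example (illustrative only): for
//   %a = add nsw i32 %x, 4
//   %b = shl nsw i32 %a, 1
// GetLinearExpression on %b yields Scale = 2, Offset = 8 and IsNSW = true,
// i.e. %b is decomposed as 2*%x + 8.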
462
463/// Ensure that a pointer offset fits in an integer of size IndexSize
464/// (in bits) when that size is smaller than the maximum index size. This is
465/// an issue, for example, for 32-bit pointers with negative indices that
466/// rely on two's complement wrap-arounds for precise alias information,
467/// where the maximum index size is 64 bits.
468static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
469 assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
470 unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
471 if (ShiftBits != 0) {
472 Offset <<= ShiftBits;
473 Offset.ashrInPlace(ShiftBits);
474 }
475}
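// Worked example (illustrative): with a 64-bit Offset holding 0x00000000FFFFFFFF
// and IndexSize == 32, shifting left by 32 and arithmetic-shifting right by 32
// sign-extends the low 32 bits, producing -1. This models the two's complement
// wrap-around of a 32-bit index within a wider offset.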
476
477namespace {
478// A linear transformation of a Value; this class represents
479// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
480struct VariableGEPIndex {
481 CastedValue Val;
482 APInt Scale;
483
484 // Context instruction to use when querying information about this index.
485 const Instruction *CxtI;
486
487 /// True if all operations in this expression are NSW.
488 bool IsNSW;
489
490 /// True if the index should be subtracted rather than added. We don't simply
491 /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
492 /// non-wrapping, while X + INT_MIN*(-1) wraps.
493 bool IsNegated;
494
495 bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
496 if (IsNegated == Other.IsNegated)
497 return Scale == -Other.Scale;
498 return Scale == Other.Scale;
499 }
500
501 void dump() const {
502 print(dbgs());
503 dbgs() << "\n";
504 }
505 void print(raw_ostream &OS) const {
506 OS << "(V=" << Val.V->getName()
507 << ", zextbits=" << Val.ZExtBits
508 << ", sextbits=" << Val.SExtBits
509 << ", truncbits=" << Val.TruncBits
510 << ", scale=" << Scale
511 << ", nsw=" << IsNSW
512 << ", negated=" << IsNegated << ")";
513 }
514};
515}
516
517// Represents the internal structure of a GEP, decomposed into a base pointer,
518// constant offsets, and variable scaled indices.
519struct DecomposedGEP {
520  // Base pointer of the GEP
521  const Value *Base;
522  // Total constant offset from base.
523  APInt Offset;
524  // Scaled variable (non-constant) indices.
525  SmallVector<VariableGEPIndex, 4> VarIndices;
526  // Are all operations inbounds GEPs or non-indexing operations?
527 // (std::nullopt iff expression doesn't involve any geps)
528 std::optional<bool> InBounds;
529
530 void dump() const {
531 print(dbgs());
532 dbgs() << "\n";
533 }
534 void print(raw_ostream &OS) const {
535 OS << "(DecomposedGEP Base=" << Base->getName()
536 << ", Offset=" << Offset
537 << ", VarIndices=[";
538 for (size_t i = 0; i < VarIndices.size(); i++) {
539 if (i != 0)
540 OS << ", ";
541 VarIndices[i].print(OS);
542 }
543 OS << "])";
544 }
545};
546
547
548/// If V is a symbolic pointer expression, decompose it into a base pointer
549/// with a constant offset and a number of scaled symbolic offsets.
550///
551/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
552/// in the VarIndices vector) are Value*'s that are known to be scaled by the
553/// specified amount, but which may have other unrepresented high bits. As
554/// such, the gep cannot necessarily be reconstructed from its decomposed form.
556BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
558 // Limit recursion depth to limit compile time in crazy cases.
559 unsigned MaxLookup = MaxLookupSearchDepth;
560 SearchTimes++;
561 const Instruction *CxtI = dyn_cast<Instruction>(V);
562
563 unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
564 DecomposedGEP Decomposed;
565 Decomposed.Offset = APInt(MaxIndexSize, 0);
566 do {
567 // See if this is a bitcast or GEP.
568 const Operator *Op = dyn_cast<Operator>(V);
569 if (!Op) {
570 // The only non-operator case we can handle are GlobalAliases.
571 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
572 if (!GA->isInterposable()) {
573 V = GA->getAliasee();
574 continue;
575 }
576 }
577 Decomposed.Base = V;
578 return Decomposed;
579 }
580
581 if (Op->getOpcode() == Instruction::BitCast ||
582 Op->getOpcode() == Instruction::AddrSpaceCast) {
583 V = Op->getOperand(0);
584 continue;
585 }
586
587 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
588 if (!GEPOp) {
589 if (const auto *PHI = dyn_cast<PHINode>(V)) {
590 // Look through single-arg phi nodes created by LCSSA.
591 if (PHI->getNumIncomingValues() == 1) {
592 V = PHI->getIncomingValue(0);
593 continue;
594 }
595 } else if (const auto *Call = dyn_cast<CallBase>(V)) {
596        // CaptureTracking can know about special capturing properties of some
597        // intrinsics like launder.invariant.group that can't be expressed with
598        // attributes, but which have properties like returning an aliasing
599        // pointer. Because some analyses may assume that a nocapture pointer is
600        // not returned from special intrinsics (because the function would have
601        // to be marked with the 'returned' attribute), it is crucial to use this
602        // function, as it is kept in sync with CaptureTracking. Not using it may
603        // cause weird miscompilations where two aliasing pointers are assumed
604        // not to alias.
605 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
606 V = RP;
607 continue;
608 }
609 }
610
611 Decomposed.Base = V;
612 return Decomposed;
613 }
614
615 // Track whether we've seen at least one in bounds gep, and if so, whether
616 // all geps parsed were in bounds.
617 if (Decomposed.InBounds == std::nullopt)
618 Decomposed.InBounds = GEPOp->isInBounds();
619 else if (!GEPOp->isInBounds())
620 Decomposed.InBounds = false;
621
622 assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
623
624 unsigned AS = GEPOp->getPointerAddressSpace();
625 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
626    gep_type_iterator GTI = gep_type_begin(GEPOp);
627    unsigned IndexSize = DL.getIndexSizeInBits(AS);
628 // Assume all GEP operands are constants until proven otherwise.
629 bool GepHasConstantOffset = true;
630 for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
631 I != E; ++I, ++GTI) {
632 const Value *Index = *I;
633 // Compute the (potentially symbolic) offset in bytes for this index.
634 if (StructType *STy = GTI.getStructTypeOrNull()) {
635 // For a struct, add the member offset.
636 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
637 if (FieldNo == 0)
638 continue;
639
640 Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
641 continue;
642 }
643
644 // For an array/pointer, add the element offset, explicitly scaled.
645 if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
646 if (CIdx->isZero())
647 continue;
648
649 // Don't attempt to analyze GEPs if the scalable index is not zero.
650 TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
651 if (AllocTypeSize.isScalable()) {
652 Decomposed.Base = V;
653 return Decomposed;
654 }
655
656 Decomposed.Offset += AllocTypeSize.getFixedValue() *
657 CIdx->getValue().sextOrTrunc(MaxIndexSize);
658 continue;
659 }
660
661 TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
662 if (AllocTypeSize.isScalable()) {
663 Decomposed.Base = V;
664 return Decomposed;
665 }
666
667 GepHasConstantOffset = false;
668
669 // If the integer type is smaller than the index size, it is implicitly
670 // sign extended or truncated to index size.
671 unsigned Width = Index->getType()->getIntegerBitWidth();
672 unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
673 unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
674 LinearExpression LE = GetLinearExpression(
675 CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
676
677 // Scale by the type size.
678 unsigned TypeSize = AllocTypeSize.getFixedValue();
679 LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
680 Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
681 APInt Scale = LE.Scale.sext(MaxIndexSize);
682
683 // If we already had an occurrence of this index variable, merge this
684 // scale into it. For example, we want to handle:
685 // A[x][x] -> x*16 + x*4 -> x*20
686 // This also ensures that 'x' only appears in the index list once.
687 for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
688 if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
689 areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
690 Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
691 Scale += Decomposed.VarIndices[i].Scale;
692 LE.IsNSW = false; // We cannot guarantee nsw for the merge.
693 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
694 break;
695 }
696 }
697
698 // Make sure that we have a scale that makes sense for this target's
699 // index size.
700 adjustToIndexSize(Scale, IndexSize);
701
702 if (!!Scale) {
703 VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
704 /* IsNegated */ false};
705 Decomposed.VarIndices.push_back(Entry);
706 }
707 }
708
709 // Take care of wrap-arounds
710 if (GepHasConstantOffset)
711 adjustToIndexSize(Decomposed.Offset, IndexSize);
712
713 // Analyze the base pointer next.
714 V = GEPOp->getOperand(0);
715 } while (--MaxLookup);
716
717 // If the chain of expressions is too deep, just return early.
718 Decomposed.Base = V;
719 SearchLimitReached++;
720 return Decomposed;
721}
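// Example decomposition (illustrative, assuming a 64-bit index size):
//   %p = getelementptr inbounds [10 x i32], ptr %base, i64 0, i64 %i
// decomposes into Base = %base, Offset = 0 and one VariableGEPIndex
// {Val = %i, Scale = 4, IsNSW = true}, i.e. %p == %base + 4 * %i.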
722
723ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
724                                            AAQueryInfo &AAQI,
725 bool IgnoreLocals) {
726 assert(Visited.empty() && "Visited must be cleared after use!");
727 auto _ = make_scope_exit([&] { Visited.clear(); });
728
729 unsigned MaxLookup = 8;
730  SmallVector<const Value *, 16> Worklist;
731  Worklist.push_back(Loc.Ptr);
732  ModRefInfo Result = ModRefInfo::NoModRef;
733
734 do {
735 const Value *V = getUnderlyingObject(Worklist.pop_back_val());
736 if (!Visited.insert(V).second)
737 continue;
738
739 // Ignore allocas if we were instructed to do so.
740 if (IgnoreLocals && isa<AllocaInst>(V))
741 continue;
742
743 // If the location points to memory that is known to be invariant for
744 // the life of the underlying SSA value, then we can exclude Mod from
745 // the set of valid memory effects.
746 //
747 // An argument that is marked readonly and noalias is known to be
748 // invariant while that function is executing.
749 if (const Argument *Arg = dyn_cast<Argument>(V)) {
750 if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
751 Result |= ModRefInfo::Ref;
752 continue;
753 }
754 }
755
756 // A global constant can't be mutated.
757 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
758 // Note: this doesn't require GV to be "ODR" because it isn't legal for a
759 // global to be marked constant in some modules and non-constant in
760 // others. GV may even be a declaration, not a definition.
761 if (!GV->isConstant())
762 return ModRefInfo::ModRef;
763 continue;
764 }
765
766 // If both select values point to local memory, then so does the select.
767 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
768 Worklist.push_back(SI->getTrueValue());
769 Worklist.push_back(SI->getFalseValue());
770 continue;
771 }
772
773 // If all values incoming to a phi node point to local memory, then so does
774 // the phi.
775 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
776 // Don't bother inspecting phi nodes with many operands.
777 if (PN->getNumIncomingValues() > MaxLookup)
778 return ModRefInfo::ModRef;
779 append_range(Worklist, PN->incoming_values());
780 continue;
781 }
782
783 // Otherwise be conservative.
784 return ModRefInfo::ModRef;
785 } while (!Worklist.empty() && --MaxLookup);
786
787 // If we hit the maximum number of instructions to examine, be conservative.
788 if (!Worklist.empty())
789 return ModRefInfo::ModRef;
790
791 return Result;
792}
793
794static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
795 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
796 return II && II->getIntrinsicID() == IID;
797}
798
799/// Returns the behavior when calling the given call site.
800MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
801                                              AAQueryInfo &AAQI) {
802 MemoryEffects Min = Call->getAttributes().getMemoryEffects();
803
804 if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
805 MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
806 // Operand bundles on the call may also read or write memory, in addition
807 // to the behavior of the called function.
808 if (Call->hasReadingOperandBundles())
809 FuncME |= MemoryEffects::readOnly();
810 if (Call->hasClobberingOperandBundles())
811 FuncME |= MemoryEffects::writeOnly();
812 Min &= FuncME;
813 }
814
815 return Min;
816}
817
818/// Returns the behavior when calling the given function. For use when the call
819/// site is not known.
820MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
821  switch (F->getIntrinsicID()) {
822 case Intrinsic::experimental_guard:
823 case Intrinsic::experimental_deoptimize:
824 // These intrinsics can read arbitrary memory, and additionally modref
825 // inaccessible memory to model control dependence.
826 return MemoryEffects::readOnly() |
827           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
828  }
829
830 return F->getMemoryEffects();
831}
832
833ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
834                                           unsigned ArgIdx) {
835 if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
836 return ModRefInfo::Mod;
837
838 if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
839 return ModRefInfo::Ref;
840
841 if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
842    return ModRefInfo::NoModRef;
843
844 return ModRefInfo::ModRef;
845}
846
847#ifndef NDEBUG
848static const Function *getParent(const Value *V) {
849 if (const Instruction *inst = dyn_cast<Instruction>(V)) {
850 if (!inst->getParent())
851 return nullptr;
852 return inst->getParent()->getParent();
853 }
854
855 if (const Argument *arg = dyn_cast<Argument>(V))
856 return arg->getParent();
857
858 return nullptr;
859}
860
861static bool notDifferentParent(const Value *O1, const Value *O2) {
862
863 const Function *F1 = getParent(O1);
864 const Function *F2 = getParent(O2);
865
866 return !F1 || !F2 || F1 == F2;
867}
868#endif
869
870AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
871                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
872 const Instruction *CtxI) {
873 assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
874 "BasicAliasAnalysis doesn't support interprocedural queries.");
875 return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
876}
877
878/// Checks to see if the specified callsite can clobber the specified memory
879/// object.
880///
881/// Since we only look at local properties of this function, we really can't
882/// say much about this query. We do, however, use simple "address taken"
883/// analysis on local objects.
884ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
885                                        const MemoryLocation &Loc,
886 AAQueryInfo &AAQI) {
887 assert(notDifferentParent(Call, Loc.Ptr) &&
888 "AliasAnalysis query involving multiple functions!");
889
890 const Value *Object = getUnderlyingObject(Loc.Ptr);
891
892 // Calls marked 'tail' cannot read or write allocas from the current frame
893 // because the current frame might be destroyed by the time they run. However,
894 // a tail call may use an alloca with byval. Calling with byval copies the
895 // contents of the alloca into argument registers or stack slots, so there is
896 // no lifetime issue.
897 if (isa<AllocaInst>(Object))
898 if (const CallInst *CI = dyn_cast<CallInst>(Call))
899 if (CI->isTailCall() &&
900 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
901        return ModRefInfo::NoModRef;
902
903 // Stack restore is able to modify unescaped dynamic allocas. Assume it may
904 // modify them even though the alloca is not escaped.
905 if (auto *AI = dyn_cast<AllocaInst>(Object))
906 if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
907 return ModRefInfo::Mod;
908
909 // A call can access a locally allocated object either because it is passed as
910 // an argument to the call, or because it has escaped prior to the call.
911 //
912 // Make sure the object has not escaped here, and then check that none of the
913 // call arguments alias the object below.
914 if (!isa<Constant>(Object) && Call != Object &&
915 AAQI.CI->isNotCapturedBefore(Object, Call, /*OrAt*/ false)) {
916
917 // Optimistically assume that call doesn't touch Object and check this
918 // assumption in the following loop.
919    ModRefInfo Result = ModRefInfo::NoModRef;
920
921 unsigned OperandNo = 0;
922 for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
923 CI != CE; ++CI, ++OperandNo) {
924 if (!(*CI)->getType()->isPointerTy())
925 continue;
926
927 // Call doesn't access memory through this operand, so we don't care
928 // if it aliases with Object.
929 if (Call->doesNotAccessMemory(OperandNo))
930 continue;
931
932 // If this is a no-capture pointer argument, see if we can tell that it
933 // is impossible to alias the pointer we're checking.
934 AliasResult AR =
935          AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
936                         MemoryLocation::getBeforeOrAfter(Object), AAQI);
937      // Operand doesn't alias 'Object', continue looking for other aliases
938 if (AR == AliasResult::NoAlias)
939 continue;
940 // Operand aliases 'Object', but call doesn't modify it. Strengthen
941      // initial assumption and keep looking in case there are more aliases.
942 if (Call->onlyReadsMemory(OperandNo)) {
943 Result |= ModRefInfo::Ref;
944 continue;
945 }
946 // Operand aliases 'Object' but call only writes into it.
947 if (Call->onlyWritesMemory(OperandNo)) {
948 Result |= ModRefInfo::Mod;
949 continue;
950 }
951 // This operand aliases 'Object' and call reads and writes into it.
952      // Setting ModRef will not yield an early return below; MustAlias is not
953 // used further.
954 Result = ModRefInfo::ModRef;
955 break;
956 }
957
958 // Early return if we improved mod ref information
959 if (!isModAndRefSet(Result))
960 return Result;
961 }
962
963 // If the call is malloc/calloc like, we can assume that it doesn't
964 // modify any IR visible value. This is only valid because we assume these
965 // routines do not read values visible in the IR. TODO: Consider special
966 // casing realloc and strdup routines which access only their arguments as
967 // well. Or alternatively, replace all of this with inaccessiblememonly once
968 // that's implemented fully.
969 if (isMallocOrCallocLikeFn(Call, &TLI)) {
970 // Be conservative if the accessed pointer may alias the allocation -
971 // fallback to the generic handling below.
972 if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
973            AliasResult::NoAlias)
974      return ModRefInfo::NoModRef;
975  }
976
977 // Like assumes, invariant.start intrinsics were also marked as arbitrarily
978 // writing so that proper control dependencies are maintained but they never
979 // mod any particular memory location visible to the IR.
980 // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
981 // intrinsic is now modeled as reading memory. This prevents hoisting the
982 // invariant.start intrinsic over stores. Consider:
983 // *ptr = 40;
984 // *ptr = 50;
985 // invariant_start(ptr)
986 // int val = *ptr;
987 // print(val);
988 //
989 // This cannot be transformed to:
990 //
991 // *ptr = 40;
992 // invariant_start(ptr)
993 // *ptr = 50;
994 // int val = *ptr;
995 // print(val);
996 //
997 // The transformation will cause the second store to be ignored (based on
998 // rules of invariant.start) and print 40, while the first program always
999 // prints 50.
1000 if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1001 return ModRefInfo::Ref;
1002
1003 // Be conservative.
1004 return ModRefInfo::ModRef;
1005}
1006
1007ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1008                                        const CallBase *Call2,
1009 AAQueryInfo &AAQI) {
1010 // Guard intrinsics are marked as arbitrarily writing so that proper control
1011  // dependencies are maintained but they never mod any particular memory
1012 // location.
1013 //
1014 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1015 // heap state at the point the guard is issued needs to be consistent in case
1016 // the guard invokes the "deopt" continuation.
1017
1018 // NB! This function is *not* commutative, so we special case two
1019 // possibilities for guard intrinsics.
1020
1021 if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1022 return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
1023               ? ModRefInfo::Ref
1024               : ModRefInfo::NoModRef;
1025
1026 if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1027 return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
1028               ? ModRefInfo::Mod
1029               : ModRefInfo::NoModRef;
1030
1031 // Be conservative.
1032 return ModRefInfo::ModRef;
1033}
1034
1035/// Return true if we know V to be the base address of the corresponding memory
1036/// object. This implies that any address less than V must be out of bounds
1037/// for the underlying object. Note that just being isIdentifiedObject() is
1038/// not enough - For example, a negative offset from a noalias argument or call
1039/// can be inbounds w.r.t the actual underlying object.
1040static bool isBaseOfObject(const Value *V) {
1041 // TODO: We can handle other cases here
1042 // 1) For GC languages, arguments to functions are often required to be
1043 // base pointers.
1044 // 2) Result of allocation routines are often base pointers. Leverage TLI.
1045 return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1046}
1047
1048/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1049/// another pointer.
1050///
1051/// We know that V1 is a GEP, but we don't know anything about V2.
1052/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1053/// V2.
1054AliasResult BasicAAResult::aliasGEP(
1055 const GEPOperator *GEP1, LocationSize V1Size,
1056 const Value *V2, LocationSize V2Size,
1057 const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1058 if (!V1Size.hasValue() && !V2Size.hasValue()) {
1059 // TODO: This limitation exists for compile-time reasons. Relax it if we
1060 // can avoid exponential pathological cases.
1061 if (!isa<GEPOperator>(V2))
1062 return AliasResult::MayAlias;
1063
1064 // If both accesses have unknown size, we can only check whether the base
1065 // objects don't alias.
1066 AliasResult BaseAlias =
1067 AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1068 MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1069 return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1070                                           : AliasResult::MayAlias;
1071  }
1072
1073 DominatorTree *DT = getDT(AAQI);
1074 DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1075 DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1076
1077 // Bail if we were not able to decompose anything.
1078 if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1079 return AliasResult::MayAlias;
1080
1081 // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1082 // symbolic difference.
1083 subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);
1084
1085 // If an inbounds GEP would have to start from an out of bounds address
1086 // for the two to alias, then we can assume noalias.
1087 // TODO: Remove !isScalable() once BasicAA fully support scalable location
1088 // size
1089 if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1090 V2Size.hasValue() && !V2Size.isScalable() &&
1091 DecompGEP1.Offset.sge(V2Size.getValue()) &&
1092 isBaseOfObject(DecompGEP2.Base))
1093 return AliasResult::NoAlias;
1094
1095 if (isa<GEPOperator>(V2)) {
1096 // Symmetric case to above.
1097 if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1098 V1Size.hasValue() && !V1Size.isScalable() &&
1099 DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1100 isBaseOfObject(DecompGEP1.Base))
1101 return AliasResult::NoAlias;
1102 }
1103
1104 // For GEPs with identical offsets, we can preserve the size and AAInfo
1105 // when performing the alias check on the underlying objects.
1106 if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1107 return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
1108 MemoryLocation(DecompGEP2.Base, V2Size), AAQI);
1109
1110 // Do the base pointers alias?
1111 AliasResult BaseAlias =
1112 AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
1113 MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);
1114
1115 // If we get a No or May, then return it immediately, no amount of analysis
1116 // will improve this situation.
1117 if (BaseAlias != AliasResult::MustAlias) {
1118 assert(BaseAlias == AliasResult::NoAlias ||
1119 BaseAlias == AliasResult::MayAlias);
1120 return BaseAlias;
1121 }
1122
1123 // If there is a constant difference between the pointers, but the difference
1124 // is less than the size of the associated memory object, then we know
1125 // that the objects are partially overlapping. If the difference is
1126 // greater, we know they do not overlap.
1127 if (DecompGEP1.VarIndices.empty()) {
1128 APInt &Off = DecompGEP1.Offset;
1129
1130 // Initialize for Off >= 0 (V2 <= GEP1) case.
1131 const Value *LeftPtr = V2;
1132 const Value *RightPtr = GEP1;
1133 LocationSize VLeftSize = V2Size;
1134 LocationSize VRightSize = V1Size;
1135 const bool Swapped = Off.isNegative();
1136
1137 if (Swapped) {
1138 // Swap if we have the situation where:
1139 // + +
1140 // | BaseOffset |
1141 // ---------------->|
1142 // |-->V1Size |-------> V2Size
1143 // GEP1 V2
1144 std::swap(LeftPtr, RightPtr);
1145 std::swap(VLeftSize, VRightSize);
1146 Off = -Off;
1147 }
1148
1149 if (!VLeftSize.hasValue())
1150 return AliasResult::MayAlias;
1151
1152 const TypeSize LSize = VLeftSize.getValue();
1153 if (!LSize.isScalable()) {
1154 if (Off.ult(LSize)) {
1155 // Conservatively drop processing if a phi was visited and/or offset is
1156 // too big.
1157        AliasResult AR = AliasResult::PartialAlias;
1158        if (VRightSize.hasValue() && !VRightSize.isScalable() &&
1159 Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
1160 // Memory referenced by right pointer is nested. Save the offset in
1161 // cache. Note that originally offset estimated as GEP1-V2, but
1162 // AliasResult contains the shift that represents GEP1+Offset=V2.
1163 AR.setOffset(-Off.getSExtValue());
1164 AR.swap(Swapped);
1165 }
1166 return AR;
1167 }
1168 return AliasResult::NoAlias;
1169 } else {
1170 // We can use the getVScaleRange to prove that Off >= (CR.upper * LSize).
1171 ConstantRange CR = getVScaleRange(&F, Off.getBitWidth());
1172 bool Overflow;
1173 APInt UpperRange = CR.getUnsignedMax().umul_ov(
1174 APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
1175 if (!Overflow && Off.uge(UpperRange))
1176 return AliasResult::NoAlias;
1177 }
1178 }
1179
1180 // VScale Alias Analysis - Given one scalable offset between accesses and a
1181 // scalable typesize, we can divide each side by vscale, treating both values
1182 // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
1183 if (DecompGEP1.VarIndices.size() == 1 &&
1184 DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
1185 DecompGEP1.Offset.isZero() &&
1186 PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
1187                          PatternMatch::m_VScale())) {
1188    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
1189 APInt Scale =
1190 ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
1191 LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;
1192
1193 // Check if the offset is known to not overflow, if it does then attempt to
1194 // prove it with the known values of vscale_range.
1195 bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
1196 if (Overflows) {
1197 ConstantRange CR = getVScaleRange(&F, Scale.getBitWidth());
1198 (void)CR.getSignedMax().smul_ov(Scale, Overflows);
1199 }
1200
1201 if (!Overflows) {
1202 // Note that we do not check that the typesize is scalable, as vscale >= 1
1203 // so noalias still holds so long as the dependency distance is at least
1204 // as big as the typesize.
1205 if (VLeftSize.hasValue() &&
1206 Scale.abs().uge(VLeftSize.getValue().getKnownMinValue()))
1207 return AliasResult::NoAlias;
1208 }
1209 }
1210
1211 // Bail on analysing scalable LocationSize
1212 if (V1Size.isScalable() || V2Size.isScalable())
1213 return AliasResult::MayAlias;
1214
1215  // We need to know both access sizes for all the following heuristics.
1216 if (!V1Size.hasValue() || !V2Size.hasValue())
1217 return AliasResult::MayAlias;
1218
1219 APInt GCD;
1220 ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
1221 for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1222 const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1223 const APInt &Scale = Index.Scale;
1224 APInt ScaleForGCD = Scale;
1225 if (!Index.IsNSW)
1226 ScaleForGCD =
1227          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());
1228
1229 if (i == 0)
1230 GCD = ScaleForGCD.abs();
1231 else
1232 GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1233
1234 ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
1235 true, &AC, Index.CxtI);
1236 KnownBits Known =
1237 computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
1238 CR = CR.intersectWith(
1239 ConstantRange::fromKnownBits(Known, /* Signed */ true),
1240        ConstantRange::Signed);
1241    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
1242
1243 assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1244 "Bit widths are normalized to MaxIndexSize");
1245 if (Index.IsNSW)
1246 CR = CR.smul_sat(ConstantRange(Scale));
1247 else
1248 CR = CR.smul_fast(ConstantRange(Scale));
1249
1250 if (Index.IsNegated)
1251 OffsetRange = OffsetRange.sub(CR);
1252 else
1253 OffsetRange = OffsetRange.add(CR);
1254 }
1255
1256 // We now have accesses at two offsets from the same base:
1257 // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
1258 // 2. 0 with size V2Size
1259 // Using arithmetic modulo GCD, the accesses are at
1260 // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1261 // into the range [V2Size..GCD), then we know they cannot overlap.
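  // For instance (illustrative numbers): with GCD == 4, Offset == 2 and both
  // access sizes equal to 2, the accesses cover [2..4) and [0..2) modulo 4;
  // ModOffset (2) >= V2Size (2) and GCD - ModOffset (2) >= V1Size (2), so the
  // check below concludes NoAlias.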
1262 APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1263 if (ModOffset.isNegative())
1264 ModOffset += GCD; // We want mod, not rem.
1265 if (ModOffset.uge(V2Size.getValue()) &&
1266 (GCD - ModOffset).uge(V1Size.getValue()))
1267 return AliasResult::NoAlias;
1268
1269  // Compute ranges of potentially accessed bytes for both accesses. If the
1270  // intersection is empty, there can be no overlap.
1271 unsigned BW = OffsetRange.getBitWidth();
1272 ConstantRange Range1 = OffsetRange.add(
1273 ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1274 ConstantRange Range2 =
1275 ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1276 if (Range1.intersectWith(Range2).isEmptySet())
1277 return AliasResult::NoAlias;
1278
1279 // Try to determine the range of values for VarIndex such that
1280 // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1281 std::optional<APInt> MinAbsVarIndex;
1282 if (DecompGEP1.VarIndices.size() == 1) {
1283 // VarIndex = Scale*V.
1284 const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1285 if (Var.Val.TruncBits == 0 &&
1286 isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
1287 // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1288 // potentially wrapping math.
1289 auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1290 if (Var.IsNSW)
1291 return true;
1292
1293 int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1294 // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
1295 // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1296 // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
1297 int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1298 if (MaxScaleValueBW <= 0)
1299 return false;
1300 return Var.Scale.ule(
1301 APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
1302 };
1303 // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1304 // presence of potentially wrapping math.
1305 if (MultiplyByScaleNoWrap(Var)) {
1306 // If V != 0 then abs(VarIndex) >= abs(Scale).
1307 MinAbsVarIndex = Var.Scale.abs();
1308 }
1309 }
1310 } else if (DecompGEP1.VarIndices.size() == 2) {
1311 // VarIndex = Scale*V0 + (-Scale)*V1.
1312 // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1313 // Check that MayBeCrossIteration is false, to avoid reasoning about
1314 // inequality of values across loop iterations.
1315 const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1316 const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1317 if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
1318 Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
1319 isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1320 DT))
1321 MinAbsVarIndex = Var0.Scale.abs();
1322 }
1323
1324 if (MinAbsVarIndex) {
1325 // The constant offset will have added at least +/-MinAbsVarIndex to it.
1326 APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1327 APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1328 // We know that Offset <= OffsetLo || Offset >= OffsetHi
1329 if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1330 OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1331 return AliasResult::NoAlias;
1332 }
1333
1334 if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
1335 return AliasResult::NoAlias;
1336
1337 // Statically, we can see that the base objects are the same, but the
1338 // pointers have dynamic offsets which we can't resolve. And none of our
1339 // little tricks above worked.
1340 return AliasResult::MayAlias;
1341}
1342
1343static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1344  // If the results agree, take it.
1345 if (A == B)
1346 return A;
1347  // A mix of PartialAlias and MustAlias is PartialAlias.
1348  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1349      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1350    return AliasResult::PartialAlias;
1351 // Otherwise, we don't know anything.
1352 return AliasResult::MayAlias;
1353}
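// For example, MergeAliasResults(NoAlias, MustAlias) yields MayAlias, while
// MergeAliasResults(PartialAlias, MustAlias) yields PartialAlias.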
1354
1355/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1356/// against another.
1357AliasResult
1358BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1359 const Value *V2, LocationSize V2Size,
1360 AAQueryInfo &AAQI) {
1361 // If the values are Selects with the same condition, we can do a more precise
1362 // check: just check for aliases between the values on corresponding arms.
1363 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1364 if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
1365 AAQI)) {
1366 AliasResult Alias =
1367 AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1368 MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1369 if (Alias == AliasResult::MayAlias)
1370 return AliasResult::MayAlias;
1371 AliasResult ThisAlias =
1372 AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1373 MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1374 return MergeAliasResults(ThisAlias, Alias);
1375 }
1376
1377 // If both arms of the Select node NoAlias or MustAlias V2, then returns
1378 // NoAlias / MustAlias. Otherwise, returns MayAlias.
1379 AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1380 MemoryLocation(V2, V2Size), AAQI);
1381 if (Alias == AliasResult::MayAlias)
1382 return AliasResult::MayAlias;
1383
1384 AliasResult ThisAlias =
1385 AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1386 MemoryLocation(V2, V2Size), AAQI);
1387 return MergeAliasResults(ThisAlias, Alias);
1388}
1389
1390/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1391/// another.
1392AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1393 const Value *V2, LocationSize V2Size,
1394 AAQueryInfo &AAQI) {
1395 if (!PN->getNumIncomingValues())
1396 return AliasResult::NoAlias;
1397 // If the values are PHIs in the same block, we can do a more precise
1398 // as well as efficient check: just check for aliases between the values
1399 // on corresponding edges.
1400 if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1401 if (PN2->getParent() == PN->getParent()) {
1402 std::optional<AliasResult> Alias;
1403 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1404 AliasResult ThisAlias = AAQI.AAR.alias(
1405 MemoryLocation(PN->getIncomingValue(i), PNSize),
1406          MemoryLocation(
1407              PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1408 AAQI);
1409 if (Alias)
1410 *Alias = MergeAliasResults(*Alias, ThisAlias);
1411 else
1412 Alias = ThisAlias;
1413 if (*Alias == AliasResult::MayAlias)
1414 break;
1415 }
1416 return *Alias;
1417 }
1418
1419  SmallVector<Value *, 4> V1Srcs;
1420  // If a phi operand recurses back to the phi, we can still determine NoAlias
1421 // if we don't alias the underlying objects of the other phi operands, as we
1422 // know that the recursive phi needs to be based on them in some way.
1423 bool isRecursive = false;
1424 auto CheckForRecPhi = [&](Value *PV) {
1425    if (!EnableRecPhiAnalysis)
1426      return false;
1427 if (getUnderlyingObject(PV) == PN) {
1428 isRecursive = true;
1429 return true;
1430 }
1431 return false;
1432 };
1433
1434 SmallPtrSet<Value *, 4> UniqueSrc;
1435 Value *OnePhi = nullptr;
1436 for (Value *PV1 : PN->incoming_values()) {
1437 // Skip the phi itself being the incoming value.
1438 if (PV1 == PN)
1439 continue;
1440
1441 if (isa<PHINode>(PV1)) {
1442 if (OnePhi && OnePhi != PV1) {
1443 // To control potential compile time explosion, we choose to be
1444        // conservative when we have more than one Phi input. It is important
1445 // that we handle the single phi case as that lets us handle LCSSA
1446 // phi nodes and (combined with the recursive phi handling) simple
1447 // pointer induction variable patterns.
1448 return AliasResult::MayAlias;
1449 }
1450 OnePhi = PV1;
1451 }
1452
1453 if (CheckForRecPhi(PV1))
1454 continue;
1455
1456 if (UniqueSrc.insert(PV1).second)
1457 V1Srcs.push_back(PV1);
1458 }
1459
1460 if (OnePhi && UniqueSrc.size() > 1)
1461 // Out of an abundance of caution, allow only the trivial lcssa and
1462 // recursive phi cases.
1463 return AliasResult::MayAlias;
1464
1465 // If V1Srcs is empty then that means that the phi has no underlying non-phi
1466 // value. This should only be possible in blocks unreachable from the entry
1467 // block, but return MayAlias just in case.
1468 if (V1Srcs.empty())
1469 return AliasResult::MayAlias;
1470
1471 // If this PHI node is recursive, indicate that the pointer may be moved
1472 // across iterations. We can only prove NoAlias if different underlying
1473 // objects are involved.
1474 if (isRecursive)
1475    PNSize = LocationSize::beforeOrAfterPointer();
1476
1477 // In the recursive alias queries below, we may compare values from two
1478 // different loop iterations.
1479 SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);
1480
1481 AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
1482 MemoryLocation(V2, V2Size), AAQI);
1483
1484 // Early exit if the check of the first PHI source against V2 is MayAlias.
1485 // Other results are not possible.
1486 if (Alias == AliasResult::MayAlias)
1487 return AliasResult::MayAlias;
1488 // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1489  // remain valid for all elements, and we need to conservatively return MayAlias.
1490 if (isRecursive && Alias != AliasResult::NoAlias)
1491 return AliasResult::MayAlias;
1492
1493 // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1494 // NoAlias / MustAlias. Otherwise, returns MayAlias.
1495 for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1496 Value *V = V1Srcs[i];
1497
1498 AliasResult ThisAlias = AAQI.AAR.alias(
1499 MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
1500 Alias = MergeAliasResults(ThisAlias, Alias);
1501 if (Alias == AliasResult::MayAlias)
1502 break;
1503 }
1504
1505 return Alias;
1506}
1507
1508/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1509/// array references.
1510AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1511 const Value *V2, LocationSize V2Size,
1512 AAQueryInfo &AAQI,
1513 const Instruction *CtxI) {
1514 // If either of the memory references is empty, it doesn't matter what the
1515 // pointer values are.
1516 if (V1Size.isZero() || V2Size.isZero())
1517 return AliasResult::NoAlias;
1518
1519 // Strip off any casts if they exist.
1520  V1 = V1->stripPointerCastsForAliasAnalysis();
1521  V2 = V2->stripPointerCastsForAliasAnalysis();
1522
1523 // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1524 // value for undef that aliases nothing in the program.
1525 if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1526 return AliasResult::NoAlias;
1527
1528 // Are we checking for alias of the same value?
1529 // Because we look 'through' phi nodes, we could look at "Value" pointers from
1530 // different iterations. We must therefore make sure that this is not the
1531 // case. The function isValueEqualInPotentialCycles ensures that this cannot
1532 // happen by looking at the visited phi nodes and making sure they cannot
1533 // reach the value.
1534 if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1535    return AliasResult::MustAlias;
1536
1537 if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1538 return AliasResult::NoAlias; // Scalars cannot alias each other
1539
1540 // Figure out what objects these things are pointing to if we can.
1541  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1542  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1543
1544 // Null values in the default address space don't point to any object, so they
1545 // don't alias any other pointer.
1546 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1547 if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1548 return AliasResult::NoAlias;
1549 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1550 if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1551 return AliasResult::NoAlias;
1552
1553 if (O1 != O2) {
1554 // If V1/V2 point to two different objects, we know that we have no alias.
1555    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1556      return AliasResult::NoAlias;
1557
1558 // Function arguments can't alias with things that are known to be
1559  // unambiguously identified at the function level.
1560 if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1561 (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1562 return AliasResult::NoAlias;
1563
1564 // If one pointer is the result of a call/invoke or load and the other is a
1565 // non-escaping local object within the same function, then we know the
1566 // object couldn't escape to a point where the call could return it.
1567 //
1568 // Note that if the pointers are in different functions, there are a
1569 // variety of complications. A call with a nocapture argument may still
1570  // temporarily store the nocapture argument's value in a temporary memory
1571 // location if that memory location doesn't escape. Or it may pass a
1572 // nocapture value to other functions as long as they don't capture it.
1573 if (isEscapeSource(O1) && AAQI.CI->isNotCapturedBefore(
1574 O2, dyn_cast<Instruction>(O1), /*OrAt*/ true))
1575 return AliasResult::NoAlias;
1576 if (isEscapeSource(O2) && AAQI.CI->isNotCapturedBefore(
1577 O1, dyn_cast<Instruction>(O2), /*OrAt*/ true))
1578 return AliasResult::NoAlias;
1579 }
1580
1581 // If the size of one access is larger than the entire object on the other
1582 // side, then we know such behavior is undefined and can assume no alias.
1583 bool NullIsValidLocation = NullPointerIsDefined(&F);
1584 if ((isObjectSmallerThan(
1585 O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1586 TLI, NullIsValidLocation)) ||
1587 (isObjectSmallerThan(
1588 O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1589 TLI, NullIsValidLocation)))
1590 return AliasResult::NoAlias;
1591
1592 if (EnableSeparateStorageAnalysis) {
1593 for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
1594 if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
1595 continue;
1596
1597 AssumeInst *Assume = cast<AssumeInst>(Elem);
1598 OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
1599 if (OBU.getTagName() == "separate_storage") {
1600 assert(OBU.Inputs.size() == 2);
1601 const Value *Hint1 = OBU.Inputs[0].get();
1602 const Value *Hint2 = OBU.Inputs[1].get();
1603 // This is often a no-op; instcombine rewrites this for us. No-op
1604 // getUnderlyingObject calls are fast, though.
1605 const Value *HintO1 = getUnderlyingObject(Hint1);
1606 const Value *HintO2 = getUnderlyingObject(Hint2);
1607
1608 DominatorTree *DT = getDT(AAQI);
1609 auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
1610 if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
1611 return isValidAssumeForContext(Assume, PtrI, DT,
1612 /* AllowEphemerals */ true);
1613 }
1614 if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
1615 const Instruction *FirstI =
1616 &*PtrA->getParent()->getEntryBlock().begin();
1617 return isValidAssumeForContext(Assume, FirstI, DT,
1618 /* AllowEphemerals */ true);
1619 }
1620 return false;
1621 };
1622
1623 if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
1624 // Note that we go back to V1 and V2 for the
1625 // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
1626 // so strictly more assumptions are valid for them.
1627 if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
1628 /* AllowEphemerals */ true)) ||
1629 ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
1630 return AliasResult::NoAlias;
1631 }
1632 }
1633 }
1634 }
1635 }
1636
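As a hedged illustration (not part of this file), the separate_storage handling above is driven by operand bundles on llvm.assume. A frontend or test could emit such an assumption with IRBuilder roughly as follows; the helper name is hypothetical.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include <vector>

using namespace llvm;

// Hypothetical helper: record that P and Q point into disjoint allocations by
// attaching a "separate_storage" operand bundle to an llvm.assume call.
// The loop above later matches the bundle's underlying objects against O1/O2.
static CallInst *emitSeparateStorageAssume(IRBuilder<> &B, Value *P, Value *Q) {
  // Emits: call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
  return B.CreateAssumption(
      B.getTrue(),
      {OperandBundleDef("separate_storage", std::vector<Value *>{P, Q})});
}
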
1637 // If one of the accesses may be before the accessed pointer, canonicalize this
1638 // by using unknown after-pointer sizes for both accesses. This is
1639 // equivalent, because regardless of which pointer is lower, one of them
1640 // will always come after the other, as long as the underlying objects aren't
1641 // disjoint. We do this so that the rest of BasicAA does not have to deal
1642 // with accesses before the base pointer, and to improve cache utilization by
1643 // merging equivalent states.
1644 if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1645 V1Size = LocationSize::afterPointer();
1646 V2Size = LocationSize::afterPointer();
1647 }
1648
1649 // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1650 // for recursive queries. For this reason, this limit is chosen to be large
1651 // enough to be very rarely hit, while still being small enough to avoid
1652 // stack overflows.
1653 if (AAQI.Depth >= 512)
1654 return AliasResult::MayAlias;
1655
1656 // Check the cache before climbing up use-def chains. This also terminates
1657 // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1658 // cache key, because some cases where MayBeCrossIteration==false returns
1659 // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
1660 AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
1661 {V2, V2Size, AAQI.MayBeCrossIteration});
1662 const bool Swapped = V1 > V2;
1663 if (Swapped)
1664 std::swap(Locs.first, Locs.second);
1665 const auto &Pair = AAQI.AliasCache.try_emplace(
1666 Locs, AAQueryInfo::CacheEntry{AliasResult::MayAlias, 0});
1667 if (!Pair.second) {
1668 auto &Entry = Pair.first->second;
1669 if (!Entry.isDefinitive()) {
1670 // Remember that we used an assumption.
1671 ++Entry.NumAssumptionUses;
1672 ++AAQI.NumAssumptionUses;
1673 }
1674 // Cache contains sorted {V1,V2} pairs but we should return original order.
1675 auto Result = Entry.Result;
1676 Result.swap(Swapped);
1677 return Result;
1678 }
1679
1680 int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1681 unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1682 AliasResult Result =
1683 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1684
1685 auto It = AAQI.AliasCache.find(Locs);
1686 assert(It != AAQI.AliasCache.end() && "Must be in cache");
1687 auto &Entry = It->second;
1688
1689 // Check whether a NoAlias assumption has been used, but disproven.
1690 bool AssumptionDisproven =
1691 Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1692 if (AssumptionDisproven)
1693 Result = AliasResult::MayAlias;
1694
1695 // This is a definitive result now, when considered as a root query.
1696 AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1697 Entry.Result = Result;
1698 // Cache contains sorted {V1,V2} pairs.
1699 Entry.Result.swap(Swapped);
1700 Entry.NumAssumptionUses = -1;
1701
1702 // If the assumption has been disproven, remove any results that may have
1703 // been based on this assumption. Do this after the Entry updates above to
1704 // avoid iterator invalidation.
1705 if (AssumptionDisproven)
1706 while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1707 AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1708
1709 // The result may still be based on assumptions higher up in the chain.
1710 // Remember it, so it can be purged from the cache later.
1711 if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1712 Result != AliasResult::MayAlias)
1713 AAQI.AssumptionBasedResults.push_back(Locs);
1714 return Result;
1715}
1716
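For orientation, a hedged sketch of how a client typically reaches aliasCheck: queries go through AAResults, which consults BasicAAResult among its providers. The helper below is illustrative only and assumes an already-constructed AAResults.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"

using namespace llvm;

// Illustrative query: ask whether two pointers may alias anywhere within
// their underlying objects (no known access size).
static bool mayAliasAnywhere(AAResults &AA, const Value *P1, const Value *P2) {
  return AA.alias(MemoryLocation::getBeforeOrAfter(P1),
                  MemoryLocation::getBeforeOrAfter(P2)) !=
         AliasResult::NoAlias;
}
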
1717AliasResult BasicAAResult::aliasCheckRecursive(
1718 const Value *V1, LocationSize V1Size,
1719 const Value *V2, LocationSize V2Size,
1720 AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1721 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1722 AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1723 if (Result != AliasResult::MayAlias)
1724 return Result;
1725 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1726 AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1727 Result.swap();
1728 if (Result != AliasResult::MayAlias)
1729 return Result;
1730 }
1731
1732 if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1733 AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1734 if (Result != AliasResult::MayAlias)
1735 return Result;
1736 } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1737 AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1738 Result.swap();
1739 if (Result != AliasResult::MayAlias)
1740 return Result;
1741 }
1742
1743 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1744 AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1745 if (Result != AliasResult::MayAlias)
1746 return Result;
1747 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1748 AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1749 Result.swap();
1750 if (Result != AliasResult::MayAlias)
1751 return Result;
1752 }
1753
1754 // If both pointers are pointing into the same object and one of them
1755 // accesses the entire object, then the accesses must overlap in some way.
1756 if (O1 == O2) {
1757 bool NullIsValidLocation = NullPointerIsDefined(&F);
1758 if (V1Size.isPrecise() && V2Size.isPrecise() &&
1759 (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1760 isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1761 return AliasResult::PartialAlias;
1762 }
1763
1764 return AliasResult::MayAlias;
1765}
1766
1767/// Check whether two Values can be considered equivalent.
1768///
1769/// If the values may come from different cycle iterations, this will also
1770 /// check that the values are not part of a cycle. We have to do this because we
1771 /// are looking through phi nodes; that is, we say
1772/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1773bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1774 const Value *V2,
1775 const AAQueryInfo &AAQI) {
1776 if (V != V2)
1777 return false;
1778
1779 if (!AAQI.MayBeCrossIteration)
1780 return true;
1781
1782 // Non-instructions and instructions in the entry block cannot be part of
1783 // a loop.
1784 const Instruction *Inst = dyn_cast<Instruction>(V);
1785 if (!Inst || Inst->getParent()->isEntryBlock())
1786 return true;
1787
1788 return isNotInCycle(Inst, getDT(AAQI), /*LI*/ nullptr);
1789}
1790
1791/// Computes the symbolic difference between two de-composed GEPs.
1792void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1793 const DecomposedGEP &SrcGEP,
1794 const AAQueryInfo &AAQI) {
1795 DestGEP.Offset -= SrcGEP.Offset;
1796 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1797 // Find V in Dest. This is N^2, but pointer indices almost never have more
1798 // than a few variable indexes.
1799 bool Found = false;
1800 for (auto I : enumerate(DestGEP.VarIndices)) {
1801 VariableGEPIndex &Dest = I.value();
1802 if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
1803 !areBothVScale(Dest.Val.V, Src.Val.V)) ||
1804 !Dest.Val.hasSameCastsAs(Src.Val))
1805 continue;
1806
1807 // Normalize IsNegated if we're going to lose the NSW flag anyway.
1808 if (Dest.IsNegated) {
1809 Dest.Scale = -Dest.Scale;
1810 Dest.IsNegated = false;
1811 Dest.IsNSW = false;
1812 }
1813
1814 // If we found it, subtract off Scale V's from the entry in Dest. If it
1815 // goes to zero, remove the entry.
1816 if (Dest.Scale != Src.Scale) {
1817 Dest.Scale -= Src.Scale;
1818 Dest.IsNSW = false;
1819 } else {
1820 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1821 }
1822 Found = true;
1823 break;
1824 }
1825
1826 // If we didn't consume this entry, add it to the end of the Dest list.
1827 if (!Found) {
1828 VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
1829 /* IsNegated */ true};
1830 DestGEP.VarIndices.push_back(Entry);
1831 }
1832 }
1833}
1834
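A simplified, hedged sketch of the index-subtraction scheme used by subtractDecomposedGEPs above, on plain integers rather than DecomposedGEP (it ignores casts, NSW, and the IsNegated normalization): matching variable indices have their scales subtracted and cancel entirely when equal, while unmatched source indices are appended with a negated scale.

#include <utility>
#include <vector>

// Each index stands in for a VariableGEPIndex as a (value id, scale) pair.
using Index = std::pair<int, long>;

static void subtractIndices(std::vector<Index> &Dest,
                            const std::vector<Index> &Src) {
  for (const Index &S : Src) {
    bool Found = false;
    for (auto It = Dest.begin(); It != Dest.end(); ++It) {
      if (It->first != S.first)
        continue;
      if (It->second != S.second)
        It->second -= S.second; // partial cancellation keeps the entry
      else
        Dest.erase(It);         // exact cancellation removes it
      Found = true;
      break;
    }
    if (!Found)
      Dest.push_back({S.first, -S.second}); // unmatched entries come in negated
  }
}
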
1835bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1836 LocationSize MaybeV1Size,
1837 LocationSize MaybeV2Size,
1838 AssumptionCache *AC,
1839 DominatorTree *DT,
1840 const AAQueryInfo &AAQI) {
1841 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1842 !MaybeV2Size.hasValue())
1843 return false;
1844
1845 const uint64_t V1Size = MaybeV1Size.getValue();
1846 const uint64_t V2Size = MaybeV2Size.getValue();
1847
1848 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1849
1850 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1851 !Var0.hasNegatedScaleOf(Var1) ||
1852 Var0.Val.V->getType() != Var1.Val.V->getType())
1853 return false;
1854
1855 // We'll strip off the Extensions of Var0 and Var1 and do another round
1856 // of GetLinearExpression decomposition. For example, if Var0
1857 // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1858
1859 LinearExpression E0 =
1860 GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1861 LinearExpression E1 =
1862 GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1863 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1864 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1865 return false;
1866
1867 // We have a hit - Var0 and Var1 only differ by a constant offset!
1868
1869 // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1870 // Var1 is possible to calculate, but we're just interested in the absolute
1871 // minimum difference between the two. The minimum distance may occur due to
1872 // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1873 // the minimum distance between %i and %i + 5 is 3.
1874 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1875 MinDiff = APIntOps::umin(MinDiff, Wrapped);
1876 APInt MinDiffBytes =
1877 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1878
1879 // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1880 // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1881 // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1882 // V2Size can fit in the MinDiffBytes gap.
1883 return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1884 MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1885}
1886
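A small standalone numeric sketch (not from this file) of the wrapping minimum-distance computation in constantOffsetHeuristic, reproducing the i3 example from the comment: a symbolic offset difference of 5 in 3 bits also reads as -5 == 3 (mod 8), so the guaranteed gap is 3.

#include "llvm/ADT/APInt.h"

using namespace llvm;

static uint64_t minWrappedDistanceExample() {
  APInt Diff(/*numBits=*/3, /*val=*/5); // E0.Offset - E1.Offset == 5 (mod 8)
  APInt Wrapped = -Diff;                // the other way around the wrap: 3
  // The usable separation is the smaller of the two directions.
  return APIntOps::umin(Diff, Wrapped).getZExtValue(); // == 3
}
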
1887//===----------------------------------------------------------------------===//
1888// BasicAliasAnalysis Pass
1889//===----------------------------------------------------------------------===//
1890
1891AnalysisKey BasicAA::Key;
1892
1893BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1894 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1895 auto &AC = AM.getResult<AssumptionAnalysis>(F);
1896 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1897 return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT);
1898}
1899
1900BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1901 initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1902}
1903
1904char BasicAAWrapperPass::ID = 0;
1905
1906void BasicAAWrapperPass::anchor() {}
1907
1908INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1909 "Basic Alias Analysis (stateless AA impl)", true, true)
1910INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1911INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1912INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1913INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1914 "Basic Alias Analysis (stateless AA impl)", true, true)
1915
1916FunctionPass *llvm::createBasicAAWrapperPass() {
1917 return new BasicAAWrapperPass();
1918}
1919
1920bool BasicAAWrapperPass::runOnFunction(Function &F) {
1921 auto &ACT = getAnalysis<AssumptionCacheTracker>();
1922 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1923 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1924
1925 Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1926 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1927 &DTWP.getDomTree()));
1928
1929 return false;
1930}
1931
1932void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1933 AU.setPreservesAll();
1934 AU.addRequiredTransitive<AssumptionCacheTracker>();
1935 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1936 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1937}
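To close, a hedged usage sketch (assumptions: a FunctionAnalysisManager already populated via the usual PassBuilder registration) showing how the analysis result produced by BasicAA::run above is obtained under the new pass manager.

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Illustrative only: fetch (and lazily compute) BasicAA's result for F.
static BasicAAResult &getBasicAAFor(Function &F, FunctionAnalysisManager &FAM) {
  return FAM.getResult<BasicAA>(F);
}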