#define DEBUG_TYPE "basicaa"
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
/// Returns the size of the object specified by V or std::nullopt if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V, const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  // ...
}

/// Returns true if we can prove that the object specified by V is smaller
/// than Size.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // ...
  // Use the aligned object size, since accesses may validly extend slightly
  // past the end of an object given sufficient alignment.
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc, /*RoundToAlign=*/true);
  // ...
}
/// Return the minimal extent from V to the end of the underlying object,
/// assuming the result is used in an aliasing query.
static TypeSize getMinimalExtentFrom(const Value &V, const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // Dereferenceability gives a lower bound for the extent, but the "or null"
  // part must be excluded when null is a valid pointer in this address space.
  // Frees can be ignored, since an access after free is undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // ...
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}
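// Illustrative, self-contained example (not part of the original file): the
// RoundToAlign option above reflects that, for aliasing purposes, an object's
// usable extent is its size rounded up to its alignment. roundUpToAlign below
// is a hypothetical stand-in for that rounding step, shown under the
// assumption that Align is a power of two.
//
// #include <cassert>
// #include <cstdint>
//
// static uint64_t roundUpToAlign(uint64_t Size, uint64_t Align) {
//   // Round Size up to the next multiple of Align (Align a power of two).
//   return (Size + Align - 1) & ~(Align - 1);
// }
//
// int main() {
//   // A 10-byte object with 8-byte alignment occupies a 16-byte slot, so a
//   // "smaller than 16" proof must fail even though the raw size is 10.
//   assert(roundUpToAlign(10, 8) == 16);
//   assert(roundUpToAlign(16, 8) == 16);
// }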
static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI) {
  BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
}
bool EarliestEscapeInfo::isNotCapturedBefore(const Value *Object,
                                             const Instruction *I, bool OrAt) {
  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(DT.getRoot()->getParent()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;
  // ...
  if (I == Iter.first->second) {
    // ...
  }
  // ...
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    // ...
  }
}
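// Illustrative, self-contained sketch (not part of the original file) of the
// two-map invalidation scheme used above: a forward map from object to its
// earliest capturing instruction, plus a reverse map so that removing an
// instruction drops exactly the cache entries that depend on it. The int IDs
// and the EscapeCache type are hypothetical stand-ins for Value*/Instruction*.
//
// #include <cassert>
// #include <map>
// #include <vector>
//
// struct EscapeCache {
//   std::map<int, int> EarliestEscape;        // object -> capturing inst (0 = none)
//   std::map<int, std::vector<int>> Inst2Obj; // inst -> objects cached against it
//
//   void record(int Obj, int Inst) {
//     EarliestEscape[Obj] = Inst;
//     if (Inst)
//       Inst2Obj[Inst].push_back(Obj);
//   }
//   void removeInstruction(int Inst) {
//     auto It = Inst2Obj.find(Inst);
//     if (It == Inst2Obj.end())
//       return;
//     for (int Obj : It->second)
//       EarliestEscape.erase(Obj); // stale entries must go with the instruction
//     Inst2Obj.erase(It);
//   }
// };
//
// int main() {
//   EscapeCache C;
//   C.record(/*Obj=*/1, /*Inst=*/42);
//   C.removeInstruction(42);
//   assert(!C.EarliestEscape.count(1));
// }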
/// A value equivalent to V with the given zero/sign extensions and truncation
/// applied, in the order: trunc, then sext, then zext.
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;
  /// Whether the value is known to be non-negative.
  bool IsNonNegative = false;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits, bool IsNonNegative)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
        IsNonNegative(IsNonNegative) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
                       IsNonNegative && PreserveNonNeg);
  }

  /// Replace V with zext(NewV).
  CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext(trunc(zext(NewV))) == zext(trunc(NewV))
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(zext(NewV)) == zext(NewV); every extension becomes a zext here.
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
                       ZExtNonNegative);
  }

  /// Replace V with sext(NewV).
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // sext(trunc(sext(NewV))) == sext(trunc(NewV))
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits)
      N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits)
      N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits)
      N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits)
      N = N.truncate(N.getBitWidth() - TruncBits);
    // ...
    if (SExtBits)
      N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits)
      N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
        TruncBits == Other.TruncBits)
      return true;
    // If either value is known non-negative, sext and zext are equivalent for
    // it, so only the total number of extension bits needs to match.
    if (IsNonNegative || Other.IsNonNegative)
      return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
              TruncBits == Other.TruncBits);
    return false;
  }
};
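// Illustrative, self-contained example (not part of the original file): how
// the (ZExtBits, SExtBits, TruncBits) counters track the effective width.
// Starting from an i32 value, trunc to i16 then zext to i64 is recorded as
// TruncBits=16, ZExtBits=48, and getBitWidth() recovers 64. The Casts struct
// is a hypothetical stand-in that models only the bit accounting.
//
// #include <cassert>
//
// struct Casts {
//   unsigned ValueBits, ZExt = 0, SExt = 0, Trunc = 0;
//   unsigned width() const { return ValueBits - Trunc + SExt + ZExt; }
// };
//
// int main() {
//   Casts C{/*ValueBits=*/32};
//   C.Trunc = 16; // trunc i32 -> i16
//   C.ZExt = 48;  // zext i16 -> i64
//   assert(C.width() == 64); // 32 - 16 + 48 + 0
// }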
/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;
  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(const CastedValue &Val,
                                            const DataLayout &DL,
                                            unsigned Depth, AssumptionCache *AC,
                                            DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());

      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;
      // ...
      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction; we can't decompose it further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if the 'or' is disjoint; otherwise we can't analyze it.
        if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
          return Val;
        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // A shift count exceeding the bit width would produce a poison value,
        // so we can't decompose such an expression any further.
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;
        // ...
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
        Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)), DL,
        Depth + 1, AC, DT);

  return Val;
}
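// Illustrative, self-contained worked example (not part of the original
// file): linearizing (x + 2) << 3. Starting from the identity expression
// 1*x + 0, the Add contributes Offset += 2, then the Shl multiplies both
// Scale and Offset by 8, yielding 8*x + 16. The Linear struct below is a
// hypothetical plain-integer model of LinearExpression.
//
// #include <cassert>
// #include <cstdint>
//
// struct Linear { int64_t Scale, Offset; };
//
// int main() {
//   Linear E{1, 0};  // x
//   E.Offset += 2;   // x + 2
//   E.Scale <<= 3;   // (x + 2) << 3 distributes over both terms
//   E.Offset <<= 3;
//   assert(E.Scale == 8 && E.Offset == 16);
// }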
/// Ensure a pointer offset fits in an integer of size IndexSize (in bits)
/// when that size is smaller than the maximum index size, by sign-extending
/// it in-place from the low IndexSize bits.
static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  if (ShiftBits != 0) {
    Offset <<= ShiftBits;
    Offset.ashrInPlace(ShiftBits);
  }
}
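// Illustrative, self-contained example (not part of the original file):
// shifting left then arithmetic-shifting right by the same amount
// sign-extends the low IndexSize bits. A 64-bit offset that is really a
// negative 32-bit index (0x00000000FFFFFFFF) becomes -1, which matters for
// 32-bit pointers whose precise aliasing relies on two's-complement wrap.
//
// #include <cassert>
// #include <cstdint>
//
// int main() {
//   uint64_t Offset = 0xFFFFFFFFull; // -1 when viewed as a 32-bit index
//   unsigned ShiftBits = 64 - 32;    // BitWidth - IndexSize
//   int64_t Adjusted = (int64_t)(Offset << ShiftBits) >> ShiftBits;
//   assert(Adjusted == -1);
// }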
/// A linear transformation of a variable; in a GEP, this is the index.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  /// Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      // If the signs match, compare the scales with opposite signs.
      return Scale == -Other.Scale;
    // If the signs differ, compare the scales directly.
    return Scale == Other.Scale;
  }

  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // The only non-operator case we can look through is a GlobalAlias.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (!GA->isInterposable()) {
        V = GA->getAliasee();
        continue;
      }
    }
    // ...
    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }
    // ...
    if (const auto *PHI = dyn_cast<PHINode>(V)) {
      // Look through single-argument phi nodes created by LCSSA.
      if (PHI->getNumIncomingValues() == 1) {
        V = PHI->getIncomingValue(0);
        continue;
      }
    } else if (const auto *Call = dyn_cast<CallBase>(V)) {
      // ...
    }
    // ...
    // Track whether we've seen at least one inbounds GEP, and if so, whether
    // all GEPs parsed so far were inbounds.
    if (Decomposed.InBounds == std::nullopt)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;
    // ...
    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // For a struct, add the member offset.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;
        Decomposed.Offset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        // ...
        Decomposed.Offset += GTI.getSequentialElementStride(DL).getFixedValue() *
                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }
      // ...
      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to the index size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits, false), DL, 0, AC, DT);

      // Scale by the element size.
      // ...
      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
      APInt Scale = LE.Scale.sext(MaxIndexSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
             areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          // ...
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure the scale makes sense for this target's index size.
      adjustToIndexSize(Scale, IndexSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /*IsNegated=*/false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset)
      adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}
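// Illustrative, self-contained worked example (not part of the original
// file): decomposing the equivalent of
//   getelementptr {i32, [10 x i32]}, ptr %p, i64 0, i32 1, i64 %i
// yields Base = %p, constant Offset = 4 (field 1 starts after one i32), and
// one variable index with Scale = 4 (the i32 element stride). The plain
// integers below are hypothetical stand-ins for the APInt bookkeeping.
//
// #include <cassert>
// #include <cstdint>
//
// int main() {
//   int64_t Offset = 0;
//   Offset += 4;        // struct field 1 of {i32, [10 x i32]} is at byte 4
//   int64_t Scale = 4;  // variable index %i contributes sizeof(i32) per step
//   // The access address is Base + Offset + Scale * i.
//   for (int64_t i : {0, 1, 5})
//     assert(Offset + Scale * i == 4 + 4 * i);
// }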
ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  // ...
  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // A noalias readonly argument points to memory that is invariant for the
    // lifetime of the underlying SSA value.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        // ...
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      // ...
      continue;
    }

    // If all values incoming to a phi point to local memory, so does the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      // ...
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum lookup depth, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;
  // ...
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}

MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return ModRefInfo::ModRef;
}
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);
  return !F1 || !F2 || F1 == F2;
}

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");
  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame,
  // because the current frame might be destroyed by the time they run.
  // However, a tail call may use an alloca with byval: calling with byval
  // copies the contents of the alloca into argument registers or stack slots,
  // so there is no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // A call can only access a locally allocated object if it is passed as an
  // argument to the call, or has escaped prior to the call.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBefore(Object, Call, /*OrAt=*/false)) {
    // ...
    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      if (!(*CI)->getType()->isPointerTy())
        continue;

      // The call doesn't access memory through this operand, so we don't care
      // whether it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;
      // ...
      // The operand aliases Object, but the call only reads from it.
      if (Call->onlyReadsMemory(OperandNo)) {
        // ...
      }
      // The operand aliases Object, but the call only writes into it.
      if (Call->onlyWritesMemory(OperandNo)) {
        // ...
      }
      // ...
    }
    // ...
  }
  // ...
}
/// Return true if we know V to be the base address of the corresponding
/// memory object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here, most notably the GEP case.
  return isa<AllocaInst>(V) || isa<GlobalVariable>(V);
}
AliasResult
BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
                        const Value *V2, LocationSize V2Size,
                        const Value *UnderlyingV1, const Value *UnderlyingV2,
                        AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;
    // ...
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out-of-bounds address for
  // the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to the above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && !V1Size.isScalable() &&
        DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo when
  // performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);
  // ...
  // If there is a constant difference between the pointers, and the
  // difference is less than the size of the associated memory object, then we
  // know the objects partially overlap; if it is greater, they don't overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for the Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();
    // ...
    if (Off.ult(LSize)) {
      // The accesses overlap; check whether the right access is nested
      // entirely within the left one.
      AliasResult AR = AliasResult::PartialAlias;
      if (VRightSize.hasValue() && !VRightSize.isScalable() &&
          Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
        // ...
      }
      return AR;
    }
    // ...
    if (!Overflow && Off.uge(UpperRange))
      return AliasResult::NoAlias;
  }
  // Given one scalable offset between accesses and a scalable type size, we
  // can divide each side by vscale, treating both values as constants.
  if (DecompGEP1.VarIndices.size() == 1 &&
      DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
      DecompGEP1.Offset.isZero() &&
      PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
                          PatternMatch::m_VScale())) {
    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
    APInt Scale =
        ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
    // ...
    // Check whether the offset is known not to overflow; if not, attempt to
    // prove it using the known values of vscale_range.
    bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
    // ...
  }
  // ...
  // Compute the GCD of all variable scales, and a constant range for the sum
  // of the scaled variable offsets.
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    // ...
    APInt ScaleForGCD = Scale;
    // ...
    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /*ForSigned=*/false,
                                            true, &AC, Index.CxtI);
    // ...
    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    // ...
    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // Using modular arithmetic: the accesses are at [ModOffset, ModOffset +
  // V1Size) and [0, V2Size) modulo GCD. If those ranges don't intersect, the
  // accesses cannot overlap.
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;
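  // Illustrative, self-contained example (not part of the original file) of
  // the GCD check above: if every variable index is a multiple of G, the
  // first access touches [Off mod G, Off mod G + V1Size) within each G-sized
  // window while the second touches [0, V2Size); when those windows don't
  // intersect, the accesses never overlap. disjointByGCD is a hypothetical
  // plain-integer model of the APInt logic.
  //
  // #include <cassert>
  // #include <cstdint>
  //
  // static bool disjointByGCD(int64_t Off, int64_t G, int64_t V1Size,
  //                           int64_t V2Size) {
  //   int64_t ModOffset = Off % G;
  //   if (ModOffset < 0)
  //     ModOffset += G; // want mod, not rem
  //   return ModOffset >= V2Size && G - ModOffset >= V1Size;
  // }
  //
  // int main() {
  //   // 4-byte accesses at 8*i + 4 never overlap 4-byte accesses at 8*j.
  //   assert(disjointByGCD(/*Off=*/4, /*G=*/8, /*V1Size=*/4, /*V2Size=*/4));
  //   // But with 2-byte granularity, offset 1 can overlap an access at 0.
  //   assert(!disjointByGCD(/*Off=*/1, /*G=*/2, /*V1Size=*/2, /*V2Size=*/2));
  // }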
  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
        if (Var.IsNSW)
          return true;

        int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
        // The max value of abs(V) is 2^ValOrigBW - 1, so multiplying by a
        // constant smaller than 2^(bitwidth(Val) - ValOrigBW) cannot wrap.
        int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
        if (MaxScaleValueBW <= 0)
          return false;
        return Var.Scale.ule(
            APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
      };
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0, then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1; if V0 != V1 then
    // abs(VarIndex) >= abs(Scale). MayBeCrossIteration must be false, to
    // avoid reasoning about inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /*CxtI=*/nullptr, DT))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi.
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve.
  return AliasResult::MayAlias;
}
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on the
  // corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      // ...
    }
  // ...
}

AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  // If the values are PHIs in the same block, we can do a more precise as
  // well as efficient check: just check for aliases between the values on
  // the corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      std::optional<AliasResult> Alias;
      // ...
    }
  // ...
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };
  // ...
  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // ...
    if (isa<PHINode>(PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one phi input. It is important
        // that we handle the single-phi case, as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial LCSSA and
    // recursive phi cases.
    return AliasResult::MayAlias;
  // ...
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    // ...
  }
  // ...
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // ...
  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we need to look at "unmerged" nodes.
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;
  // ...
  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
  // ...
  // If one pointer is an escape source and the other is a local object that
  // has not been captured before that point, the two cannot alias.
  if (isEscapeSource(O1) &&
      AAQI.CI->isNotCapturedBefore(O2, dyn_cast<Instruction>(O1),
                                   /*OrAt=*/true))
    return AliasResult::NoAlias;
  if (isEscapeSource(O2) &&
      AAQI.CI->isNotCapturedBefore(O1, dyn_cast<Instruction>(O2),
                                   /*OrAt=*/true))
    return AliasResult::NoAlias;
  // ...
  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  if (EnableSeparateStorageAnalysis) {
    // Look for "separate_storage" assume operand bundles covering O1 and O2.
    // ...
    if (OBU.getTagName() == "separate_storage") {
      // ...
      auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
        // ...
        if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
          const Instruction *FirstI =
              &*PtrA->getParent()->getEntryBlock().begin();
          return isValidAssumeForContext(Assume, FirstI, DT,
                                         /*AllowEphemerals=*/true);
        }
        return false;
      };

      if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
        // The assume must be valid either at the query context or at the
        // definition of one of the pointers, which dominates all their uses.
        if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
                                             /*AllowEphemerals=*/true)) ||
            ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2))
          return AliasResult::NoAlias;
      }
    }
    // ...
  }
  // Limit recursion depth; this also terminates otherwise infinitely
  // recursive queries.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache. Cached pairs are stored in sorted order, so canonicalize
  // the pair before lookup.
  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);

  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::MayAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption.
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // The cache contains sorted {V1,V2} pairs, so restore the original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;
  // ...
  Entry.Result = Result;
  // The cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);
  Entry.NumAssumptionUses = -1;

  // If the assumption has been disproven, remove any results that may have
  // been based on it.
  if (AssumptionDisproven)
    // ...
AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    // ...
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    // ...
  }
  // ...
}
/// Check whether two Values can be considered equivalent. In addition to
/// pointer equality, this requires that the values cannot differ between
/// loop iterations when the query may be cross-iteration.
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2,
                                                  const AAQueryInfo &AAQI) {
  if (V != V2)
    return false;
  if (!AAQI.MayBeCrossIteration)
    return true;

  // Non-instructions and instructions in the entry block cannot be part of
  // a loop.
  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst || Inst->getParent()->isEntryBlock())
    return true;

  return isNotInCycle(Inst, DT, /*LI=*/nullptr);
}
/// Computes the symbolic difference between two decomposed GEPs.
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
           !areBothVScale(Dest.Val.V, Src.Val.V)) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;

      // Normalize IsNegated if we're going to lose the NSW flag anyway.
      if (Dest.IsNegated) {
        Dest.Scale = -Dest.Scale;
        Dest.IsNegated = false;
        Dest.IsNSW = false;
      }

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
                                /*IsNegated=*/true};
      DestGEP.VarIndices.push_back(Entry);
    }
  }
}
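// Illustrative, self-contained example (not part of the original file) of the
// subtraction rule above, with hypothetical char variable names standing in
// for SSA values: an index of x with scale 4 subtracted from an index of x
// with scale 20 leaves 16*x; equal scales cancel and the entry disappears.
//
// #include <cassert>
// #include <map>
//
// int main() {
//   std::map<char, int> Dest{{'x', 20}}, Src{{'x', 4}};
//   for (auto [V, Scale] : Src) {
//     auto It = Dest.find(V);
//     if (It == Dest.end())
//       Dest[V] = -Scale;    // unmatched entries are added negated
//     else if (It->second != Scale)
//       It->second -= Scale; // matched entries subtract their scales
//     else
//       Dest.erase(It);      // difference is zero: drop the index entirely
//   }
//   assert(Dest['x'] == 16);
// }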
bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      !Var0.hasNegatedScaleOf(Var1) ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // Strip off the extensions of Var0 and Var1 and do another round of
  // GetLinearExpression decomposition. For example, if Var0 is zext(%x + 1)
  // we should get V == %x and an offset of 1.
  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
    return false;

  // We have a hit: Var0 and Var1 only differ by a constant offset! We're only
  // interested in the absolute minimum difference between the two, which may
  // occur due to wrapping: for "add i3 %i, 5", if %i == 7 then
  // 7 + 5 mod 8 == 4, so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // Due to wrapping arithmetic, we can't definitely say whether GEP1 is
  // before or after V2, so only declare NoAlias if both V1Size and V2Size fit
  // within MinDiffBytes.
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}
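// Illustrative, self-contained worked example (not part of the original
// file) of the minimum-distance computation above, including wrap. For a
// 3-bit index, %i and %i + 5 can be as close as min(5, 8 - 5) = 3 steps
// apart, so with a 4-byte scale the accesses are at least 12 bytes apart.
//
// #include <algorithm>
// #include <cassert>
// #include <cstdint>
//
// int main() {
//   unsigned Bits = 3;
//   uint64_t Diff = 5 % (1u << Bits);                 // E0.Offset - E1.Offset
//   uint64_t Wrapped = ((1u << Bits) - Diff) % (1u << Bits);
//   uint64_t MinDiff = std::min(Diff, Wrapped);
//   assert(MinDiff == 3);
//   uint64_t Scale = 4;
//   assert(MinDiff * Scale == 12);                    // MinDiffBytes
// }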
void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree()));

  return false;
}