#define DEBUG_TYPE "basicaa"

STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
/// Returns the size of the object specified by V, or std::nullopt if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  // ...
}

/// Returns true if we can prove that the object specified by V is smaller
/// than Size.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // ...
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);
  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}
/// Return the minimal extent from V to the end of the underlying object,
/// assuming the result is used in an aliasing query.
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // ...
}
/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}
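
// Illustrative example (editorial addition, not in the upstream file): these
// size helpers let the top-level query prove NoAlias from undefined behavior.
// Given
//
//   %a = alloca i32        ; object size is 4 bytes
//   %v = load i64, ptr %a  ; 8-byte access
//
// isObjectSmallerThan(%a, 8, ...) returns true, so an 8-byte access based on
// a different pointer cannot legally overlap %a at all.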
static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI) {
  SmallVector<BasicBlock *> Succs(successors(I->getParent()));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, I->getParent(), nullptr, DT,
                                         LI);
}
bool EarliestEscapeInfo::isNotCapturedBefore(const Value *Object,
                                             const Instruction *I, bool OrAt) {
  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(DT.getRoot()->getParent()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction means Object is never captured.
  if (!Iter.first->second)
    return true;

  // ...
  if (I == Iter.first->second) {
    if (OrAt)
      return false;
    return isNotInCycle(I, &DT, LI);
  }
  // ...
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}
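
// Illustrative sketch (editorial addition, not in the upstream file): the
// cache maps each underlying object to the earliest instruction that may
// capture it, or nullptr if it never escapes. For
//
//   %obj = alloca i32
//   call void @use(ptr %obj)   ; earliest potential capture
//
// isNotCapturedBefore(%obj, I, /*OrAt=*/false) is true for any I that the
// capturing call cannot reach, and removeInstruction keeps the cache
// consistent when the capturing instruction is erased by a transform.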
namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;
  /// Whether the value after the truncation is known to be non-negative.
  bool IsNonNegative = false;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits, bool IsNonNegative)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
        IsNonNegative(IsNonNegative) {}
  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }
  CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
                       IsNonNegative && PreserveNonNeg);
  }
  /// Replace V with zext(NewV).
  CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // The truncation still swallows the extension.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
                       ZExtNonNegative);
  }
  /// Replace V with sext(NewV).
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
  }
  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits)
      N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits)
      N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits)
      N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }
  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits)
      N = N.truncate(N.getBitWidth() - TruncBits);
    if (IsNonNegative && !N.isAllNonNegative())
      N = N.intersectWith(
          ConstantRange(APInt::getZero(N.getBitWidth()),
                        APInt::getSignedMinValue(N.getBitWidth())));
    if (SExtBits)
      N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits)
      N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }
  bool canDistributeOver(bool NUW, bool NSW) const {
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }
  bool hasSameCastsAs(const CastedValue &Other) const {
    if (V->getType() != Other.V->getType())
      return false;

    if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
        TruncBits == Other.TruncBits)
      return true;

    // If either CastedValue has a non-negative zext, the sext and zext bits
    // are interchangeable for that value.
    if (IsNonNegative || Other.IsNonNegative)
      return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
              TruncBits == Other.TruncBits);

    return false;
  }
};
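
// Worked example (editorial addition, not in the upstream file): for an i8
// value V, CastedValue(V, /*ZExtBits=*/16, /*SExtBits=*/8, /*TruncBits=*/0,
// /*IsNonNegative=*/false) models zext(sext(V)) to i32, so getBitWidth()
// is 8 + 8 + 16 = 32, and evaluateWith(APInt(8, 0x80)) sign-extends -128 to
// 16 bits (0xFF80) and then zero-extends it to 32 bits (0x0000FF80).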
/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;
  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }
  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
} // end anonymous namespace
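
// Worked example (editorial addition, not in the upstream file): starting
// from LinearExpression(Val) with Scale=1, Offset=0, IsNSW=true, calling
// mul(APInt(BW, 4), /*MulIsNSW=*/true) yields Scale=4, Offset=0 and keeps
// IsNSW because the offset is zero. With a nonzero offset the flag would be
// dropped, per the distribution caveat in the comment above.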
/// Analyzes the specified value as a linear expression: "A*V + B", where A
/// and B are constant integers.
static LinearExpression GetLinearExpression(const CastedValue &Val,
                                            const DataLayout &DL,
                                            unsigned Depth, AssumptionCache *AC,
                                            DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we handle is an `or` that acts as an add.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;
      // ...
      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if the or is equivalent to an add (the operands have no
        // common bits set).
        if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
          return Val;
        [[fallthrough]];
      case Instruction::Add: {
        // ...
        break;
      }
      case Instruction::Sub: {
        // ...
        break;
      }
      case Instruction::Mul:
        // ...
        break;
      case Instruction::Shl:
        // A shift whose count exceeds the bit width would produce a poison
        // value, so we can't decompose it further.
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;
        // ...
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        break;
      }
      return E;
    }
  }

  if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
        Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)), DL,
        Depth + 1, AC, DT);

  return Val;
}
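
// Illustrative decomposition (editorial addition, not in the upstream file):
//
//   %i1 = add nsw i32 %i, 1
//   %s  = shl nsw i32 %i1, 2
//
// GetLinearExpression on %s recurses through the shl and the add and returns
// Scale=4, Offset=4, Val=%i, IsNSW=true, i.e. "%s == 4*%i + 4".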
/// To ensure a pointer offset fits in an integer of size IndexSize (in bits)
/// when that size is smaller than the maximum index size, sign-extend the
/// offset from IndexSize bits.
static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  if (ShiftBits != 0) {
    Offset <<= ShiftBits;
    Offset.ashrInPlace(ShiftBits);
  }
}
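
// Worked example (editorial addition, not in the upstream file): with a
// 64-bit APInt offset but a 32-bit pointer index size,
//
//   APInt Offset(64, 0xFFFFFFFF);
//   adjustToIndexSize(Offset, 32);
//
// shifts left and then arithmetically right by 32 bits, sign-extending from
// bit 31, so Offset becomes -1 rather than 4294967295.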
namespace {
/// A linear transformation of a Value: represents
/// zext(sext(trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  /// Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};

// ... (struct DecomposedGEP elided)
} // end anonymous namespace
/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-argument phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // ...
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    // Track the common nowrap flags for all GEPs we see.
    Decomposed.NWFlags &= GEPOp->getNoWrapFlags();

    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    unsigned AS = GEPOp->getPointerAddressSpace();
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;
        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
        // ...
        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
      bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
      bool NonNeg = NUSW && GEPOp->hasNoUnsignedWrap();
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);

      // Scale by the type size and accumulate into the decomposition.
      // ...
      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
      APInt Scale = LE.Scale.sext(MaxIndexSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
             areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          // ...
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // ...
      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset)
      adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}
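
// Illustrative decomposition (editorial addition, not in the upstream file):
//
//   %p = getelementptr inbounds [10 x i32], ptr %base, i64 0, i64 %i
//
// yields Base=%base, Offset=0 and one VariableGEPIndex {Val=%i, Scale=4}.
// As the merge loop's comment notes, repeated index variables are folded:
// A[x][x] over rows of four i32 elements turns x*16 + x*4 into x*20.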
ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  ModRefInfo Result = ModRefInfo::NoModRef;
  SmallVector<const Value *, 16> Worklist;
  unsigned MaxLookup = 8;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // A noalias readonly argument points to memory that is invariant while
    // the function executes, so Mod can be excluded from the mask.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      // ...
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so
    // does the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}
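
// Illustrative example (editorial addition, not in the upstream file): for
//
//   define void @f(ptr noalias readonly %p) { ... }
//
// a location based on %p cannot be modified while @f executes, so the mask
// returned for it is at most Ref, letting callers drop Mod from their
// ModRef results.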
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}
/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}
ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return ModRefInfo::ModRef;
}
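
// Illustrative example (editorial addition, not in the upstream file): for a
// call such as
//
//   call void @g(ptr writeonly %buf)
//
// getArgModRefInfo reports Mod for %buf's operand index; readonly yields
// Ref, readnone yields NoModRef, and an unannotated pointer argument is
// conservatively ModRef.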
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);
  return !F1 || !F2 || F1 == F2;
}
904 "BasicAliasAnalysis doesn't support interprocedural queries.");
905 return aliasCheck(LocA.
Ptr, LocA.
Size, LocB.
Ptr, LocB.
Size, AAQI, CtxI);
918 "AliasAnalysis query involving multiple functions!");
927 if (isa<AllocaInst>(Object))
928 if (
const CallInst *CI = dyn_cast<CallInst>(Call))
929 if (CI->isTailCall() &&
930 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
935 if (
auto *AI = dyn_cast<AllocaInst>(Object))
936 if (!AI->isStaticAlloca() &&
isIntrinsicCall(Call, Intrinsic::stackrestore))
944 if (!isa<Constant>(Object) && Call != Object &&
951 unsigned OperandNo = 0;
952 for (
auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
953 CI != CE; ++CI, ++OperandNo) {
954 if (!(*CI)->getType()->isPointerTy())
959 if (Call->doesNotAccessMemory(OperandNo))
972 if (Call->onlyReadsMemory(OperandNo)) {
977 if (Call->onlyWritesMemory(OperandNo)) {
/// Return true if we know V to be the base address of the corresponding
/// memory object.
static bool isBaseOfObject(const Value *V) {
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;
    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    // ...
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Swap GEP1 and GEP2 if GEP2 has more variable indices.
  if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
    std::swap(DecompGEP1, DecompGEP2);
    std::swap(V1Size, V2Size);
    std::swap(UnderlyingV1, UnderlyingV2);
  }

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out-of-bounds address for
  // the two to alias, then we can assume noalias.
  if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  // Symmetric case to above.
  if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V1Size.hasValue() && !V1Size.isScalable() &&
      DecompGEP1.Offset.sle(-V1Size.getValue()) &&
      isBaseOfObject(DecompGEP1.Base))
    return AliasResult::NoAlias;

  // For GEPs with identical offsets, we can preserve the size and AAInfo when
  // performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);
  // If there is a constant difference between the pointers, but the
  // difference is less than the size of the associated memory object, then we
  // know the objects are partially overlapping.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    // ...
    const bool Swapped = Off.isNegative();
    // ...

    if (Off.ult(LSize)) {
      // The memory referenced by the right pointer is nested inside the
      // memory referenced by the left pointer.
      AliasResult AR = AliasResult::PartialAlias;
      if (VRightSize.hasValue() && !VRightSize.isScalable() &&
          Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
        // Remember the nesting offset in the AliasResult.
        AR.setOffset(-Off.getSExtValue());
        AR.swap(Swapped);
      }
      return AR;
    }

    // ...
    if (!Overflow && Off.uge(UpperRange))
      return AliasResult::NoAlias;
  }
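
// Worked example (editorial addition, not in the upstream file): with a
// constant difference Off=4, an 8-byte access on the left and a 4-byte
// access on the right, Off.ult(LSize) holds (4 < 8) and 4 + 4 <= 8, so the
// right access is nested entirely inside the left one: PartialAlias with
// offset -4 recorded in the AliasResult.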
  // VScale alias analysis: a single scalable index with a zero constant
  // offset.
  if (DecompGEP1.VarIndices.size() == 1 &&
      DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
      DecompGEP1.Offset.isZero() &&
      PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
                          PatternMatch::m_VScale())) {
    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
    APInt Scale =
        ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
    // ...
    bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
    // ...
  }

  // If the difference between the pointers is Offset +<nuw> Indices, the
  // addition cannot wrap the index size.
  if (!DecompGEP1.VarIndices.empty() &&
      DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
      !V2Size.isScalable() && DecompGEP1.Offset.sge(V2Size.getValue()))
    return AliasResult::NoAlias;
  // Try to determine the modulo (GCD of the scales) of the variable indices,
  // and a range of possible values for the total offset.
  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                            true, &AC, Index.CxtI);
    // ...
    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    // ...
    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // Using arithmetic modulo GCD: if the two accesses cannot overlap within a
  // single stride, they do not alias.
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;
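
// Worked example (editorial addition, not in the upstream file): if every
// variable index is a multiple of GCD=8 and the constant difference is 4,
// the accesses lie at 4 (mod 8) and 0 (mod 8). For two 4-byte accesses,
// ModOffset=4 >= V2Size=4 and GCD-ModOffset=4 >= V1Size=4, so they can
// never overlap and the result is NoAlias.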
  // Try to prove a lower bound on the absolute value of the variable part.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
        if (Var.IsNSW)
          return true;

        int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
        // The multiplication cannot wrap as long as Scale fits in the spare
        // high bits beyond the original value's width.
        int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
        if (MaxScaleValueBW <= 0)
          return false;
        return Var.Scale.ule(
            APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
      };
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // ...
  }

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve.
  return AliasResult::MayAlias;
}
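
// Worked example (editorial addition, not in the upstream file): for &A[x]
// vs &A[y] with i32 elements, the difference decomposes to 4*x - 4*y. If x
// and y are known unequal, the two indices have negated scales and
// MinAbsVarIndex=4, so with Offset=0 the two 4-byte accesses are at least
// one element apart in either direction: NoAlias.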
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      // ...
    }
  // ...
}
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  // If the values are PHIs in the same block, we can do a more precise as
  // well as efficient check: just check for aliases between the values on
  // corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      std::optional<AliasResult> Alias;
      // ...
    }

  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // ...
    if (isa<PHINode>(PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one Phi input.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;

  // If the PHI is recursive, the access effectively spans the whole object.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // Alias each source of the PHI against V2 and merge the results.
  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
                                     MemoryLocation(V2, V2Size), AAQI);
  // ...
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];
    AliasResult ThisAlias = AAQI.AAR.alias(
        MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }
  return Alias;
}
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // ...
  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for the undef in this case.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
  // ...

  // If one pointer is the result of a call/invoke or load and the other is a
  // non-escaping local object within the same function, then we know the
  // object couldn't escape to a point where the call could return it.
  if (isEscapeSource(O1) &&
      AAQI.CA->isNotCapturedBefore(O2, dyn_cast<Instruction>(O1), true))
    return AliasResult::NoAlias;
  if (isEscapeSource(O2) &&
      AAQI.CA->isNotCapturedBefore(O1, dyn_cast<Instruction>(O2), true))
    return AliasResult::NoAlias;

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;
  // If the pointers are marked as belonging to separate storage, they do not
  // alias.
  if (EnableSeparateStorageAnalysis) {
    for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
      // ...
      AssumeInst *Assume = cast<AssumeInst>(Elem);
      OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
      if (OBU.getTagName() == "separate_storage") {
        assert(OBU.Inputs.size() == 2);
        const Value *Hint1 = OBU.Inputs[0].get();
        const Value *Hint2 = OBU.Inputs[1].get();
        const Value *HintO1 = getUnderlyingObject(Hint1);
        const Value *HintO2 = getUnderlyingObject(Hint2);

        auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
          if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr))
            return isValidAssumeForContext(Assume, PtrI, DT,
                                           /* AllowEphemerals */ true);
          if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
            const Instruction *FirstI =
                &*PtrA->getParent()->getEntryBlock().begin();
            return isValidAssumeForContext(Assume, FirstI, DT,
                                           /* AllowEphemerals */ true);
          }
          return false;
        };

        if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
          // Note that we go back to V1 and V2 for the
          // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
          // so strictly more assumptions are valid for them.
          if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
                                               /* AllowEphemerals */ true)) ||
              ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2))
            return AliasResult::NoAlias;
        }
      }
    }
  }
  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias,
                                    AAQueryInfo::CacheEntry::AssumptionBased});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption, either directly or via an entry
      // that may itself be based on an assumption.
      ++AAQI.NumAssumptionUses;
      if (Entry.isAssumption())
        ++Entry.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return the original
    // order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);

  // If the assumption has been disproven, remove any results that may have
  // been based on it.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // ...
  if (AAQI.Depth == 1) {
    // Any remaining assumption-based results must be based on proven
    // assumptions, so convert them to definitive results.
    // ...
  }

  return Result;
}
AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size, const Value *V2,
    LocationSize V2Size, AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // ...
  return AliasResult::MayAlias;
}
/// Check whether two Values can be considered equivalent. In addition to
/// pointer equality, this also accounts for values that may differ across
/// loop iterations when MayBeCrossIteration is set.
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2,
                                                  const AAQueryInfo &AAQI) {
  if (V != V2)
    return false;

  if (!AAQI.MayBeCrossIteration)
    return true;

  // Non-instructions and instructions in the entry block cannot be part of
  // a loop.
  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst || Inst->getParent()->isEntryBlock())
    return true;

  return isNotInCycle(Inst, DT, /*LI*/ nullptr);
}
/// Computes the symbolic difference between two de-composed GEPs.
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  // Drop the nuw flag if subtraction of constant offsets overflows in an
  // unsigned sense.
  if (DestGEP.Offset.ult(SrcGEP.Offset))
    DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();

  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
           !areBothVScale(Dest.Val.V, Src.Val.V)) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;

      // Normalize IsNegated if we're going to lose the NSW flag anyway.
      if (Dest.IsNegated) {
        Dest.Scale = -Dest.Scale;
        Dest.IsNegated = false;
        Dest.IsNSW = false;
      }

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        // Drop the nuw flag if subtraction of V's Scale overflows in an
        // unsigned sense.
        if (Dest.Scale.ult(Src.Scale))
          DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
                                /* IsNegated */ true};
      DestGEP.VarIndices.push_back(Entry);

      // Drop nuw when there are unconsumed variable indices from SrcGEP.
      DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
    }
  }
}
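
// Worked example (editorial addition, not in the upstream file): subtracting
// the decomposition of (%base + 4*%i) from (%base + 4*%i + 8) cancels the
// matching variable entry and leaves Offset=8 with no VarIndices. An
// unmatched source index is instead appended with IsNegated=true, and nuw is
// dropped whenever the subtraction might wrap.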
/// Check whether the two variable indices differ only by a constant offset,
/// and whether that offset is large enough to rule out overlap.
bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      !Var0.hasNegatedScaleOf(Var1) ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // Strip off the extensions of Var0 and Var1 and do another round of
  // GetLinearExpression decomposition. For example, if Var0 is zext(%x + 1)
  // we should get %x with a constant offset of 1.
  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
    return false;

  // We have a hit: Var0 and Var1 only differ by a constant offset! Because of
  // wrapping arithmetic we cannot tell which pointer is lower, but we know
  // one of the two accesses lies within MinDiffBytes of the other.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}
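
// Worked example (editorial addition, mirroring the in-code comment): with
// Var0 = zext(%x + 1) at scale 4 and Var1 = zext(%x) at scale -4, the second
// GetLinearExpression round finds the common value %x with constant offsets
// 1 and 0, giving MinDiff=1 and MinDiffBytes=4. Two 4-byte accesses one
// element apart therefore cannot overlap, and the heuristic returns true
// (NoAlias).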
void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree()));

  return false;
}
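
// Usage sketch (editorial addition, not in the upstream file): querying the
// analysis through the new pass manager. The pass-pipeline setup around this
// function is assumed.
//
//   #include "llvm/Analysis/AliasAnalysis.h"
//
//   llvm::AliasResult queryAlias(llvm::Function &F,
//                                llvm::FunctionAnalysisManager &FAM,
//                                const llvm::MemoryLocation &A,
//                                const llvm::MemoryLocation &B) {
//     // AAManager aggregates BasicAA with the other registered AA results.
//     llvm::AAResults &AA = FAM.getResult<llvm::AAManager>(F);
//     return AA.alias(A, B);
//   }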