#define DEBUG_TYPE "basicaa"

STATISTIC(SearchLimitReached,
          "Number of times the limit to decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(true));
/// Returns the size of the object specified by V or std::nullopt if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  // ...
}

/// Returns true if we can prove that the object specified by V is smaller
/// than Size.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // ...
  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc, /*RoundToAlign=*/true);
  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}
/// Return the minimal extent from V to the end of the underlying object,
/// assuming the result is used in an aliasing query.
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // ...
}
/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}
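// Illustrative example (not part of the original file): if the object on one
// side is provably smaller than the access on the other, the access cannot
// fit inside the object, so the pointers cannot alias. Given IR like
//   %a = alloca i32        ; object is exactly 4 bytes
//   load i64, ptr %p       ; 8-byte access
// isObjectSmallerThan(%a, TypeSize::getFixed(8), DL, TLI, NullIsValidLoc)
// returns true, and aliasCheck can conclude NoAlias between %a and %p.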
CaptureComponents
SimpleCaptureAnalysis::getCapturesBefore(const Value *Object,
                                         const Instruction *I, bool OrAt) {
  auto [CacheIt, Inserted] =
      IsCapturedCache.try_emplace(Object);
  if (!Inserted)
    return CacheIt->second;

  CaptureComponents Ret = /* ...run capture tracking on Object... */;
  CacheIt->second = Ret;
  return Ret;
}

static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI) {
  // A block whose successors cannot reach it again is not part of a cycle.
  BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
}
CaptureComponents
EarliestEscapeAnalysis::getCapturesBefore(const Value *Object,
                                          const Instruction *I, bool OrAt) {
  auto Iter = EarliestEscapes.try_emplace(Object);
  if (Iter.second) {
    std::pair<Instruction *, CaptureComponents> EarliestCapture =
        FindEarliestCapture(Object,
                            *const_cast<Function *>(DT.getRoot()->getParent()),
                            /*ReturnCaptures=*/false, DT,
                            CaptureComponents::Provenance);
    if (EarliestCapture.first)
      Inst2Obj[EarliestCapture.first].push_back(Object);
    Iter.first->second = EarliestCapture;
  }

  auto IsNotCapturedBefore = [&]() {
    // No capturing instruction means the object does not escape at all.
    Instruction *CaptureInst = Iter.first->second.first;
    if (!CaptureInst)
      return true;
    // ...
    if (I == CaptureInst) {
      if (OrAt)
        return false;
      return isNotInCycle(I, &DT, LI);
    }
    return !isPotentiallyReachable(CaptureInst, I, nullptr, &DT, LI);
  };
  if (IsNotCapturedBefore())
    return CaptureComponents::None;
  return Iter.first->second.second;
}

void EarliestEscapeAnalysis::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}
/// A value together with the zext/sext/trunc casts that have been applied to
/// it.
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;
  /// True if the value is known to be non-negative, which makes sext and
  /// zext bits interchangeable.
  bool IsNonNegative = false;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits, bool IsNonNegative)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
        IsNonNegative(IsNonNegative) {}
  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }
  CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
                       IsNonNegative && PreserveNonNeg);
  }
  /// Replace V with zext(NewV).
  CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // The zext cancels part of the truncation.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
                       ZExtNonNegative);
  }
  /// Replace V with sext(NewV).
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // The sext cancels part of the truncation.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
  }
  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits)
      N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits)
      N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits)
      N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }
  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits)
      N = N.truncate(N.getBitWidth() - TruncBits);
    if (IsNonNegative && !N.isAllNonNegative())
      N = N.intersectWith(
          ConstantRange(APInt::getZero(N.getBitWidth()),
                        APInt::getSignedMinValue(N.getBitWidth())));
    if (SExtBits)
      N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits)
      N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }
  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }
  bool hasSameCastsAs(const CastedValue &Other) const {
    if (V->getType() != Other.V->getType())
      return false;

    if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
        TruncBits == Other.TruncBits)
      return true;

    // If either CastedValue is known non-negative, sext and zext bits are
    // interchangeable for that value.
    if (IsNonNegative || Other.IsNonNegative)
      return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
              TruncBits == Other.TruncBits);

    return false;
  }
};
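// ---- Illustrative sketch (not part of the original file) ----
// A standalone mirror of the cast order evaluateWith() applies above:
// truncate first, then sign-extend, then zero-extend. applyCastsExample is a
// hypothetical helper, not an LLVM API.
static llvm::APInt applyCastsExample(llvm::APInt N, unsigned TruncBits,
                                     unsigned SExtBits, unsigned ZExtBits) {
  if (TruncBits)
    N = N.trunc(N.getBitWidth() - TruncBits); // drop the truncated high bits
  if (SExtBits)
    N = N.sext(N.getBitWidth() + SExtBits);   // inner sign extension
  if (ZExtBits)
    N = N.zext(N.getBitWidth() + ZExtBits);   // outer zero extension
  return N;
}
// E.g. applyCastsExample(APInt(32, -1, /*isSigned=*/true), /*TruncBits=*/16,
// /*SExtBits=*/0, /*ZExtBits=*/32) models `trunc i32 V to i16` followed by
// `zext i16 to i48`: -1 becomes i16 0xFFFF and then the 48-bit value 0xFFFF,
// exactly what a CastedValue with those fields represents.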
/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NUW / NSW.
  bool IsNUW;
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNUW, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val)
      : Val(Val), IsNUW(true), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }
  LinearExpression mul(const APInt &Other, bool MulIsNUW,
                       bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
    return LinearExpression(Val, Scale * Other, Offset * Other, NUW, NSW);
  }
};
/// Analyzes the specified value as a linear expression: "A*V + B", where A
/// and B are constant integers.
static LinearExpression GetLinearExpression(const CastedValue &Val,
                                            const DataLayout &DL,
                                            unsigned Depth, AssumptionCache *AC,
                                            DominatorTree *DT) {
  // ...
  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()),
                            /*IsNUW=*/true, /*IsNSW=*/true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we handle is 'or', and only when it behaves
      // like an add (see the disjoint check below).
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it
        // any further.
        return Val;
      case Instruction::Or:
        // X | C == X + C when the 'or' is disjoint; otherwise give up.
        if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
          return Val;
        [[fallthrough]];
      case Instruction::Add: {
        // ...recurse on the LHS and add RHS to the offset...
        break;
      }
      case Instruction::Sub: {
        // ...recurse on the LHS and subtract RHS from the offset...
        break;
      }
      case Instruction::Mul:
        // ...recurse on the LHS and multiply the expression by RHS...
        break;
      case Instruction::Shl:
        // A shift count that exceeds the bit width would be undefined; don't
        // try to analyze it.
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;
        // ...recurse on the LHS, then scale by the shift amount...
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        break;
      }
      return E;
    }
  }

  if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
        Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)), DL,
        Depth + 1, AC, DT);

  return Val;
}
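// Worked example (illustrative, not from the original file): for IR such as
//   %s = shl nsw i32 %x, 2
//   %v = add nsw i32 %s, 12
// GetLinearExpression(%v) takes the Add case and recurses on %s; the Shl
// case shifts both Scale and Offset left by 2, producing 4*%x + 0, and the
// outer add then contributes 12, for a final result of 4*%x + 12 with IsNSW
// set because every operation on the path carries nsw.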
/// A utility struct that represents a single variable GEP index with its
/// scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  /// Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't
  /// simply negate the Scale, to avoid losing the NSW flag.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned IndexSize = DL.getIndexTypeSizeInBits(V->getType());
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(IndexSize, 0);
  do {
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      Value *NewV = Op->getOperand(0);
      // Don't attempt to look through casts which change index sizes.
      if (DL.getIndexTypeSizeInBits(NewV->getType()) != IndexSize) {
        Decomposed.Base = V;
        return Decomposed;
      }
      V = NewV;
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-argument phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // ...look through calls that return one of their arguments...
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    // Track the common nowrap flags for all GEPs we see.
    Decomposed.NWFlags &= GEPOp->getNoWrapFlags();

    // Walk the indices of the GEP, accumulating them into the offset and
    // the variable indices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // For a struct, add the member offset.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;
        Decomposed.Offset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        // ...
        Decomposed.Offset +=
            GTI.getSequentialElementStride(DL).getFixedValue() *
            CIdx->getValue().sextOrTrunc(IndexSize);
        continue;
      }
      // ...

      // If the integer type is smaller than the index size, it is implicitly
      // sign-extended or truncated to the index size.
      bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
      bool NUW = GEPOp->hasNoUnsignedWrap();
      bool NonNeg = NUSW && NUW;
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);

      // ...scale the expression by the element stride...
      Decomposed.Offset += LE.Offset;
      APInt Scale = LE.Scale;
      if (!LE.IsNUW)
        Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
             areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          // We cannot guarantee no-wrap for the merged expression.
          LE.IsNSW = LE.IsNUW = false;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /*IsNegated=*/false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}
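// Worked example (illustrative, not from the original file): with a 64-bit
// index type, decomposing
//   %p = getelementptr inbounds {i32, [8 x i16]}, ptr %base, i64 1, i32 1, i64 %i
// walks three indices: i64 1 adds one 20-byte struct stride, i32 1 selects
// field 1 at byte offset 4, and the variable i64 %i contributes a scaled
// index. The result is Base = %base, Offset = 24, and a single
// VariableGEPIndex with Val = %i and Scale = 2 (the i16 element stride).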
ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  ModRefInfo Result = ModRefInfo::NoModRef;
  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // A noalias argument that is only read is invariant for the life of the
    // SSA value, so only Ref is possible through it.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so
    // does the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}
/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    if (Call->isVolatile()) {
      // Volatile operations also access inaccessible memory.
      FuncME |= MemoryEffects::inaccessibleMemOnly();
    }
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function. For use when the
/// call site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }
  return F->getMemoryEffects();
}
ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->doesNotAccessMemory(ArgIdx))
    return ModRefInfo::NoModRef;

  if (Call->onlyWritesMemory(ArgIdx))
    return ModRefInfo::Mod;

  if (Call->onlyReadsMemory(ArgIdx))
    return ModRefInfo::Ref;

  return ModRefInfo::ModRef;
}
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);
  return !F1 || !F2 || F1 == F2;
}
908 "BasicAliasAnalysis doesn't support interprocedural queries.");
909 return aliasCheck(LocA.
Ptr, LocA.
Size, LocB.
Ptr, LocB.
Size, AAQI, CtxI);
922 "AliasAnalysis query involving multiple functions!");
931 if (isa<AllocaInst>(Object))
932 if (
const CallInst *CI = dyn_cast<CallInst>(Call))
933 if (CI->isTailCall() &&
934 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
939 if (
auto *AI = dyn_cast<AllocaInst>(Object))
940 if (!AI->isStaticAlloca() &&
isIntrinsicCall(Call, Intrinsic::stackrestore))
947 if (ME.doesNotAccessMemory())
961 if (
isModOrRefSet(OtherMR) && !isa<Constant>(Object) && Call != Object &&
962 (isa<AllocaInst>(Object) || !Call->hasFnAttr(Attribute::ReturnsTwice))) {
974 if ((ArgMR | OtherMR) != OtherMR) {
976 for (
const Use &U : Call->data_ops()) {
977 const Value *Arg = U;
980 unsigned ArgIdx = Call->getDataOperandNo(&U);
982 Call->isArgOperand(&U)
990 if (NewArgMR == ArgMR)
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  auto BaseObjectsAlias = [&]() {
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  };

  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;
    return BaseObjectsAlias();
  }

  DominatorTree *DT = getDT(AAQI);
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Fall back to the base-object check if the pointers have different index
  // widths.
  if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
    return BaseObjectsAlias();

  // Swap GEP1 and GEP2 if GEP2 has more variable indices.
  if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
    std::swap(DecompGEP1, DecompGEP2);
    std::swap(V1Size, V2Size);
    std::swap(UnderlyingV1, UnderlyingV2);
  }

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);
  // If an inbounds GEP would have to start from an out-of-bounds address for
  // the two to alias, then we can assume noalias.
  if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  // Symmetric case to the above.
  if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V1Size.hasValue() && !V1Size.isScalable() &&
      DecompGEP1.Offset.sle(-V1Size.getValue()) &&
      isBaseOfObject(DecompGEP1.Base))
    return AliasResult::NoAlias;

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);
  // If there is a constant difference between the pointers, but the
  // difference is less than the size of the associated memory object, then
  // we know the accesses partially overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for the Off >= 0 (V2 <= GEP1) case.
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();
    if (Swapped) {
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const TypeSize LSize = VLeftSize.getValue();
    if (!LSize.isScalable()) {
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset
        // is too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && !VRightSize.isScalable() &&
            Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by the right pointer is nested inside the
          // memory referenced by the left pointer.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    }

    // For a scalable left size, use the vscale_range to prove that
    // Off >= max(vscale) * LSize.
    ConstantRange CR = getVScaleRange(&F, Off.getBitWidth());
    bool Overflow;
    APInt UpperRange = CR.getUnsignedMax().umul_ov(
        APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
    if (!Overflow && Off.uge(UpperRange))
      return AliasResult::NoAlias;
  }
  // VScale alias analysis: if the difference is a single scalable index with
  // zero constant offset, both sides can be divided by vscale and compared
  // as constants.
  if (DecompGEP1.VarIndices.size() == 1 &&
      DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
      DecompGEP1.Offset.isZero() &&
      PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
                          PatternMatch::m_VScale())) {
    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
    APInt Scale =
        ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
    // ...

    // Check that the offset cannot overflow, falling back to the known
    // vscale_range when it might.
    bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
    // ...
  }
  // If the difference between the pointers is Offset +<nuw> Indices, then
  // the constant Offset is a lower bound on the distance between them, and
  // we can prove NoAlias via Offset u>= V2Size.
  if (!DecompGEP1.VarIndices.empty() &&
      DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
      !V2Size.isScalable() && DecompGEP1.Offset.uge(V2Size.getValue()))
    return AliasResult::NoAlias;
  // Bail on analysing scalable location sizes; we need both access sizes for
  // the following heuristics.
  if (V1Size.isScalable() || V2Size.isScalable() || !V1Size.hasValue() ||
      !V2Size.hasValue())
    return AliasResult::MayAlias;

  unsigned BW = DecompGEP1.Offset.getBitWidth();
  // Compute the GCD of the scales, and the possible range of the total
  // offset, from the variable indices; everything is normalized to BW bits.
  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /*ForSigned=*/false,
                                            /*UseInstrInfo=*/true, &AC,
                                            Index.CxtI);
    // ...intersect CR with the range implied by known bits, apply the
    // index's casts, and multiply by the scale...

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses cannot overlap if the first
  // access does not fall into the range of the second.
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;
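// Worked example (illustrative): if the symbolic difference between the
// pointers is 8*%i + 4 with V1Size = 4 and V2Size = 4, then GCD = 8 and
// ModOffset = 4, so access 1 covers [4, 8) modulo 8 while access 2 covers
// [0, 4). ModOffset u>= V2Size and (GCD - ModOffset) u>= V1Size both hold,
// so the accesses can never overlap and the result is NoAlias.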
  // Check whether Scale*V cannot wrap for a variable index.
  auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
    if (Var.IsNSW)
      return true;

    int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
    // If Scale is small enough so that abs(Scale*V) cannot overflow, then
    // the presence of sign/zero extensions guarantees no overflow.
    int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
    if (MaxScaleValueBW <= 0)
      return false;
    return Var.Scale.ule(
        APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
  };
  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0, then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        MultiplyByScaleNoWrap(Var0) && MultiplyByScaleNoWrap(Var1) &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V,
                        SimplifyQuery(DL, DT, &AC, /*CxtI=*/Var0.CxtI)))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi.
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }
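// Worked example (illustrative): for a difference of 4*%i + 2 where %i is
// known non-zero and the multiply cannot wrap, MinAbsVarIndex = 4, giving
// OffsetLo = -2 and OffsetHi = 6: the true offset is <= -2 or >= 6. With
// V1Size <= 2 and V2Size <= 6, the accesses [Offset, Offset + V1Size) and
// [0, V2Size) cannot overlap, so the result is NoAlias.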
  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve.
  return AliasResult::MayAlias;
}
/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
                                       LocationSize SISize, const Value *V2,
                                       LocationSize V2Size, AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on the
  // corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      // ...compare the true arms and the false arms pairwise...
    }
  // ...
}

/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction
/// against another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  // If the values are PHIs in the same block, we can do a more precise as
  // well as efficient check: just check for aliases between the values on
  // corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      std::optional<AliasResult> Alias;
      // ...merge the alias results across corresponding incoming edges...
    }
  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  SmallVector<Value *, 4> V1Srcs;
  for (Value *PV1 : PN->incoming_values()) {
    // ...
    if (isa<PHINode>(PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one Phi input. It is important
        // that we handle the single phi case as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;

  // ...alias the first source, then merge the result with each remaining
  // incoming value...
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    // ...
  }
static bool isArgumentOrArgumentLike(const Value *V) {
  if (isa<Argument>(V))
    return true;
  auto *E = dyn_cast<ExtractValueInst>(V);
  return E && isa<Argument>(E->getOperand(0));
}
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // ...
  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value? Because we look 'through'
  // phi nodes, we could look at "Value" pointers from different iterations.
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
  // ...

  // If one pointer is the result of a call/invoke or load and the other is
  // a non-escaping local object within the same function, then we know the
  // object couldn't escape to a point where the call could return it.
  if (isEscapeSource(O1) &&
      capturesNothing(AAQI.CA->getCapturesBefore(
          O2, dyn_cast<Instruction>(O1), /*OrAt=*/true)))
    return AliasResult::NoAlias;
  if (isEscapeSource(O2) &&
      capturesNothing(AAQI.CA->getCapturesBefore(
          O1, dyn_cast<Instruction>(O2), /*OrAt=*/true)))
    return AliasResult::NoAlias;

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;
  if (EnableSeparateStorageAnalysis) {
    // ...for each operand bundle OBU on a relevant @llvm.assume call...
    if (OBU.getTagName() == "separate_storage") {
      assert(OBU.Inputs.size() == 2);
      const Value *Hint1 = OBU.Inputs[0].get();
      const Value *Hint2 = OBU.Inputs[1].get();
      const Value *HintO1 = getUnderlyingObject(Hint1);
      const Value *HintO2 = getUnderlyingObject(Hint2);

      DominatorTree *DT = getDT(AAQI);
      auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
        if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr))
          return isValidAssumeForContext(Assume, PtrI, DT,
                                         /*AllowEphemerals=*/true);
        if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
          const Instruction *FirstI =
              &*PtrA->getParent()->getEntryBlock().begin();
          return isValidAssumeForContext(Assume, FirstI, DT,
                                         /*AllowEphemerals=*/true);
        }
        return false;
      };

      if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
        // Note that we go back to V1 and V2 for the
        // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
        // so strictly more assumptions are valid for them.
        if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
                                             /*AllowEphemerals=*/true)) ||
            ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2))
          return AliasResult::NoAlias;
      }
    }
    // ...
  }
  // Recursion is only safe to a limited depth.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::MayAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption. This may either be a direct
      // use of an assumption, or a use of an entry that may itself be based
      // on an assumption.
      ++AAQI.NumAssumptionUses;
      if (Entry.isAssumption())
        ++Entry.NumAssumptionUses;
    }
    // The cache contains sorted {V1,V2} pairs, but we should return the
    // original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // The cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);

  // If the assumption has been disproven, remove any results that may have
  // been based on it.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // ...mark the entry as assumption-based or definitive...

  // Depth is incremented before this function is called, so at depth 1 we
  // are the root query.
  if (AAQI.Depth == 1) {
    // Any remaining assumption-based results must be based on proven
    // assumptions, so convert them to definitive results.
    // ...
  }
  return Result;
}
AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    // ...same pattern via aliasPHI...
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    // ...aliasPHI with the operands swapped, then Result.swap()...
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    // ...same pattern via aliasSelect...
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    // ...aliasSelect with the operands swapped, then Result.swap()...
  }
  // ...
}
/// Check whether two Values can be considered equivalent.
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2,
                                                  const AAQueryInfo &AAQI) {
  if (V != V2)
    return false;
  // ...
  // Non-instructions and instructions in the entry block cannot be part of
  // a cycle.
  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst || Inst->getParent()->isEntryBlock())
    return true;

  return isNotInCycle(Inst, getDT(AAQI), /*LI=*/nullptr);
}
/// Computes the symbolic difference between two de-composed GEPs.
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  // Drop the nuw flag if subtraction of the constant offsets overflows in an
  // unsigned sense.
  if (DestGEP.Offset.ult(SrcGEP.Offset))
    DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();

  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have
    // more than a few variable indexes.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
           !areBothVScale(Dest.Val.V, Src.Val.V)) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;

      // Normalize IsNegated if we're going to lose the NSW flag anyway.
      if (Dest.IsNegated) {
        Dest.Scale = -Dest.Scale;
        Dest.IsNegated = false;
        Dest.IsNSW = false;
      }

      // If we found it, subtract off Scale V's from the entry in Dest. If
      // it goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        // Drop the nuw flag if subtraction of V's Scale overflows in an
        // unsigned sense.
        if (Dest.Scale.ult(Src.Scale))
          DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();

        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
                                /*IsNegated=*/true};
      DestGEP.VarIndices.push_back(Entry);

      // Drop the nuw flag when we have unconsumed variable indices from
      // SrcGEP.
      DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
    }
  }
}
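// Worked example (illustrative): subtracting Src = %base + 4 + 4*%i from
// Dest = %base + 8 + 4*%i matches the 4*%i entries (same value, casts, and
// scale), so the variable index is erased and only Offset = 8 - 4 = 4
// remains. Had Src used 4*%j instead, no entry would match and 4*%j would
// be appended with IsNegated = true, representing
// Dest - Src = 4 + 4*%i - 4*%j.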
bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      !Var0.hasNegatedScaleOf(Var1) ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // Strip off the extensions of Var0 and Var1 and do another round of
  // GetLinearExpression decomposition. E.g. if Var0 is zext(%x + 1), we
  // should get V == %x and Offset == 1.
  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
    return false;

  // We have a hit: Var0 and Var1 only differ by a constant offset. Take the
  // smaller of the difference and its wrap-around complement.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to
  // wrapping arithmetic, but the accesses cannot overlap if both stay within
  // MinDiffBytes of each other's start.
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}
void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();

  Result.reset(new BasicAAResult(F.getDataLayout(), F, TLIWP.getTLI(F),
                                 ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree()));

  return false;
}