// NOTE(review): the integers fused onto the front of these lines (94, 96, ...)
// look like extraction artifacts (original file line numbers), not program
// text -- confirm against the pristine source before building.
// Debug identifier consumed by LLVM_DEBUG / -debug-only=dse filtering.
94#define DEBUG_TYPE "dse"
// Pass statistics (visible with -stats): counters for how many stores this
// pass left alone, deleted, or rewrote. Each STATISTIC expands to a counter
// plus the human-readable description string on the following line.
96STATISTIC(NumRemainingStores,
"Number of stores remaining after DSE");
97STATISTIC(NumRedundantStores,
"Number of redundant stores deleted");
98STATISTIC(NumFastStores,
"Number of stores deleted");
99STATISTIC(NumFastOther,
"Number of other instrs removed");
100STATISTIC(NumCompletePartials,
"Number of stores dead by later partials");
101STATISTIC(NumModifiedStores,
"Number of stores modified");
106 "Number of times a valid candidate is returned from getDomMemoryDef");
108 "Number iterations check for reads in getDomMemoryDef");
111 "Controls which MemoryDefs are eliminated.");
116 cl::desc(
"Enable partial-overwrite tracking in DSE"));
121 cl::desc(
"Enable partial store merging in DSE"));
125 cl::desc(
"The number of memory instructions to scan for "
126 "dead store elimination (default = 150)"));
129 cl::desc(
"The maximum number of steps while walking upwards to find "
130 "MemoryDefs that may be killed (default = 90)"));
134 cl::desc(
"The maximum number candidates that only partially overwrite the "
135 "killing MemoryDef to consider"
140 cl::desc(
"The number of MemoryDefs we consider as candidates to eliminated "
141 "other stores per basic block (default = 5000)"));
146 "The cost of a step in the same basic block as the killing MemoryDef"
152 cl::desc(
"The cost of a step in a different basic "
153 "block than the killing MemoryDef"
158 cl::desc(
"The maximum number of blocks to check when trying to prove that "
159 "all paths to an exit go through a killing block (default = 50)"));
169 cl::desc(
"Allow DSE to optimize memory accesses."));
174 cl::desc(
"Enable the initializes attr improvement in DSE"));
// Fragment (interior lines elided in this view): classifies an IntrinsicInst
// by ID. Unrecognized intrinsics return false; the listed mem* intrinsics
// (plain and element-wise-atomic memset/memcpy) fall through to a result that
// is not visible here -- presumably `return true`, TODO confirm in the
// original file.
190 switch (
II->getIntrinsicID()) {
191 default:
return false;
192 case Intrinsic::memset:
193 case Intrinsic::memcpy:
194 case Intrinsic::memcpy_element_unordered_atomic:
195 case Intrinsic::memset_element_unordered_atomic:
230enum OverwriteResult {
234 OW_PartialEarlierWithFullLater,
250 if (KillingII ==
nullptr || DeadII ==
nullptr)
252 if (KillingII->getIntrinsicID() != DeadII->getIntrinsicID())
255 switch (KillingII->getIntrinsicID()) {
256 case Intrinsic::masked_store:
257 case Intrinsic::vp_store: {
259 auto *KillingTy = KillingII->getArgOperand(0)->getType();
260 auto *DeadTy = DeadII->getArgOperand(0)->getType();
261 if (
DL.getTypeSizeInBits(KillingTy) !=
DL.getTypeSizeInBits(DeadTy))
268 Value *KillingPtr = KillingII->getArgOperand(1);
269 Value *DeadPtr = DeadII->getArgOperand(1);
270 if (KillingPtr != DeadPtr && !
AA.isMustAlias(KillingPtr, DeadPtr))
272 if (KillingII->getIntrinsicID() == Intrinsic::masked_store) {
275 if (KillingII->getArgOperand(2) != DeadII->getArgOperand(2))
277 }
else if (KillingII->getIntrinsicID() == Intrinsic::vp_store) {
280 if (KillingII->getArgOperand(2) != DeadII->getArgOperand(2))
283 if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3))
305 int64_t KillingOff, int64_t DeadOff,
316 KillingOff < int64_t(DeadOff + DeadSize) &&
317 int64_t(KillingOff + KillingSize) >= DeadOff) {
320 auto &IM = IOL[DeadI];
321 LLVM_DEBUG(
dbgs() <<
"DSE: Partial overwrite: DeadLoc [" << DeadOff <<
", "
322 << int64_t(DeadOff + DeadSize) <<
") KillingLoc ["
323 << KillingOff <<
", " << int64_t(KillingOff + KillingSize)
330 int64_t KillingIntStart = KillingOff;
331 int64_t KillingIntEnd = KillingOff + KillingSize;
335 auto ILI = IM.lower_bound(KillingIntStart);
336 if (ILI != IM.end() && ILI->second <= KillingIntEnd) {
340 KillingIntStart = std::min(KillingIntStart, ILI->second);
341 KillingIntEnd = std::max(KillingIntEnd, ILI->first);
350 while (ILI != IM.end() && ILI->second <= KillingIntEnd) {
351 assert(ILI->second > KillingIntStart &&
"Unexpected interval");
352 KillingIntEnd = std::max(KillingIntEnd, ILI->first);
357 IM[KillingIntEnd] = KillingIntStart;
360 if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) {
361 LLVM_DEBUG(
dbgs() <<
"DSE: Full overwrite from partials: DeadLoc ["
362 << DeadOff <<
", " << int64_t(DeadOff + DeadSize)
363 <<
") Composite KillingLoc [" << ILI->second <<
", "
364 << ILI->first <<
")\n");
365 ++NumCompletePartials;
373 int64_t(DeadOff + DeadSize) > KillingOff &&
374 uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) {
375 LLVM_DEBUG(
dbgs() <<
"DSE: Partial overwrite a dead load [" << DeadOff
376 <<
", " << int64_t(DeadOff + DeadSize)
377 <<
") by a killing store [" << KillingOff <<
", "
378 << int64_t(KillingOff + KillingSize) <<
")\n");
380 return OW_PartialEarlierWithFullLater;
393 (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) &&
394 int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize)))
407 (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) {
408 assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) &&
409 "Expect to be handled as OW_Complete");
429 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
446 auto *MemLocPtr =
const_cast<Value *
>(MemLoc.
Ptr);
451 bool isFirstBlock =
true;
454 while (!WorkList.
empty()) {
466 assert(
B == SecondBB &&
"first block is not the store block");
468 isFirstBlock =
false;
474 for (; BI != EI; ++BI) {
476 if (
I->mayWriteToMemory() &&
I != SecondI)
482 "Should not hit the entry block because SI must be dominated by LI");
492 auto Inserted = Visited.
insert(std::make_pair(Pred, TranslatedPtr));
493 if (!Inserted.second) {
496 if (TranslatedPtr != Inserted.first->second)
501 WorkList.
push_back(std::make_pair(Pred, PredAddr));
510 uint64_t NewSizeInBits,
bool IsOverwriteEnd) {
512 uint64_t DeadSliceSizeInBits = OldSizeInBits - NewSizeInBits;
514 OldOffsetInBits + (IsOverwriteEnd ? NewSizeInBits : 0);
515 auto SetDeadFragExpr = [](
auto *Assign,
519 uint64_t RelativeOffset = DeadFragment.OffsetInBits -
520 Assign->getExpression()
525 Assign->getExpression(), RelativeOffset, DeadFragment.SizeInBits)) {
526 Assign->setExpression(*
NewExpr);
533 DeadFragment.SizeInBits);
534 Assign->setExpression(Expr);
535 Assign->setKillLocation();
542 auto GetDeadLink = [&Ctx, &LinkToNothing]() {
545 return LinkToNothing;
551 std::optional<DIExpression::FragmentInfo> NewFragment;
553 DeadSliceSizeInBits, Assign,
558 Assign->setKillAddress();
559 Assign->setAssignId(GetDeadLink());
563 if (NewFragment->SizeInBits == 0)
567 auto *NewAssign =
static_cast<decltype(Assign)
>(Assign->clone());
568 NewAssign->insertAfter(Assign->getIterator());
569 NewAssign->setAssignId(GetDeadLink());
571 SetDeadFragExpr(NewAssign, *NewFragment);
572 NewAssign->setKillAddress();
586 for (
auto &Attr : OldAttrs) {
587 if (Attr.hasKindAsEnum()) {
588 switch (Attr.getKindAsEnum()) {
591 case Attribute::Alignment:
593 if (
isAligned(Attr.getAlignment().valueOrOne(), PtrOffset))
596 case Attribute::Dereferenceable:
597 case Attribute::DereferenceableOrNull:
601 case Attribute::NonNull:
602 case Attribute::NoUndef:
610 Intrinsic->removeParamAttrs(ArgNo, AttrsToRemove);
614 uint64_t &DeadSize, int64_t KillingStart,
615 uint64_t KillingSize,
bool IsOverwriteEnd) {
617 Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
633 int64_t ToRemoveStart = 0;
637 if (IsOverwriteEnd) {
642 ToRemoveStart = KillingStart + Off;
643 if (DeadSize <=
uint64_t(ToRemoveStart - DeadStart))
645 ToRemoveSize = DeadSize -
uint64_t(ToRemoveStart - DeadStart);
647 ToRemoveStart = DeadStart;
649 "Not overlapping accesses?");
650 ToRemoveSize = KillingSize -
uint64_t(DeadStart - KillingStart);
655 if (ToRemoveSize <= (PrefAlign.
value() - Off))
657 ToRemoveSize -= PrefAlign.
value() - Off;
660 "Should preserve selected alignment");
663 assert(ToRemoveSize > 0 &&
"Shouldn't reach here if nothing to remove");
664 assert(DeadSize > ToRemoveSize &&
"Can't remove more than original size");
666 uint64_t NewSize = DeadSize - ToRemoveSize;
667 if (DeadIntrinsic->isAtomic()) {
670 const uint32_t ElementSize = DeadIntrinsic->getElementSizeInBytes();
671 if (0 != NewSize % ElementSize)
676 << (IsOverwriteEnd ?
"END" :
"BEGIN") <<
": " << *DeadI
677 <<
"\n KILLER [" << ToRemoveStart <<
", "
678 << int64_t(ToRemoveStart + ToRemoveSize) <<
")\n");
680 DeadIntrinsic->setLength(NewSize);
681 DeadIntrinsic->setDestAlignment(PrefAlign);
683 Value *OrigDest = DeadIntrinsic->getRawDest();
684 if (!IsOverwriteEnd) {
685 Value *Indices[1] = {
686 ConstantInt::get(DeadIntrinsic->getLength()->getType(), ToRemoveSize)};
690 NewDestGEP->
setDebugLoc(DeadIntrinsic->getDebugLoc());
691 DeadIntrinsic->setDest(NewDestGEP);
701 DeadStart += ToRemoveSize;
// Fragment of tryToShortenEnd (header and several interior lines elided).
// Visible logic: derive the killing interval [KillingStart, KillingStart +
// KillingSize) from a map iterator OII (second = start, first = end), then --
// when the killer starts strictly inside the dead store's range and reaches
// at least to its end -- delegate to tryToShorten to trim the tail of the
// dead store. The IsOverwriteEnd argument to tryToShorten is elided here.
708 int64_t &DeadStart,
uint64_t &DeadSize) {
713 int64_t KillingStart = OII->second;
714 uint64_t KillingSize = OII->first - KillingStart;
// Sanity check: interval end must not precede its start.
716 assert(OII->first - KillingStart >= 0 &&
"Size expected to be positive");
718 if (KillingStart > DeadStart &&
721 (
uint64_t)(KillingStart - DeadStart) < DeadSize &&
724 KillingSize >= DeadSize - (
uint64_t)(KillingStart - DeadStart)) {
725 if (
tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
// Fragment of tryToShortenBegin (header and several interior lines elided).
// Mirror image of tryToShortenEnd: when the killing interval starts at or
// before the dead store and overlaps its front, trim the dead store's head
// via tryToShorten. The assert documents that a killer covering the entire
// dead range should already have been classified OW_Complete elsewhere.
736 int64_t &DeadStart,
uint64_t &DeadSize) {
741 int64_t KillingStart = OII->second;
742 uint64_t KillingSize = OII->first - KillingStart;
744 assert(OII->first - KillingStart >= 0 &&
"Size expected to be positive");
746 if (KillingStart <= DeadStart &&
749 KillingSize > (
uint64_t)(DeadStart - KillingStart)) {
// Overlap past DeadStart must be a strict prefix of the dead store;
// a full cover is handled as a complete overwrite, not a shortening.
752 assert(KillingSize - (
uint64_t)(DeadStart - KillingStart) < DeadSize &&
753 "Should have been handled as OW_Complete");
754 if (
tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
765 int64_t KillingOffset, int64_t DeadOffset,
792 unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8;
793 unsigned LShiftAmount =
794 DL.isBigEndian() ? DeadValue.
getBitWidth() - BitOffsetDiff - KillingBits
797 LShiftAmount + KillingBits);
800 APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount);
802 <<
"\n Killing: " << *KillingI
803 <<
"\n Merged Value: " << Merged <<
'\n');
812 switch (
II->getIntrinsicID()) {
813 case Intrinsic::lifetime_start:
814 case Intrinsic::lifetime_end:
815 case Intrinsic::invariant_end:
816 case Intrinsic::launder_invariant_group:
817 case Intrinsic::assume:
819 case Intrinsic::dbg_declare:
820 case Intrinsic::dbg_label:
821 case Intrinsic::dbg_value:
836 if (CB->onlyAccessesInaccessibleMemory())
841 if (DI->
mayThrow() && !DefVisibleToCaller)
// Bundles one written MemoryLocation with the MemoryDef that writes it and
// whether the write is implied by an `initializes` argument attribute rather
// than an explicit store. Some members/lines are elided in this view (e.g.
// the initialization of UnderlyingObject and the DefInst declaration).
863struct MemoryLocationWrapper {
864 MemoryLocationWrapper(MemoryLocation MemLoc, MemoryDef *MemDef,
865 bool DefByInitializesAttr)
866 : MemLoc(MemLoc), MemDef(MemDef),
867 DefByInitializesAttr(DefByInitializesAttr) {
868 assert(MemLoc.Ptr &&
"MemLoc should be not null");
// Cache the IR instruction backing this MemoryDef.
870 DefInst = MemDef->getMemoryInst();
873 MemoryLocation MemLoc;
874 const Value *UnderlyingObject;
877 bool DefByInitializesAttr =
false;
// Wraps a MemoryDef together with every location it defines; each
// (location, from-initializes-attr) pair becomes a MemoryLocationWrapper.
882struct MemoryDefWrapper {
883 MemoryDefWrapper(MemoryDef *MemDef,
884 ArrayRef<std::pair<MemoryLocation, bool>> MemLocations) {
886 for (
auto &[MemLoc, DefByInitializesAttr] : MemLocations)
887 DefinedLocations.push_back(
888 MemoryLocationWrapper(MemLoc, MemDef, DefByInitializesAttr));
// Per-argument record of `initializes` attribute ranges; the argument index
// member appears to be elided in this view. IsDeadOrInvisibleOnUnwind guards
// against counting an initialization that an unwinding caller could observe.
894struct ArgumentInitInfo {
896 bool IsDeadOrInvisibleOnUnwind;
897 ConstantRangeList Inits;
// Fragment of the helper that intersects the init ranges of all aliasing
// arguments (its signature and early-return lines are elided). An argument
// whose init cannot be trusted on unwind -- unless the call is nounwind --
// or that has no init ranges presumably forces an empty result; TODO confirm
// the elided bodies of these two guards.
912 bool CallHasNoUnwindAttr) {
918 for (
const auto &Arg : Args) {
919 if (!CallHasNoUnwindAttr && !Arg.IsDeadOrInvisibleOnUnwind)
921 if (Arg.Inits.empty())
// Fold the remaining arguments' ranges into a single intersection.
926 for (
auto &Arg : Args.drop_front())
927 IntersectedIntervals = IntersectedIntervals.
intersectWith(Arg.Inits);
929 return IntersectedIntervals;
937 EarliestEscapeAnalysis EA;
946 BatchAAResults BatchAA;
950 PostDominatorTree &PDT;
951 const TargetLibraryInfo &TLI;
952 const DataLayout &DL;
957 bool ContainsIrreducibleLoops;
962 SmallPtrSet<MemoryAccess *, 4> SkipStores;
964 DenseMap<const Value *, bool> CapturedBeforeReturn;
967 DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
968 DenseMap<const Value *, uint64_t> InvisibleToCallerAfterRetBounded;
970 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
973 DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
977 MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs;
981 bool AnyUnreachableExit;
986 bool ShouldIterateEndOfFunctionDSE;
993 PostDominatorTree &PDT,
const TargetLibraryInfo &TLI,
995 DSEState(
const DSEState &) =
delete;
996 DSEState &operator=(
const DSEState &) =
delete;
998 LocationSize strengthenLocationSize(
const Instruction *
I,
999 LocationSize
Size)
const;
1009 OverwriteResult isOverwrite(
const Instruction *KillingI,
1010 const Instruction *DeadI,
1011 const MemoryLocation &KillingLoc,
1012 const MemoryLocation &DeadLoc,
1013 int64_t &KillingOff, int64_t &DeadOff);
1015 bool isInvisibleToCallerAfterRet(
const Value *V,
const Value *Ptr,
1016 const LocationSize StoreSize);
1018 bool isInvisibleToCallerOnUnwind(
const Value *V);
1020 std::optional<MemoryLocation> getLocForWrite(Instruction *
I)
const;
1025 getLocForInst(Instruction *
I,
bool ConsiderInitializesAttr);
1029 bool isRemovable(Instruction *
I);
1033 bool isCompleteOverwrite(
const MemoryLocation &DefLoc, Instruction *DefInst,
1034 Instruction *UseInst);
1037 bool isWriteAtEndOfFunction(MemoryDef *Def,
const MemoryLocation &DefLoc);
1042 std::optional<std::pair<MemoryLocation, bool>>
1043 getLocForTerminator(Instruction *
I)
const;
1047 bool isMemTerminatorInst(Instruction *
I)
const;
1051 bool isMemTerminator(
const MemoryLocation &Loc, Instruction *AccessI,
1052 Instruction *MaybeTerm);
1055 bool isReadClobber(
const MemoryLocation &DefLoc, Instruction *UseInst);
1062 bool isGuaranteedLoopIndependent(
const Instruction *Current,
1063 const Instruction *KillingDef,
1064 const MemoryLocation &CurrentLoc);
1069 bool isGuaranteedLoopInvariant(
const Value *Ptr);
1077 std::optional<MemoryAccess *>
1078 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1079 const MemoryLocation &KillingLoc,
const Value *KillingUndObj,
1080 unsigned &ScanLimit,
unsigned &WalkerStepLimit,
1081 bool IsMemTerm,
unsigned &PartialLimit,
1082 bool IsInitializesAttrMemLoc);
1088 SmallPtrSetImpl<MemoryAccess *> *
Deleted =
nullptr);
1094 bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
1095 const Value *KillingUndObj);
1102 bool isDSEBarrier(
const Value *KillingUndObj, Instruction *DeadI);
1106 bool eliminateDeadWritesAtEndOfFunction();
1110 bool tryFoldIntoCalloc(MemoryDef *Def,
const Value *DefUO);
1114 bool dominatingConditionImpliesValue(MemoryDef *Def);
1118 bool storeIsNoop(MemoryDef *Def,
const Value *DefUO);
1124 bool eliminateRedundantStoresOfExistingValues();
1139 std::pair<bool, bool>
1140 eliminateDeadDefs(
const MemoryLocationWrapper &KillingLocWrapper);
1144 bool eliminateDeadDefs(
const MemoryDefWrapper &KillingDefWrapper);
1154 if (Visited.
insert(MA).second)
1171 :
F(
F),
AA(
AA), EA(DT, &LI), BatchAA(
AA, &EA), MSSA(MSSA), DT(DT), PDT(PDT),
1172 TLI(TLI),
DL(
F.getDataLayout()), LI(LI) {
1177 PostOrderNumbers[BB] = PO++;
1180 if (
I.mayThrow() && !MA)
1181 ThrowingBlocks.insert(
I.getParent());
1185 (getLocForWrite(&
I) || isMemTerminatorInst(&
I) ||
1187 MemDefs.push_back(MD);
1194 if (AI.hasPassPointeeByValueCopyAttr()) {
1195 InvisibleToCallerAfterRet.insert({&AI, true});
1199 if (!AI.getType()->isPointerTy())
1203 if (Info.coversAllReachableMemory())
1204 InvisibleToCallerAfterRet.insert({&AI, true});
1205 else if (
uint64_t DeadBytes = Info.getNumberOfDeadBytes())
1206 InvisibleToCallerAfterRetBounded.insert({&AI, DeadBytes});
1213 return isa<UnreachableInst>(E->getTerminator());
1222 (
F == LibFunc_memset_chk ||
F == LibFunc_memcpy_chk)) {
1238OverwriteResult DSEState::isOverwrite(
const Instruction *KillingI,
1239 const Instruction *DeadI,
1240 const MemoryLocation &KillingLoc,
1241 const MemoryLocation &DeadLoc,
1242 int64_t &KillingOff, int64_t &DeadOff) {
1246 if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
1249 LocationSize KillingLocSize =
1250 strengthenLocationSize(KillingI, KillingLoc.
Size);
1258 if (DeadUndObj == KillingUndObj && KillingLocSize.
isPrecise() &&
1260 std::optional<TypeSize> KillingUndObjSize =
1262 if (KillingUndObjSize && *KillingUndObjSize == KillingLocSize.
getValue())
1273 if (KillingMemI && DeadMemI) {
1274 const Value *KillingV = KillingMemI->getLength();
1275 const Value *DeadV = DeadMemI->getLength();
1276 if (KillingV == DeadV && BatchAA.
isMustAlias(DeadLoc, KillingLoc))
1285 const TypeSize KillingSize = KillingLocSize.
getValue();
1294 AliasResult AAR = BatchAA.
alias(KillingLoc, DeadLoc);
1300 if (KillingSize >= DeadSize)
1307 if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
1313 if (DeadUndObj != KillingUndObj) {
1329 const Value *DeadBasePtr =
1331 const Value *KillingBasePtr =
1336 if (DeadBasePtr != KillingBasePtr)
1354 if (DeadOff >= KillingOff) {
1357 if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
1361 else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
1362 return OW_MaybePartial;
1366 else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
1367 return OW_MaybePartial;
1374bool DSEState::isInvisibleToCallerAfterRet(
const Value *V,
const Value *Ptr,
1375 const LocationSize StoreSize) {
1379 auto IBounded = InvisibleToCallerAfterRetBounded.find(V);
1380 if (IBounded != InvisibleToCallerAfterRetBounded.end()) {
1381 int64_t ValueOffset;
1382 [[maybe_unused]]
const Value *BaseValue =
1392 ValueOffset + StoreSize.
getValue() <= IBounded->second &&
1396 auto I = InvisibleToCallerAfterRet.insert({
V,
false});
1397 if (
I.second && isInvisibleToCallerOnUnwind(V) &&
isNoAliasCall(V))
1399 V,
true, CaptureComponents::Provenance));
1400 return I.first->second;
1403bool DSEState::isInvisibleToCallerOnUnwind(
const Value *V) {
1404 bool RequiresNoCaptureBeforeUnwind;
1407 if (!RequiresNoCaptureBeforeUnwind)
1410 auto I = CapturedBeforeReturn.insert({
V,
true});
1417 V,
false, CaptureComponents::Provenance));
1418 return !
I.first->second;
1421std::optional<MemoryLocation> DSEState::getLocForWrite(Instruction *
I)
const {
1422 if (!
I->mayWriteToMemory())
1423 return std::nullopt;
1432DSEState::getLocForInst(Instruction *
I,
bool ConsiderInitializesAttr) {
1434 if (isMemTerminatorInst(
I)) {
1435 if (
auto Loc = getLocForTerminator(
I))
1436 Locations.push_back(std::make_pair(Loc->first,
false));
1440 if (
auto Loc = getLocForWrite(
I))
1441 Locations.push_back(std::make_pair(*Loc,
false));
1443 if (ConsiderInitializesAttr) {
1444 for (
auto &MemLoc : getInitializesArgMemLoc(
I)) {
1445 Locations.push_back(std::make_pair(MemLoc,
true));
// Fragment of DSEState::isRemovable: decides whether a write instruction may
// legally be deleted. The dyn_cast lines selecting SI (store), MI (mem
// intrinsic), and CB (call) are elided in this view. Visible criteria:
// unordered stores and non-volatile mem intrinsics are removable; lifetime
// markers get special handling; other calls are removable only when their
// result is unused and they are willreturn + nothrow non-terminators.
1451bool DSEState::isRemovable(Instruction *
I) {
1452 assert(getLocForWrite(
I) &&
"Must have analyzable write");
1456 return SI->isUnordered();
1461 return !
MI->isVolatile();
1465 if (CB->isLifetimeStartOrEnd())
1468 return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() &&
1469 !CB->isTerminator();
1475bool DSEState::isCompleteOverwrite(
const MemoryLocation &DefLoc,
1476 Instruction *DefInst, Instruction *UseInst) {
1484 if (CB->onlyAccessesInaccessibleMemory())
1487 int64_t InstWriteOffset, DepWriteOffset;
1488 if (
auto CC = getLocForWrite(UseInst))
1489 return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
1490 DepWriteOffset) == OW_Complete;
1494bool DSEState::isWriteAtEndOfFunction(MemoryDef *Def,
1495 const MemoryLocation &DefLoc) {
1497 << *
Def->getMemoryInst()
1498 <<
") is at the end the function \n");
1500 SmallPtrSet<MemoryAccess *, 8> Visited;
1503 for (
unsigned I = 0;
I < WorkList.
size();
I++) {
1509 MemoryAccess *UseAccess = WorkList[
I];
1514 if (!isGuaranteedLoopInvariant(DefLoc.
Ptr))
1523 if (isReadClobber(DefLoc, UseInst)) {
1524 LLVM_DEBUG(
dbgs() <<
" ... hit read clobber " << *UseInst <<
".\n");
1534std::optional<std::pair<MemoryLocation, bool>>
1535DSEState::getLocForTerminator(Instruction *
I)
const {
1537 if (CB->getIntrinsicID() == Intrinsic::lifetime_end)
1544 return std::nullopt;
// Fragment: isMemTerminatorInst recognizes instructions that end the
// lifetime of a memory object (lifetime_end is visible; the second half of
// the || is elided -- presumably a free-like call, TODO confirm).
1547bool DSEState::isMemTerminatorInst(Instruction *
I)
const {
1549 return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end ||
// Fragment: isMemTerminator checks whether MaybeTerm kills all of Loc
// accessed by AccessI. Several interior guards are elided; the visible tail
// falls back to a full-overwrite query when the terminator's location needs
// a precise-overlap check (MaybeTermLoc->second).
1553bool DSEState::isMemTerminator(
const MemoryLocation &Loc, Instruction *AccessI,
1554 Instruction *MaybeTerm) {
1555 std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1556 getLocForTerminator(MaybeTerm);
1567 auto TermLoc = MaybeTermLoc->first;
1568 if (MaybeTermLoc->second) {
1572 int64_t InstWriteOffset = 0;
1573 int64_t DepWriteOffset = 0;
1574 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
1575 DepWriteOffset) == OW_Complete;
1578bool DSEState::isReadClobber(
const MemoryLocation &DefLoc,
1579 Instruction *UseInst) {
1592 if (CB->onlyAccessesInaccessibleMemory())
// Fragment: isGuaranteedLoopIndependent -- proves that Current's write to
// CurrentLoc refers to the same memory on every loop iteration relative to
// KillingDef. The loop-membership comparison inside the visible `if` is
// elided; the fallback is pointer-level loop invariance.
1598bool DSEState::isGuaranteedLoopIndependent(
const Instruction *Current,
1599 const Instruction *KillingDef,
1600 const MemoryLocation &CurrentLoc) {
1608 if (!ContainsIrreducibleLoops && CurrentLI &&
1612 return isGuaranteedLoopInvariant(CurrentLoc.
Ptr);
// Fragment: isGuaranteedLoopInvariant -- a pointer is loop-invariant if
// (per the elided GEP handling) it is a GEP with all-constant indices on an
// invariant base, or an instruction in the entry block, or (absent
// irreducible control flow) an instruction in no loop at all.
1615bool DSEState::isGuaranteedLoopInvariant(
const Value *Ptr) {
1618 if (
GEP->hasAllConstantIndices())
1622 return I->getParent()->isEntryBlock() ||
1623 (!ContainsIrreducibleLoops && !LI.
getLoopFor(
I->getParent()));
1628std::optional<MemoryAccess *> DSEState::getDomMemoryDef(
1629 MemoryDef *KillingDef, MemoryAccess *StartAccess,
1630 const MemoryLocation &KillingLoc,
const Value *KillingUndObj,
1631 unsigned &ScanLimit,
unsigned &WalkerStepLimit,
bool IsMemTerm,
1632 unsigned &PartialLimit,
bool IsInitializesAttrMemLoc) {
1633 if (ScanLimit == 0 || WalkerStepLimit == 0) {
1635 return std::nullopt;
1638 MemoryAccess *Current = StartAccess;
1652 std::optional<MemoryLocation> CurrentLoc;
1655 dbgs() <<
" visiting " << *Current;
1668 return std::nullopt;
1676 if (WalkerStepLimit <= StepCost) {
1678 return std::nullopt;
1680 WalkerStepLimit -= StepCost;
1694 if (
canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) {
1695 CanOptimize =
false;
1701 if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
1703 return std::nullopt;
1708 if (isDSEBarrier(KillingUndObj, CurrentI)) {
1710 return std::nullopt;
1718 return std::nullopt;
1721 if (
any_of(Current->
uses(), [
this, &KillingLoc, StartAccess](Use &U) {
1722 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1723 return !MSSA.dominates(StartAccess, UseOrDef) &&
1724 isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
1728 return std::nullopt;
1733 CurrentLoc = getLocForWrite(CurrentI);
1734 if (!CurrentLoc || !isRemovable(CurrentI)) {
1735 CanOptimize =
false;
1742 if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
1744 CanOptimize =
false;
1752 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
1753 CanOptimize =
false;
1757 int64_t KillingOffset = 0;
1758 int64_t DeadOffset = 0;
1759 auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
1760 KillingOffset, DeadOffset);
1766 (OR == OW_Complete || OR == OW_MaybePartial))
1772 CanOptimize =
false;
1777 if (OR == OW_Unknown || OR == OW_None)
1779 else if (OR == OW_MaybePartial) {
1784 if (PartialLimit <= 1) {
1785 WalkerStepLimit -= 1;
1786 LLVM_DEBUG(
dbgs() <<
" ... reached partial limit ... continue with "
1800 SmallPtrSet<Instruction *, 16> KillingDefs;
1802 MemoryAccess *MaybeDeadAccess = Current;
1803 MemoryLocation MaybeDeadLoc = *CurrentLoc;
1805 LLVM_DEBUG(
dbgs() <<
" Checking for reads of " << *MaybeDeadAccess <<
" ("
1806 << *MaybeDeadI <<
")\n");
1809 SmallPtrSet<MemoryAccess *, 32> Visited;
1813 for (
unsigned I = 0;
I < WorkList.
size();
I++) {
1814 MemoryAccess *UseAccess = WorkList[
I];
1818 if (ScanLimit < (WorkList.
size() -
I)) {
1820 return std::nullopt;
1823 NumDomMemDefChecks++;
1826 if (
any_of(KillingDefs, [
this, UseAccess](Instruction *KI) {
1829 LLVM_DEBUG(
dbgs() <<
" ... skipping, dominated by killing block\n");
1840 if (
any_of(KillingDefs, [
this, UseInst](Instruction *KI) {
1843 LLVM_DEBUG(
dbgs() <<
" ... skipping, dominated by killing def\n");
1849 if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1852 <<
" ... skipping, memterminator invalidates following accesses\n");
1862 if (UseInst->
mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
1864 return std::nullopt;
1871 bool IsKillingDefFromInitAttr =
false;
1872 if (IsInitializesAttrMemLoc) {
1873 if (KillingI == UseInst &&
1875 IsKillingDefFromInitAttr =
true;
1878 if (isReadClobber(MaybeDeadLoc, UseInst) && !IsKillingDefFromInitAttr) {
1880 return std::nullopt;
1886 if (MaybeDeadAccess == UseAccess &&
1887 !isGuaranteedLoopInvariant(MaybeDeadLoc.
Ptr)) {
1888 LLVM_DEBUG(
dbgs() <<
" ... found not loop invariant self access\n");
1889 return std::nullopt;
1895 if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
1911 if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1913 if (PostOrderNumbers.
find(MaybeKillingBlock)->second <
1914 PostOrderNumbers.
find(MaybeDeadAccess->
getBlock())->second) {
1915 if (!isInvisibleToCallerAfterRet(KillingUndObj, KillingLoc.
Ptr,
1918 <<
" ... found killing def " << *UseInst <<
"\n");
1919 KillingDefs.
insert(UseInst);
1923 <<
" ... found preceeding def " << *UseInst <<
"\n");
1924 return std::nullopt;
1934 if (!isInvisibleToCallerAfterRet(KillingUndObj, KillingLoc.
Ptr,
1936 SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1937 for (Instruction *KD : KillingDefs)
1938 KillingBlocks.
insert(KD->getParent());
1940 "Expected at least a single killing block");
1954 if (!AnyUnreachableExit)
1955 return std::nullopt;
1959 CommonPred =
nullptr;
1963 if (KillingBlocks.
count(CommonPred))
1964 return {MaybeDeadAccess};
1966 SetVector<BasicBlock *> WorkList;
1970 WorkList.
insert(CommonPred);
1972 for (BasicBlock *R : PDT.
roots()) {
1980 for (
unsigned I = 0;
I < WorkList.
size();
I++) {
1983 if (KillingBlocks.
count(Current))
1985 if (Current == MaybeDeadAccess->
getBlock())
1986 return std::nullopt;
1996 return std::nullopt;
2003 return {MaybeDeadAccess};
2006void DSEState::deleteDeadInstruction(Instruction *SI,
2007 SmallPtrSetImpl<MemoryAccess *> *
Deleted) {
2008 MemorySSAUpdater Updater(&MSSA);
2013 while (!NowDeadInsts.
empty()) {
2027 SkipStores.insert(MD);
2031 if (
SI->getValueOperand()->getType()->isPointerTy()) {
2033 if (CapturedBeforeReturn.erase(UO))
2034 ShouldIterateEndOfFunctionDSE =
true;
2035 InvisibleToCallerAfterRet.erase(UO);
2036 InvisibleToCallerAfterRetBounded.erase(UO);
2041 Updater.removeMemoryAccess(MA);
2045 if (
I != IOLs.end())
2046 I->second.erase(DeadInst);
2048 for (Use &O : DeadInst->
operands())
// Fragment: mayThrowBetween -- conservatively asks whether an exception
// thrown between DeadI and KillingI could expose the dead store. If the
// underlying object is invisible to the caller on unwind the answer is
// driven by whether KillingI's own block contains a throwing instruction
// (the same-block guard between these lines is elided); otherwise any
// throwing block in the function blocks elimination.
2068bool DSEState::mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
2069 const Value *KillingUndObj) {
2073 if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
2077 return ThrowingBlocks.count(KillingI->
getParent());
2078 return !ThrowingBlocks.empty();
// Fragment: isDSEBarrier -- DeadI acts as a barrier when it may throw and
// the object is visible to the caller on unwind. Further barrier cases
// (e.g. atomics ordering checks) are elided from this view.
2081bool DSEState::isDSEBarrier(
const Value *KillingUndObj, Instruction *DeadI) {
2084 if (DeadI->
mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
2104bool DSEState::eliminateDeadWritesAtEndOfFunction() {
2105 bool MadeChange =
false;
2107 dbgs() <<
"Trying to eliminate MemoryDefs at the end of the function\n");
2109 ShouldIterateEndOfFunctionDSE =
false;
2111 if (SkipStores.contains(Def))
2115 auto DefLoc = getLocForWrite(DefI);
2116 if (!DefLoc || !isRemovable(DefI)) {
2118 "instruction not removable.\n");
2128 if (!isInvisibleToCallerAfterRet(UO, DefLoc->
Ptr, DefLoc->
Size))
2131 if (isWriteAtEndOfFunction(Def, *DefLoc)) {
2133 LLVM_DEBUG(
dbgs() <<
" ... MemoryDef is not accessed until the end "
2134 "of the function\n");
2140 }
while (ShouldIterateEndOfFunctionDSE);
2144bool DSEState::tryFoldIntoCalloc(MemoryDef *Def,
const Value *DefUO) {
2151 if (!StoredConstant || !StoredConstant->
isNullValue())
2154 if (!isRemovable(DefI))
2158 if (
F.hasFnAttribute(Attribute::SanitizeMemory) ||
2159 F.hasFnAttribute(Attribute::SanitizeAddress) ||
2160 F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
F.getName() ==
"calloc")
2165 auto *InnerCallee =
Malloc->getCalledFunction();
2168 LibFunc
Func = NotLibFunc;
2169 StringRef ZeroedVariantName;
2170 if (!TLI.
getLibFunc(*InnerCallee, Func) || !TLI.
has(Func) ||
2171 Func != LibFunc_malloc) {
2176 if (ZeroedVariantName.
empty())
2185 auto shouldCreateCalloc = [](CallInst *
Malloc, CallInst *Memset) {
2188 auto *MallocBB =
Malloc->getParent(), *MemsetBB = Memset->getParent();
2189 if (MallocBB == MemsetBB)
2191 auto *Ptr = Memset->getArgOperand(0);
2192 auto *TI = MallocBB->getTerminator();
2198 if (MemsetBB != FalseBB)
2209 assert(Func == LibFunc_malloc || !ZeroedVariantName.
empty());
2210 Value *Calloc =
nullptr;
2211 if (!ZeroedVariantName.
empty()) {
2212 LLVMContext &Ctx =
Malloc->getContext();
2213 AttributeList
Attrs = InnerCallee->getAttributes();
2215 Attrs.getFnAttr(Attribute::AllocKind).getAllocKind() |
2216 AllocFnKind::Zeroed;
2219 Attrs.addFnAttribute(Ctx, Attribute::getWithAllocKind(Ctx, AllocKind))
2220 .removeFnAttribute(Ctx,
"alloc-variant-zeroed");
2221 FunctionCallee ZeroedVariant =
Malloc->getModule()->getOrInsertFunction(
2222 ZeroedVariantName, InnerCallee->getFunctionType(), Attrs);
2224 ->setCallingConv(
Malloc->getCallingConv());
2227 CallInst *CI = IRB.CreateCall(ZeroedVariant, Args, ZeroedVariantName);
2231 Type *SizeTTy =
Malloc->getArgOperand(0)->getType();
2232 Calloc =
emitCalloc(ConstantInt::get(SizeTTy, 1),
Malloc->getArgOperand(0),
2233 IRB, TLI,
Malloc->getType()->getPointerAddressSpace());
2238 MemorySSAUpdater Updater(&MSSA);
2240 nullptr, MallocDef);
2242 Updater.insertDef(NewAccessMD,
true);
2243 Malloc->replaceAllUsesWith(Calloc);
2248bool DSEState::dominatingConditionImpliesValue(MemoryDef *Def) {
2251 Value *StorePtr = StoreI->getPointerOperand();
2252 Value *StoreVal = StoreI->getValueOperand();
2259 if (!BI || !BI->isConditional())
2265 if (BI->getSuccessor(0) == BI->getSuccessor(1))
2270 if (!
match(BI->getCondition(),
2280 if (Pred == ICmpInst::ICMP_EQ &&
2281 !DT.
dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(0)),
2285 if (Pred == ICmpInst::ICMP_NE &&
2286 !DT.
dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(1)),
2291 MemoryAccess *ClobAcc =
2294 return MSSA.
dominates(ClobAcc, LoadAcc);
2297bool DSEState::storeIsNoop(MemoryDef *Def,
const Value *DefUO) {
2301 Constant *StoredConstant =
nullptr;
2309 if (!isRemovable(DefI))
2312 if (StoredConstant) {
2317 if (InitC && InitC == StoredConstant)
2325 if (dominatingConditionImpliesValue(Def))
2329 if (LoadI->getPointerOperand() ==
Store->getOperand(1)) {
2333 if (LoadAccess ==
Def->getDefiningAccess())
2339 SetVector<MemoryAccess *> ToCheck;
2340 MemoryAccess *Current =
2348 for (
unsigned I = 1;
I < ToCheck.
size(); ++
I) {
2349 Current = ToCheck[
I];
2352 for (
auto &Use : PhiAccess->incoming_values())
2364 if (LoadAccess != Current)
2376 for (
auto OI : IOL) {
2378 MemoryLocation Loc = *getLocForWrite(DeadI);
2379 assert(isRemovable(DeadI) &&
"Expect only removable instruction");
2382 int64_t DeadStart = 0;
2387 if (IntervalMap.empty())
2394bool DSEState::eliminateRedundantStoresOfExistingValues() {
2395 bool MadeChange =
false;
2396 LLVM_DEBUG(
dbgs() <<
"Trying to eliminate MemoryDefs that write the "
2397 "already existing value\n");
2398 for (
auto *Def : MemDefs) {
2403 auto MaybeDefLoc = getLocForWrite(DefInst);
2404 if (!MaybeDefLoc || !isRemovable(DefInst))
2407 MemoryDef *UpperDef;
2411 if (
Def->isOptimized())
2419 auto IsRedundantStore = [&]() {
2427 auto UpperLoc = getLocForWrite(UpperInst);
2430 int64_t InstWriteOffset = 0;
2431 int64_t DepWriteOffset = 0;
2432 auto OR = isOverwrite(UpperInst, DefInst, *UpperLoc, *MaybeDefLoc,
2433 InstWriteOffset, DepWriteOffset);
2435 return StoredByte && StoredByte == MemSetI->getOperand(1) &&
2442 if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst))
2444 LLVM_DEBUG(
dbgs() <<
"DSE: Remove No-Op Store:\n DEAD: " << *DefInst
2447 NumRedundantStores++;
2454DSEState::getInitializesArgMemLoc(
const Instruction *
I) {
2460 SmallMapVector<Value *, SmallVector<ArgumentInitInfo, 2>, 2>
Arguments;
2466 ConstantRangeList Inits;
2478 Inits = ConstantRangeList();
2486 bool IsDeadOrInvisibleOnUnwind =
2489 ArgumentInitInfo InitInfo{Idx, IsDeadOrInvisibleOnUnwind, Inits};
2490 bool FoundAliasing =
false;
2491 for (
auto &[Arg, AliasList] :
Arguments) {
2497 FoundAliasing =
true;
2498 AliasList.push_back(InitInfo);
2503 FoundAliasing =
true;
2504 AliasList.push_back(ArgumentInitInfo{Idx, IsDeadOrInvisibleOnUnwind,
2505 ConstantRangeList()});
2514 auto IntersectedRanges =
2516 if (IntersectedRanges.empty())
2519 for (
const auto &Arg : Args) {
2520 for (
const auto &
Range : IntersectedRanges) {
2534std::pair<bool, bool>
2535DSEState::eliminateDeadDefs(
const MemoryLocationWrapper &KillingLocWrapper) {
2537 bool DeletedKillingLoc =
false;
2543 SmallSetVector<MemoryAccess *, 8> ToCheck;
2547 SmallPtrSet<MemoryAccess *, 8>
Deleted;
2548 [[maybe_unused]]
unsigned OrigNumSkipStores = SkipStores.size();
2553 for (
unsigned I = 0;
I < ToCheck.
size();
I++) {
2554 MemoryAccess *Current = ToCheck[
I];
2555 if (
Deleted.contains(Current))
2557 std::optional<MemoryAccess *> MaybeDeadAccess = getDomMemoryDef(
2558 KillingLocWrapper.MemDef, Current, KillingLocWrapper.MemLoc,
2559 KillingLocWrapper.UnderlyingObject, ScanLimit, WalkerStepLimit,
2560 isMemTerminatorInst(KillingLocWrapper.DefInst), PartialLimit,
2561 KillingLocWrapper.DefByInitializesAttr);
2563 if (!MaybeDeadAccess) {
2567 MemoryAccess *DeadAccess = *MaybeDeadAccess;
2568 LLVM_DEBUG(
dbgs() <<
" Checking if we can kill " << *DeadAccess);
2570 LLVM_DEBUG(
dbgs() <<
"\n ... adding incoming values to worklist\n");
2579 if (PostOrderNumbers[IncomingBlock] > PostOrderNumbers[PhiBlock])
2580 ToCheck.
insert(IncomingAccess);
2591 MemoryDefWrapper DeadDefWrapper(
2595 assert(DeadDefWrapper.DefinedLocations.size() == 1);
2596 MemoryLocationWrapper &DeadLocWrapper =
2597 DeadDefWrapper.DefinedLocations.front();
2600 NumGetDomMemoryDefPassed++;
2604 if (isMemTerminatorInst(KillingLocWrapper.DefInst)) {
2605 if (KillingLocWrapper.UnderlyingObject != DeadLocWrapper.UnderlyingObject)
2608 << *DeadLocWrapper.DefInst <<
"\n KILLER: "
2609 << *KillingLocWrapper.DefInst <<
'\n');
2615 int64_t KillingOffset = 0;
2616 int64_t DeadOffset = 0;
2617 OverwriteResult
OR =
2618 isOverwrite(KillingLocWrapper.DefInst, DeadLocWrapper.DefInst,
2619 KillingLocWrapper.MemLoc, DeadLocWrapper.MemLoc,
2620 KillingOffset, DeadOffset);
2621 if (OR == OW_MaybePartial) {
2622 auto &IOL = IOLs[DeadLocWrapper.DefInst->
getParent()];
2624 KillingOffset, DeadOffset,
2625 DeadLocWrapper.DefInst, IOL);
2633 if (DeadSI && KillingSI && DT.
dominates(DeadSI, KillingSI)) {
2635 KillingSI, DeadSI, KillingOffset, DeadOffset,
DL, BatchAA,
2639 DeadSI->setOperand(0, Merged);
2640 ++NumModifiedStores;
2642 DeletedKillingLoc =
true;
2647 auto I = IOLs.find(DeadSI->getParent());
2648 if (
I != IOLs.end())
2649 I->second.erase(DeadSI);
2654 if (OR == OW_Complete) {
2656 << *DeadLocWrapper.DefInst <<
"\n KILLER: "
2657 << *KillingLocWrapper.DefInst <<
'\n');
2665 assert(SkipStores.size() - OrigNumSkipStores ==
Deleted.size() &&
2666 "SkipStores and Deleted out of sync?");
2668 return {
Changed, DeletedKillingLoc};
2671bool DSEState::eliminateDeadDefs(
const MemoryDefWrapper &KillingDefWrapper) {
2672 if (KillingDefWrapper.DefinedLocations.empty()) {
2673 LLVM_DEBUG(
dbgs() <<
"Failed to find analyzable write location for "
2674 << *KillingDefWrapper.DefInst <<
"\n");
2678 bool MadeChange =
false;
2679 for (
auto &KillingLocWrapper : KillingDefWrapper.DefinedLocations) {
2681 << *KillingLocWrapper.MemDef <<
" ("
2682 << *KillingLocWrapper.DefInst <<
")\n");
2683 auto [
Changed, DeletedKillingLoc] = eliminateDeadDefs(KillingLocWrapper);
2687 if (!DeletedKillingLoc && storeIsNoop(KillingLocWrapper.MemDef,
2688 KillingLocWrapper.UnderlyingObject)) {
2690 << *KillingLocWrapper.DefInst <<
'\n');
2692 NumRedundantStores++;
2697 if (!DeletedKillingLoc &&
2698 tryFoldIntoCalloc(KillingLocWrapper.MemDef,
2699 KillingLocWrapper.UnderlyingObject)) {
2700 LLVM_DEBUG(
dbgs() <<
"DSE: Remove memset after forming calloc:\n"
2701 <<
" DEAD: " << *KillingLocWrapper.DefInst <<
'\n');
2714 bool MadeChange =
false;
2715 DSEState State(
F,
AA, MSSA, DT, PDT, TLI, LI);
2717 for (
unsigned I = 0;
I < State.MemDefs.size();
I++) {
2719 if (State.SkipStores.count(KillingDef))
2722 MemoryDefWrapper KillingDefWrapper(
2723 KillingDef, State.getLocForInst(KillingDef->
getMemoryInst(),
2725 MadeChange |= State.eliminateDeadDefs(KillingDefWrapper);
2729 for (
auto &KV : State.IOLs)
2730 MadeChange |= State.removePartiallyOverlappedStores(KV.second);
2732 MadeChange |= State.eliminateRedundantStoresOfExistingValues();
2733 MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2735 while (!State.ToRemove.empty()) {
2736 Instruction *DeadInst = State.ToRemove.pop_back_val();
2756#ifdef LLVM_ENABLE_STATS
2784 if (skipFunction(
F))
2787 AliasAnalysis &
AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2788 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2790 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F);
2791 MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2793 getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2794 LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2798#ifdef LLVM_ENABLE_STATS
2807 void getAnalysisUsage(AnalysisUsage &AU)
const override {
2826char DSELegacyPass::ID = 0;
2843 return new DSELegacyPass();
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefInfo InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
MapVector< Instruction *, OverlapIntervalsTy > InstOverlapIntervalsTy
static bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller)
static cl::opt< bool > EnableInitializesImprovement("enable-dse-initializes-attr-improvement", cl::init(true), cl::Hidden, cl::desc("Enable the initializes attr improvement in DSE"))
static void shortenAssignment(Instruction *Inst, Value *OriginalDest, uint64_t OldOffsetInBits, uint64_t OldSizeInBits, uint64_t NewSizeInBits, bool IsOverwriteEnd)
static bool isShortenableAtTheEnd(Instruction *I)
Returns true if the end of this instruction can be safely shortened in length.
static bool isNoopIntrinsic(Instruction *I)
static ConstantRangeList getIntersectedInitRangeList(ArrayRef< ArgumentInitInfo > Args, bool CallHasNoUnwindAttr)
static cl::opt< bool > EnablePartialStoreMerging("enable-dse-partial-store-merging", cl::init(true), cl::Hidden, cl::desc("Enable partial store merging in DSE"))
static bool tryToShortenBegin(Instruction *DeadI, OverlapIntervalsTy &IntervalMap, int64_t &DeadStart, uint64_t &DeadSize)
std::map< int64_t, int64_t > OverlapIntervalsTy
static void pushMemUses(MemoryAccess *Acc, SmallVectorImpl< MemoryAccess * > &WorkList, SmallPtrSetImpl< MemoryAccess * > &Visited)
static bool isShortenableAtTheBeginning(Instruction *I)
Returns true if the beginning of this instruction can be safely shortened in length.
static cl::opt< unsigned > MemorySSADefsPerBlockLimit("dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden, cl::desc("The number of MemoryDefs we consider as candidates to eliminated " "other stores per basic block (default = 5000)"))
static Constant * tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI, int64_t KillingOffset, int64_t DeadOffset, const DataLayout &DL, BatchAAResults &AA, DominatorTree *DT)
static bool memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, BatchAAResults &AA, const DataLayout &DL, DominatorTree *DT)
Returns true if the memory which is accessed by the second instruction is not modified between the fi...
static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI, const Instruction *DeadI, BatchAAResults &AA)
Check if two instruction are masked stores that completely overwrite one another.
static cl::opt< unsigned > MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5), cl::Hidden, cl::desc("The cost of a step in a different basic " "block than the killing MemoryDef" "(default = 5)"))
static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart, uint64_t &DeadSize, int64_t KillingStart, uint64_t KillingSize, bool IsOverwriteEnd)
static cl::opt< unsigned > MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden, cl::desc("The number of memory instructions to scan for " "dead store elimination (default = 150)"))
static bool isFuncLocalAndNotCaptured(Value *Arg, const CallBase *CB, EarliestEscapeAnalysis &EA)
static cl::opt< unsigned > MemorySSASameBBStepCost("dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden, cl::desc("The cost of a step in the same basic block as the killing MemoryDef" "(default = 1)"))
static cl::opt< bool > EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", cl::init(true), cl::Hidden, cl::desc("Enable partial-overwrite tracking in DSE"))
static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc, const MemoryLocation &DeadLoc, int64_t KillingOff, int64_t DeadOff, Instruction *DeadI, InstOverlapIntervalsTy &IOL)
Return 'OW_Complete' if a store to the 'KillingLoc' location completely overwrites a store to the 'De...
static cl::opt< unsigned > MemorySSAPartialStoreLimit("dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden, cl::desc("The maximum number candidates that only partially overwrite the " "killing MemoryDef to consider" " (default = 5)"))
static std::optional< TypeSize > getPointerSize(const Value *V, const DataLayout &DL, const TargetLibraryInfo &TLI, const Function *F)
static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap, int64_t &DeadStart, uint64_t &DeadSize)
static void adjustArgAttributes(AnyMemIntrinsic *Intrinsic, unsigned ArgNo, uint64_t PtrOffset)
Update the attributes given that a memory access is updated (the dereferenced pointer could be moved ...
static cl::opt< unsigned > MemorySSAUpwardsStepLimit("dse-memoryssa-walklimit", cl::init(90), cl::Hidden, cl::desc("The maximum number of steps while walking upwards to find " "MemoryDefs that may be killed (default = 90)"))
static cl::opt< bool > OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(true), cl::Hidden, cl::desc("Allow DSE to optimize memory accesses."))
static bool hasInitializesAttr(Instruction *I)
static cl::opt< unsigned > MemorySSAPathCheckLimit("dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden, cl::desc("The maximum number of blocks to check when trying to prove that " "all paths to an exit go through a killing block (default = 50)"))
static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT, PostDominatorTree &PDT, const TargetLibraryInfo &TLI, const LoopInfo &LI)
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
early cse Early CSE w MemorySSA
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
static void deleteDeadInstruction(Instruction *I)
This file implements a map that provides insertion order iteration.
This file provides utility analysis objects describing memory locations.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
int64_t getSExtValue() const
Get sign extended value.
@ NoAlias
The two locations do not alias at all.
@ PartialAlias
The two locations alias, but only due to a partial overlap.
@ MustAlias
The two locations precisely alias each other.
constexpr int32_t getOffset() const
constexpr bool hasOffset() const
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
An immutable pass that tracks lazily created AssumptionCache objects.
This class stores enough information to efficiently remove some attributes from an existing AttrBuild...
AttributeMask & addAttribute(Attribute::AttrKind Val)
Add an attribute to the mask.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM_ABI ArrayRef< ConstantRange > getValueAsConstantRangeList() const
Return the attribute's value as a ConstantRange array.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB)
bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Represents analyses that only rely on functions' control flow.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
unsigned arg_size() const
This class represents a list of constant ranges.
bool empty() const
Return true if this list contains no members.
LLVM_ABI ConstantRangeList intersectWith(const ConstantRangeList &CRL) const
Return the range list that results from the intersection of this ConstantRangeList with another Const...
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
This is an important base class in LLVM.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
static DIAssignID * getDistinct(LLVMContext &Context)
DbgVariableFragmentInfo FragmentInfo
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
A parsed version of the target data layout string in and methods for querying it.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(CounterInfo &Counter)
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
DomTreeNodeBase * getIDom() const
Analysis pass which computes a DominatorTree.
NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const
Find nearest common dominator basic block for basic block A and B.
iterator_range< root_iterator > roots()
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Context-sensitive CaptureAnalysis provider, which computes and caches the earliest common dominator c...
void removeInstruction(Instruction *I)
CaptureComponents getCapturesBefore(const Value *Object, const Instruction *I, bool OrAt) override
Return how Object may be captured before instruction I, considering only provenance captures.
FunctionPass class - This class is used to implement most global optimizations.
const BasicBlock & getEntryBlock() const
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
bool isEquality() const
Return true if this predicate is either EQ or NE.
LLVM_ABI bool mayThrow(bool IncludePhaseOneUnwind=false) const LLVM_READONLY
Return true if this instruction may throw an exception.
LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
LLVM_ABI bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
const_iterator begin() const
bool empty() const
empty - Return true when no intervals are mapped.
const_iterator end() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
Analysis pass that exposes the LoopInfo for a function.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
This class implements a map that also provides access to all stored values in a deterministic order.
Value * getLength() const
BasicBlock * getBlock() const
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
void setOptimized(MemoryAccess *MA)
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance.
Representation for a specific memory location.
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location before or after Ptr, while remaining within the underl...
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
MemoryLocation getWithNewPtr(const Value *NewPtr) const
const Value * Ptr
The address of the start of the location.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static LLVM_ABI std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
static LLVM_ABI MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo *TLI)
Return a location representing a particular argument of a call.
An analysis that produces MemorySSA for a function.
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Legacy analysis pass which computes MemorySSA.
Encapsulates MemorySSA, including all data associated with memory accesses.
LLVM_ABI MemorySSAWalker * getSkipSelfWalker()
LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
LLVM_ABI MemorySSAWalker * getWalker()
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Instruction * getMemoryInst() const
Get the instruction that this MemoryUse represents.
PHITransAddr - An address value which tracks and handles phi translation.
LLVM_ABI Value * translateValue(BasicBlock *CurBB, BasicBlock *PredBB, const DominatorTree *DT, bool MustDominate)
translateValue - PHI translate the current address up the CFG from CurBB to Pred, updating our state ...
LLVM_ABI bool isPotentiallyPHITranslatable() const
isPotentiallyPHITranslatable - If this needs PHI translation, return true if we have some hope of doi...
bool needsPHITranslationFromBlock(BasicBlock *BB) const
needsPHITranslationFromBlock - Return true if moving from the specified BasicBlock to its predecessor...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
LLVM_ABI bool dominates(const Instruction *I1, const Instruction *I2) const
Return true if I1 dominates I2.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
size_type size() const
Determine the number of elements in the SetVector.
void insert_range(Range &&R)
bool insert(const value_type &X)
Insert a new element into the SetVector.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Value * getValueOperand()
constexpr bool empty() const
empty - Check if the string is empty.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
iterator_range< use_iterator > uses()
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ BasicBlock
Various leaf nodes.
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
OneOps_match< OpTy, Instruction::Load > m_Load(const OpTy &Op)
Matches LoadInst.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
LLVM_ABI bool calculateFragmentIntersect(const DataLayout &DL, const Value *Dest, uint64_t SliceOffsetInBits, uint64_t SliceSizeInBits, const DbgVariableRecord *DVRAssign, std::optional< DIExpression::FragmentInfo > &Result)
Calculate the fragment of the variable in DAI covered from (Dest + SliceOffsetInBits) to to (Dest + S...
initializer< Ty > init(const Ty &Val)
NodeAddr< DefNode * > Def
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI void initializeDSELegacyPassPass(PassRegistry &)
FunctionAddr VTableAddr Value
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isStrongerThanMonotonic(AtomicOrdering AO)
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
iterator_range< po_iterator< T > > post_order(const T &G)
LLVM_ABI bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
DomTreeNodeBase< BasicBlock > DomTreeNode
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
auto reverse(ContainerTy &&C)
bool isModSet(const ModRefInfo MRI)
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
FunctionAddr VTableAddr Count
LLVM_ABI bool AreStatisticsEnabled()
Check if statistics are enabled.
LLVM_ABI bool isNotVisibleOnUnwind(const Value *Object, bool &RequiresNoCaptureBeforeUnwind)
Return true if Object memory is not visible after an unwind, in the sense that program semantics cann...
LLVM_ABI Value * emitCalloc(Value *Num, Value *Size, IRBuilderBase &B, const TargetLibraryInfo &TLI, unsigned AddrSpace)
Emit a call to the calloc function.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
LLVM_ABI bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI FunctionPass * createDeadStoreEliminationPass()
LLVM_ABI Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
auto predecessors(const MachineBasicBlock *BB)
bool capturesAnything(CaptureComponents CC)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool mayContainIrreducibleControl(const Function &F, const LoopInfo *LI)
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
bool capturesNothing(CaptureComponents CC)
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
bool isStrongerThan(AtomicOrdering AO, AtomicOrdering Other)
Returns true if AO is stronger than Other as defined by the AtomicOrdering lattice,...
bool isRefSet(const ModRefInfo MRI)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.