#include "llvm/Config/llvm-config.h"

#define DEBUG_TYPE "memoryssa"
76 "
memssa-check-limit", cl::Hidden, cl::init(100),
78 "will consider trying to walk past (default = 100)"));
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA);
      OS << "; " << *MA << " - clobbered by ";
      if (MSSA->isLiveOnEntryDef(Clobber))
        OS << LiveOnEntryStr;
      else
        OS << *Clobber;
      OS << "\n";
    }
  }
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      // There is no MemoryLocation for a fence instruction; leave Loc empty.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;
    if (!IsCall)
      return Loc == Other.Loc;
    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;
    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};
// Inside DenseMapInfo<MemoryLocOrCall>, getHashValue continues for the call
// case: hash the callee, then fold in every argument.
    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
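// MemoryLocOrCall lets the use optimizer key a DenseMap on "the thing a
// MemoryUse reads": a MemoryLocation for ordinary loads, or the call (callee
// plus arguments) for reading calls, which have no single memory location.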
// This does one-way checks to see if Use could theoretically be hoisted above
// MayClobber. This will not check the other way around.
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;

  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
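// Intuition: a seq_cst load takes part in the single global order of seq_cst
// operations, and an acquire (or stronger) load may begin a synchronizes-with
// edge; hoisting a later load above either could observe a state the memory
// model forbids, so those pairs are treated as non-reorderable.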
template <typename AliasAnalysisType>
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics show up as affecting memory, but they are only markers.
    switch (II->getIntrinsicID()) {
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst))
    return isModOrRefSet(AA.getModRefInfo(DefInst, CB));

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  return isModSet(AA.getModRefInfo(DefInst, UseLoc));
}
template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUse *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {
  // Unwrap the MemoryLocOrCall and dispatch to the location-based overload.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}
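// Example of what these queries decide: given
//   store i32 0, ptr %p      ; MemoryDef
//   %v = load i32, ptr %q    ; MemoryUse
// the def clobbers the use exactly when AA reports that the store may Mod the
// location %q points at; NoAlias pointers let the use walk past this def.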
struct UpwardsMemoryQuery {
  // True if our original query started off as a call.
  bool IsCall = false;
  // The pointer location we started the query with (empty if IsCall is true).
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with.
  const MemoryAccess *OriginalAccess = nullptr;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           !isModSet(AA.getModRefInfoMask(MemoryLocation::get(LI)));
  }
  return false;
}
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// inbetween `Start` and `ClobberAt` can clobber `Start`.
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, BatchAAResults &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA))
          FoundClobber = MSSA.isLiveOnEntryDef(MD) ||
                         instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
        break;
      }

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (isa<MemoryUse>(MA)) {
        assert(isa<MemoryUse>(Start) &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      // Queue this phi's incoming values for the walk (elided here).
    }
  }

  // If ClobberAt is imprecise (e.g. a phi), we may not be able to prove it
  // acted as a clobber on any single path.
  if (AllowImpreciseClobber)
    return;

  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}
  struct DefPath {
    MemoryLocation Loc;
    // Because we always walk in reverse, Last always dominates First.
    MemoryAccess *First;
    MemoryAccess *Last;
    std::optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            std::optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            std::optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping.
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  BatchAAResults *AA;
  // Find the def (or liveOnEntry) that a phi's walk should aim for: the
  // nearest def in a strictly dominating block.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    for (DomTreeNode *Node = DT.getNode(BB); (Node = Node->getIDom());)
      if (auto *Defs = MSSA.getBlockDefs(Node->getBlock()))
        return &*Defs->rbegin();
    return MSSA.getLiveOnEntryDef();
  }
  struct UpwardsWalkResult {
    // Either a known clobber or the last access walked; IsKnownClobber says
    // which.
    MemoryAccess *Result;
    bool IsKnownClobber;
  };

  // Walk to the next phi or clobber in the def chain starting at Desc.Last,
  // updating Desc.Last as it goes. Optionally stops at StopAt/SkipStopAt.
  // This does not test whether StopAt is itself a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // If the limit is already exhausted, grant a single step so the walk can
    // still make progress; restore the exhausted state afterwards.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true};

        if (!--*UpwardWalkLimit)
          return {Current, true};

        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA))
          return {MD, true};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};
  }
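// walkToPhiOrClobber is the walker's inner loop: it consumes one unit of the
// shared UpwardWalkLimit per def it must disambiguate, so a query's total
// work stays bounded no matter how many paths the phi optimization explores.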
  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT);
    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }
  // A path that has been walked to a clobber, plus the index of the DefPath
  // node where the walk ended.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };
  /// Returns a blocking access, or std::nullopt if every paused path reached
  /// StopWhere without finding a clobber first.
  std::optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");
    // BFS vs DFS doesn't really matter here; do a DFS with a sane iteration
    // order.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If the clobber doesn't dominate our target, the walk is blocked.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target; save this path in case we want to keep
        // walking. Don't save paths that merely looped back to the access we
        // are skipping.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return std::nullopt;
  }
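// getBlockingAccess drives the pessimistic half of phi optimization: paths
// that reach StopWhere cleanly are parked in NewPaused for another round,
// while a clobber that fails to dominate the walk target aborts the whole
// optimization attempt for this phi.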
  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
        return false;
      return !N || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    std::optional<ListIndex> N;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }
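// def_path(I) walks a DefPath and its Previous links back toward the root of
// the search, like traversing an intrusive singly-linked list of path nodes;
// the end iterator is simply one whose index is std::nullopt.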
  struct OptznResult {
    TerminatedPath PrimaryClobber;
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N.
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }
767 "Reset the optimization state.");
772 auto PriorPathsSize = Paths.
size();
778 addSearches(Phi, PausedSearches, 0);
784 auto Dom = Paths.
begin();
785 for (
auto I = std::next(Dom),
E = Paths.
end();
I !=
E; ++
I)
786 if (!MSSA.
dominates(
I->Clobber, Dom->Clobber))
790 std::iter_swap(
Last, Dom);
796 "liveOnEntry wasn't treated as a clobber?");
798 const auto *
Target = getWalkTarget(Current);
801 assert(
all_of(TerminatedPaths, [&](
const TerminatedPath &
P) {
808 if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
809 Target, PausedSearches, NewPaused, TerminatedPaths)) {
813 auto Iter =
find_if(def_path(Blocker->LastNode), [&](
const DefPath &
N) {
814 return defPathIndex(N) < PriorPathsSize;
816 assert(Iter != def_path_iterator());
818 DefPath &CurNode = *Iter;
819 assert(CurNode.Last == Current);
846 TerminatedPath
Result{CurNode.Last, defPathIndex(CurNode)};
853 if (NewPaused.
empty()) {
854 MoveDominatedPathToEnd(TerminatedPaths);
856 return {
Result, std::move(TerminatedPaths)};
861 for (ListIndex Paused : NewPaused) {
862 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
863 if (WR.IsKnownClobber)
867 DefChainEnd = WR.Result;
870 if (!TerminatedPaths.
empty()) {
876 assert(DefChainEnd &&
"Failed to find dominating phi/liveOnEntry");
881 for (
const TerminatedPath &TP : TerminatedPaths) {
884 if (DT.
dominates(ChainBB, TP.Clobber->getBlock()))
891 if (!Clobbers.
empty()) {
892 MoveDominatedPathToEnd(Clobbers);
894 return {
Result, std::move(Clobbers)};
898 [&](ListIndex
I) {
return Paths[
I].Last == DefChainEnd; }));
901 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
903 PriorPathsSize = Paths.
size();
904 PausedSearches.
clear();
905 for (ListIndex
I : NewPaused)
906 addSearches(DefChainPhi, PausedSearches,
I);
909 Current = DefChainPhi;
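// The overall shape of tryOptimizePhi: repeatedly race every unblocked path
// up toward the walk target; if all paths agree on a single dominating
// clobber the phi can be "optimized through", otherwise the phi itself is
// the best answer we can report and cache.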
  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT)
      : MSSA(MSSA), DT(DT) {}
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start,
                            UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {
    AA = &BAA;
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently
    // grab its def.
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);
    // Fast path for the common case: no phi optimization necessary.
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA);
#endif
    return Result;
  }
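// Hypothetical usage, for orientation (the walker is normally reached through
// MemorySSA::getWalker() rather than constructed directly):
//   MemorySSAWalker *W = MSSA.getWalker();
//   MemoryAccess *Clobber =
//       W->getClobberingMemoryAccess(MSSA.getMemoryAccess(&SomeLoad));
// Internally that call bottoms out in findClobber above.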
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};
// MemorySSA::ClobberWalkerBase owns the ClobberWalker; CachingWalker and
// SkipSelfWalker are thin wrappers over its two overloads:
  ClobberWalker Walker;
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              BatchAAResults &, unsigned &);
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &,
                                              unsigned &, bool SkipSelf,
                                              bool UseInvariantGroup = true);

// CachingWalker methods:
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }
  MemoryAccess *
  getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA,
                                                 BatchAAResults &BAA,
                                                 unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);
  }
  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }

// SkipSelfWalker methods (identical shape, but with SkipSelf set;
// invalidateInfo is the same as CachingWalker's):
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }
void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors.
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void)ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Note: this can't be sunk into the if, because it must happen regardless
  // of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // The incoming value can only change if there is a block def, in
        // which case it's the last def in the block.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
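// This is the classic SSA renaming pass, done iteratively over the dominator
// tree with an explicit work stack instead of recursion; the "current
// definition" threaded through is the nearest dominating MemoryDef (or
// liveOnEntry) rather than an ordinary SSA value.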
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // Point uses and defs at liveOnEntry; phis are simply removed.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), F(&Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  // Build MemorySSA using a batch alias analysis, which caches AA queries.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA, iterator_range(F->begin(), F->end()));
}

MemorySSA::MemorySSA(Loop &L, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), L(&L), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(
      BatchAA, map_range(L.blocks(), [](const BasicBlock *BB) -> BasicBlock & {
        return *const_cast<BasicBlock *>(BB);
      }));
}

MemorySSA::~MemorySSA() {
  // Drop all our references.
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given memory location is in the version stack.
  struct MemlocStackInfo {
    // Versions of the stack; they increase whenever the stack changes due to
    // pushes or pops.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // The lower bound of stack slots to check: where the last walk for this
    // location ended.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // Where the last kill (clobber) for this location was found.
    unsigned long LastKill;
    bool LastKillValid;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &,
                           unsigned long &, SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};
/// Optimize the uses in a given block. This is essentially the SSA renaming
/// algorithm, with one caveat: we are able to use a single stack for all
/// MemoryUses, because MemoryDefs are considered to clobber everything.
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // incrementing the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (MU->isOptimized())
      continue;

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, we've removed things from the top of the
    // stack due to changing blocks; we may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that changed is the StackEpoch, we only have to check the new
      // things on the stack; everything before was already checked.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
    }

    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill info is not valid: the stack may
      // have been modified since it was last updated.
      LocInfo.LastKillValid = false;
      continue;
    }

    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, and go there.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccessWithoutInvariantGroup(
                MU, *AA, UpwardWalkLimit);
        // We are guaranteed to find it on the stack, or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
        FoundClobberResult = true;
        break;
      }
      --UpperBound;
    }

    // At the end of this loop, UpperBound is either a clobber, or lower bound.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      MU->setDefiningAccess(VersionStack[UpperBound], true);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}
void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go.
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}
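// Phi placement is standard SSA construction: a MemoryPhi is needed exactly
// on the iterated dominance frontier of the set of blocks containing
// MemoryDefs, just as ordinary phis are placed on the IDF of a variable's
// stores.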
template <typename IterT>
void MemorySSA::buildMemorySSA(BatchAAResults &BAA, IterT Blocks) {
  // Create an access to represent "live on entry": memory defined before the
  // region begins (arguments, globals). It is never inserted into the IR.
  BasicBlock &StartingPoint = *Blocks.begin();
  LiveOnEntryDef.reset(new MemoryDef(StartingPoint.getContext(), nullptr,
                                     nullptr, &StartingPoint, NextID++));

  // Figure out where defs occur, and chain together all the accesses.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  for (BasicBlock &B : Blocks) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;
      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse.
  SmallPtrSet<BasicBlock *, 16> Visited;
  if (L) {
    // A MemoryPhi placed in the loop preheader is outside the region; point
    // its uses at liveOnEntry and drop it (loop renaming details elided).
    if (auto *P = getMemoryAccess(L->getLoopPreheader())) {
      for (Use &U : make_early_inc_range(P->uses()))
        U.set(LiveOnEntryDef.get());
      removeFromLists(P);
    }
  } else {
    renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
  }

  // Mark the uses in unreachable blocks as live on entry, so they go
  // somewhere.
  for (auto &BB : Blocks)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
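// Construction, in summary: (1) create a MemoryUse/MemoryDef per memory
// instruction, (2) place MemoryPhis on the IDF of the defining blocks,
// (3) rename over the dominator tree so every access points at its nearest
// dominating def. Uses are deliberately left unoptimized until a walker or
// ensureOptimizedUses() asks for better answers.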
MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);

  Walker = std::make_unique<CachingWalker>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);

  SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get());
  return SkipWalker.get();
}
// Helper used by the creation routines: places NewAccess into the access and
// defs lists for the given basic block, at the given insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first; otherwise it goes after any phis.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}
void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // Inserting at the end is easy. If we were given a def, we already have a
    // defs-list iterator. If we were given a use, hunt for the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end.
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}
void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists.
  removeFromLists(What, false);

  // Moving a MemoryDef does not implicitly invalidate its optimized state
  // the way it does for a MemoryUse, so reset it explicitly.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update the lookup table entry.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}
MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  ValueToMemoryAccess[BB] = Phi;
  // Phi's always are placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  return Phi;
}
MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template,
                                               bool CreationMustSucceed) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  if (CreationMustSucceed)
    assert(NewAccess != nullptr && "Tried to create a memory access for a "
                                   "non-memory touching instruction");
  if (NewAccess) {
    assert((!Definition || !isa<MemoryUse>(Definition)) &&
           "A use cannot be a defining access");
    NewAccess->setDefiningAccess(Definition);
  }
  return NewAccess;
}
// Return true if the instruction has ordering constraints other than
// Unordered (i.e. it is volatile, or atomic with a stronger ordering).
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}
template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           AliasAnalysisType *AAP,
                                           const MemoryUseOrDef *Template) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Ignore that fake memory dependency here, and
  // likewise for the other marker intrinsics.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return nullptr;
    }
  }

  // Using a nonstandard AA pipeline might leave us with unexpected modref
  // results for I, so don't model instructions that may not read from or
  // write to memory. This is necessary for correctness.
  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
    return nullptr;

  bool Def, Use;
  if (Template) {
    Def = isa<MemoryDef>(Template);
    Use = isa<MemoryUse>(Template);
#if !defined(NDEBUG)
    ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    // Memory accesses should only be reduced, since AA may return better
    // results as a result of some transformations.
    assert((Def == DefCheck || !DefCheck) &&
           "Memory accesses should only be reduced");
    if (!Def && Use != UseCheck) {
      // A new access should not have more power than the template access.
      assert(!UseCheck && "Invalid template");
    }
#endif
  } else {
    // Find out what effect this instruction has on memory. The isOrdered
    // check ensures that volatiles and atomics end up as defs.
    ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
    Def = isModSet(ModRef) || isOrdered(I);
    Use = isRefSet(ModRef);
  }

  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore them.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}
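// Classification rule of thumb: anything that may write (or is volatile or
// ordered) becomes a MemoryDef; anything that only reads becomes a MemoryUse;
// everything else gets no access at all. A call that both reads and writes is
// a MemoryDef, since defs also act as implicit uses in MemorySSA.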
1841 "Trying to remove memory access that still has uses");
1842 BlockNumbering.erase(MA);
1843 if (
auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1846 if (!isa<MemoryUse>(MA))
1850 if (
const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1855 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1856 if (VMA->second == MA)
1857 ValueToMemoryAccess.erase(VMA);
1870 if (!isa<MemoryUse>(MA)) {
1871 auto DefsIt = PerBlockDefs.
find(BB);
1872 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1875 PerBlockDefs.
erase(DefsIt);
1880 auto AccessIt = PerBlockAccesses.
find(BB);
1881 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1883 Accesses->erase(MA);
1885 Accesses->remove(MA);
1887 if (Accesses->empty()) {
1888 PerBlockAccesses.
erase(AccessIt);
1889 BlockNumberingValid.erase(BB);
void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  Function *F = this->F;
  if (L)
    F = L->getHeader()->getParent();
  F->print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA(VerificationLevel VL) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  VL = VerificationLevel::Full;
#endif
  if (F) {
    auto Blocks = iterator_range(F->begin(), F->end());
    verifyOrderingDominationAndDefUses(Blocks, VL);
    verifyDominationNumbers(Blocks);
    if (VL == VerificationLevel::Full)
      verifyPrevDefInPhis(Blocks);
  } else {
    assert(L && "must either have loop or function");
    auto Blocks =
        map_range(L->blocks(), [](const BasicBlock *BB) -> BasicBlock & {
          return *const_cast<BasicBlock *>(BB);
        });
    verifyOrderingDominationAndDefUses(Blocks, VL);
    verifyDominationNumbers(Blocks);
    if (VL == VerificationLevel::Full)
      verifyPrevDefInPhis(Blocks);
  }
}
template <typename IterT>
void MemorySSA::verifyPrevDefInPhis(IterT Blocks) const {
  for (const BasicBlock &BB : Blocks) {
    if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        auto *Pred = Phi->getIncomingBlock(I);
        auto *IncAcc = Phi->getIncomingValue(I);
        // If Pred has no unreachable predecessors, find the last def by
        // walking up the IDoms.
        if (auto *DTNode = DT->getNode(Pred)) {
          while (DTNode) {
            if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
              auto *LastAcc = &*(--DefList->end());
              assert(LastAcc == IncAcc &&
                     "Incorrect incoming access into phi.");
              (void)IncAcc;
              (void)LastAcc;
              break;
            }
            DTNode = DTNode->getIDom();
          }
        }
      }
    }
  }
}
template <typename IterT>
void MemorySSA::verifyDominationNumbers(IterT Blocks) const {
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : Blocks) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      (void)LastNumber;
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
}
template <typename IterT>
void MemorySSA::verifyOrderingDominationAndDefUses(IterT Blocks,
                                                   VerificationLevel VL) const {
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the
  // access lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : Blocks) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryPhi *Phi = getMemoryAccess(&B);
    if (Phi) {
      // Verify ordering.
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
      // Verify domination.
      for (const Use &U : Phi->uses()) {
        assert(dominates(Phi, U) && "Memory PHI does not dominate it's uses");
        (void)U;
      }
      // Verify def-uses for full verify.
      if (VL == VerificationLevel::Full) {
        assert(Phi->getNumOperands() == pred_size(Phi->getBlock()) &&
               "Incomplete MemoryPhi Node");
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          verifyUseInDefs(Phi->getIncomingValue(I), Phi);
          assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
                 "Incoming phi block not a block predecessor");
        }
      }
    }

    for (Instruction &I : B) {
      MemoryUseOrDef *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        ActualAccesses.push_back(MA);
        if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
          ActualDefs.push_back(MA);
          if (VL == VerificationLevel::Full)
            for (const Use &U : MD->uses()) {
              assert(dominates(MD, U) &&
                     "Memory Def does not dominate it's uses");
              (void)U;
            }
        }
        if (VL == VerificationLevel::Full)
          verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list. Same with defs.
    if (!AL && !DL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
}

void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use not point to live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
}
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}
2147 "Asking for local domination when accesses are in different blocks!");
2149 if (Dominatee == Dominator)
2162 if (!BlockNumberingValid.count(DominatorBlock))
2163 renumberBlock(DominatorBlock);
2165 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2167 assert(DominatorNum != 0 &&
"Block was not numbered properly");
2168 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2169 assert(DominateeNum != 0 &&
"Block was not numbered properly");
2170 return DominatorNum < DominateeNum;
bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;
  if (isLiveOnEntryDef(Dominatee))
    return false;
  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the blocks are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}
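// Dominance between accesses reduces to block dominance plus, within one
// block, a lazily computed per-block numbering (renumberBlock above); the
// numbering is invalidated whenever a block's access list changes.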
void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());
  }
}

void MemoryPhi::print(raw_ostream &OS) const {
  ListSeparator LS(",");
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    OS << LS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemoryAccess::dump() const {
  print(dbgs());
  dbgs() << "\n";
}
#endif

class DOTFuncMSSAInfo {
private:
  const Function &F;
  MemorySSAAnnotatedWriter MSSAWriter;

public:
  DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
      : F(F), MSSAWriter(&MSSA) {}

  const Function *getFunction() { return &F; }
  MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
};
  std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
    return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
        Node, nullptr,
        [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
          BB.print(OS, &CFGInfo->getWriter(), true, true);
        },
        [](std::string &S, unsigned &I, unsigned Idx) -> void {
          std::string Str = S.substr(I, Idx - I);
          StringRef SR = Str;
          // Keep lines carrying MemorySSA annotations; erase other comments.
          if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
              SR.count("MemoryUse("))
            return;
          DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
        });
  }

  std::string getNodeAttributes(const BasicBlock *Node,
                                DOTFuncMSSAInfo *CFGInfo) {
    return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
               ? "style=filled, fillcolor=lightpink"
               : "";
  }
PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  if (EnsureOptimizedUses)
    MSSA.ensureOptimizedUses();
  if (DotCFGMSSA != "") {
    DOTFuncMSSAInfo CFGInfo(F, MSSA);
    WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
  } else {
    OS << "MemorySSA for function: " << F.getName() << "\n";
    MSSA.print(OS);
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F,
                                                  FunctionAnalysisManager &AM) {
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
  MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
  F.print(OS, &Writer);
  return PreservedAnalyses::all();
}
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    BatchAAResults &BAA, unsigned &UpwardWalkLimit) {
  assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");

  // If the location has no pointer, we can't say anything useful.
  if (Loc.Ptr == nullptr)
    return StartingAccess;

  Instruction *I = nullptr;
  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
    if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
      return StartingUseOrDef;

    I = StartingUseOrDef->getMemoryInst();

    // Conservatively, fences are always clobbers, so don't perform the walk
    // if we hit a fence.
    if (!isa<CallBase>(I) && I->isFenceLike())
      return StartingUseOrDef;
  }

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingAccess;
  Q.StartingLoc = Loc;
  Q.Inst = nullptr;
  Q.IsCall = false;

  // Unlike the other overload, do not walk to the def of a def, because we
  // are handed something we already believe is the clobbering access.
  MemoryAccess *Clobber =
      Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);
  LLVM_DEBUG({
    dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
    if (I)
      dbgs() << "  for instruction " << *I << "\n";
    dbgs() << "  is " << *Clobber << "\n";
  });
  return Clobber;
}
static const Instruction *
getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
  if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())
    return nullptr;

  // We consider bitcasts and zero GEPs to be the same pointer value. Start by
  // stripping them, then look at the pointer's users for another
  // invariant.group load/store of the same pointer.
  Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts();

  // It's not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  if (isa<Constant>(PointerOperand))
    return nullptr;

  SmallVector<const Value *, 8> PointerUsesQueue;
  PointerUsesQueue.push_back(PointerOperand);
  const Instruction *MostDominatingInstruction = &I;

  while (!PointerUsesQueue.empty()) {
    const Value *Ptr = PointerUsesQueue.pop_back_val();
    for (const User *Us : Ptr->users()) {
      auto *U = dyn_cast<Instruction>(Us);
      if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))
        continue;

      // (Equivalent-pointer handling for bitcasts/zero GEPs elided here.)
      if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
          getLoadStorePointerOperand(U) == Ptr && !U->isVolatile())
        MostDominatingInstruction = U;
    }
  }
  return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
}
MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit,
    bool SkipSelf, bool UseInvariantGroup) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  if (UseInvariantGroup) {
    if (auto *I = getInvariantGroupClobberingInstruction(
            *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
      assert(isa<LoadInst>(I) || isa<StoreInst>(I));

      auto *ClobberMA = MSSA->getMemoryAccess(I);
      assert(ClobberMA);
      if (isa<MemoryUse>(ClobberMA))
        return ClobberMA->getDefiningAccess();
      return ClobberMA;
    }
  }

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: the optimized def result is stored in a separate field, since we
  // can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence: it conservatively clobbers all
  // memory and has no location to disambiguate against.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(BAA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location.
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // If it is liveOnEntry, we will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      return DefiningAccess;
    }

    OptimizedAccess =
        Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);
    StartingAccess->setOptimized(OptimizedAccess);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}
MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA,
                                                    BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}
bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {
  auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
    Ptr = Ptr->stripPointerCasts();
    if (!isa<Instruction>(Ptr))
      return true;
    return isa<AllocaInst>(Ptr);
  };

  Ptr = Ptr->stripPointerCasts();
  if (auto *I = dyn_cast<Instruction>(Ptr)) {
    if (I->getParent()->isEntryBlock())
      return true;
  }
  if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
           GEP->hasAllConstantIndices();
  }
  return IsGuaranteedLoopInvariantBase(Ptr);
}
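// IsGuaranteedLoopInvariant is what lets upward_defs_iterator keep a precise
// MemoryLocation while translating a query across a MemoryPhi: a pointer that
// is an alloca, an entry-block instruction, or a constant-index GEP of one
// cannot vary per loop iteration, so the same location stays valid on every
// incoming path.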