File: lib/Transforms/Utils/MemorySSA.cpp
Location: line 561, column 9
Description: Function call argument is an uninitialized value
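
The flagged call is DT->dominates(MD->getBlock(), UseBlock) inside the assert at line 561 of MemorySSA::verifyDomination. UseBlock is declared without an initializer at line 549 and is only assigned in the branches that follow; on the path where the user U is a MemoryPhi none of whose operands compares equal to MD, the loop completes without writing to UseBlock and the uninitialized pointer is passed to DT->dominates. Assuming MemorySSA's def-use lists are consistent (a MemoryPhi that uses MD must carry MD as an incoming value), that path should be unreachable in practice, but the checker cannot derive that invariant from this function alone. Editor notes at the relevant lines below mark the reported path.
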
1 | //===-- MemorySSA.cpp - Memory SSA Builder---------------------------===// | |||||
2 | // | |||||
3 | // The LLVM Compiler Infrastructure | |||||
4 | // | |||||
5 | // This file is distributed under the University of Illinois Open Source | |||||
6 | // License. See LICENSE.TXT for details. | |||||
7 | // | |||||
8 | //===----------------------------------------------------------------===// | |||||
9 | // | |||||
10 | // This file implements the MemorySSA class. | |||||
11 | // | |||||
12 | //===----------------------------------------------------------------===// | |||||
13 | #include "llvm/Transforms/Utils/MemorySSA.h" | |||||
14 | #include "llvm/ADT/DenseMap.h" | |||||
15 | #include "llvm/ADT/DenseSet.h" | |||||
16 | #include "llvm/ADT/DepthFirstIterator.h" | |||||
17 | #include "llvm/ADT/GraphTraits.h" | |||||
18 | #include "llvm/ADT/PostOrderIterator.h" | |||||
19 | #include "llvm/ADT/STLExtras.h" | |||||
20 | #include "llvm/ADT/SmallPtrSet.h" | |||||
21 | #include "llvm/ADT/SmallSet.h" | |||||
22 | #include "llvm/ADT/Statistic.h" | |||||
23 | #include "llvm/Analysis/AliasAnalysis.h" | |||||
24 | #include "llvm/Analysis/CFG.h" | |||||
25 | #include "llvm/Analysis/GlobalsModRef.h" | |||||
26 | #include "llvm/Analysis/IteratedDominanceFrontier.h" | |||||
27 | #include "llvm/Analysis/MemoryLocation.h" | |||||
28 | #include "llvm/Analysis/PHITransAddr.h" | |||||
29 | #include "llvm/IR/AssemblyAnnotationWriter.h" | |||||
30 | #include "llvm/IR/DataLayout.h" | |||||
31 | #include "llvm/IR/Dominators.h" | |||||
32 | #include "llvm/IR/GlobalVariable.h" | |||||
33 | #include "llvm/IR/IRBuilder.h" | |||||
34 | #include "llvm/IR/IntrinsicInst.h" | |||||
35 | #include "llvm/IR/LLVMContext.h" | |||||
36 | #include "llvm/IR/Metadata.h" | |||||
37 | #include "llvm/IR/Module.h" | |||||
38 | #include "llvm/IR/PatternMatch.h" | |||||
39 | #include "llvm/Support/Debug.h" | |||||
40 | #include "llvm/Support/FormattedStream.h" | |||||
41 | #include "llvm/Transforms/Scalar.h" | |||||
42 | #include <algorithm> | |||||
43 | ||||||
44 | #define DEBUG_TYPE"memoryssa" "memoryssa" | |||||
45 | using namespace llvm; | |||||
46 | STATISTIC(NumClobberCacheLookups, "Number of Memory SSA version cache lookups")static llvm::Statistic NumClobberCacheLookups = { "memoryssa" , "Number of Memory SSA version cache lookups", 0, 0 }; | |||||
47 | STATISTIC(NumClobberCacheHits, "Number of Memory SSA version cache hits")static llvm::Statistic NumClobberCacheHits = { "memoryssa", "Number of Memory SSA version cache hits" , 0, 0 }; | |||||
48 | STATISTIC(NumClobberCacheInserts, "Number of MemorySSA version cache inserts")static llvm::Statistic NumClobberCacheInserts = { "memoryssa" , "Number of MemorySSA version cache inserts", 0, 0 }; | |||||
49 | INITIALIZE_PASS_WITH_OPTIONS_BEGIN(MemorySSAPrinterPass, "print-memoryssa",static void* initializeMemorySSAPrinterPassPassOnce(PassRegistry &Registry) { MemorySSAPrinterPass::registerOptions(); | |||||
50 | "Memory SSA", true, true)static void* initializeMemorySSAPrinterPassPassOnce(PassRegistry &Registry) { MemorySSAPrinterPass::registerOptions(); | |||||
51 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)initializeDominatorTreeWrapperPassPass(Registry); | |||||
52 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)initializeAAResultsWrapperPassPass(Registry); | |||||
53 | INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)initializeGlobalsAAWrapperPassPass(Registry); | |||||
54 | INITIALIZE_PASS_END(MemorySSAPrinterPass, "print-memoryssa", "Memory SSA", true,PassInfo *PI = new PassInfo("Memory SSA", "print-memoryssa", & MemorySSAPrinterPass ::ID, PassInfo::NormalCtor_t(callDefaultCtor < MemorySSAPrinterPass >), true, true); Registry.registerPass (*PI, true); return PI; } void llvm::initializeMemorySSAPrinterPassPass (PassRegistry &Registry) { static volatile sys::cas_flag initialized = 0; sys::cas_flag old_val = sys::CompareAndSwap(&initialized , 1, 0); if (old_val == 0) { initializeMemorySSAPrinterPassPassOnce (Registry); sys::MemoryFence(); ; ; initialized = 2; ; } else { sys::cas_flag tmp = initialized; sys::MemoryFence(); while (tmp != 2) { tmp = initialized; sys::MemoryFence(); } } ; } | |||||
55 | true)PassInfo *PI = new PassInfo("Memory SSA", "print-memoryssa", & MemorySSAPrinterPass ::ID, PassInfo::NormalCtor_t(callDefaultCtor < MemorySSAPrinterPass >), true, true); Registry.registerPass (*PI, true); return PI; } void llvm::initializeMemorySSAPrinterPassPass (PassRegistry &Registry) { static volatile sys::cas_flag initialized = 0; sys::cas_flag old_val = sys::CompareAndSwap(&initialized , 1, 0); if (old_val == 0) { initializeMemorySSAPrinterPassPassOnce (Registry); sys::MemoryFence(); ; ; initialized = 2; ; } else { sys::cas_flag tmp = initialized; sys::MemoryFence(); while (tmp != 2) { tmp = initialized; sys::MemoryFence(); } } ; } | |||||
56 | INITIALIZE_PASS(MemorySSALazy, "memoryssalazy", "Memory SSA", true, true)static void* initializeMemorySSALazyPassOnce(PassRegistry & Registry) { PassInfo *PI = new PassInfo("Memory SSA", "memoryssalazy" , & MemorySSALazy ::ID, PassInfo::NormalCtor_t(callDefaultCtor < MemorySSALazy >), true, true); Registry.registerPass( *PI, true); return PI; } void llvm::initializeMemorySSALazyPass (PassRegistry &Registry) { static volatile sys::cas_flag initialized = 0; sys::cas_flag old_val = sys::CompareAndSwap(&initialized , 1, 0); if (old_val == 0) { initializeMemorySSALazyPassOnce( Registry); sys::MemoryFence(); ; ; initialized = 2; ; } else { sys::cas_flag tmp = initialized; sys::MemoryFence(); while ( tmp != 2) { tmp = initialized; sys::MemoryFence(); } } ; } | |||||
57 | ||||||
58 | namespace llvm { | |||||
59 | ||||||
60 | /// \brief An assembly annotator class to print Memory SSA information in | |||||
61 | /// comments. | |||||
62 | class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter { | |||||
63 | friend class MemorySSA; | |||||
64 | const MemorySSA *MSSA; | |||||
65 | ||||||
66 | public: | |||||
67 | MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {} | |||||
68 | ||||||
69 | virtual void emitBasicBlockStartAnnot(const BasicBlock *BB, | |||||
70 | formatted_raw_ostream &OS) { | |||||
71 | if (MemoryAccess *MA = MSSA->getMemoryAccess(BB)) | |||||
72 | OS << "; " << *MA << "\n"; | |||||
73 | } | |||||
74 | ||||||
75 | virtual void emitInstructionAnnot(const Instruction *I, | |||||
76 | formatted_raw_ostream &OS) { | |||||
77 | if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) | |||||
78 | OS << "; " << *MA << "\n"; | |||||
79 | } | |||||
80 | }; | |||||
81 | } | |||||
82 | ||||||
83 | namespace { | |||||
84 | struct RenamePassData { | |||||
85 | DomTreeNode *DTN; | |||||
86 | DomTreeNode::const_iterator ChildIt; | |||||
87 | MemoryAccess *IncomingVal; | |||||
88 | ||||||
89 | RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It, | |||||
90 | MemoryAccess *M) | |||||
91 | : DTN(D), ChildIt(It), IncomingVal(M) {} | |||||
92 | void swap(RenamePassData &RHS) { | |||||
93 | std::swap(DTN, RHS.DTN); | |||||
94 | std::swap(ChildIt, RHS.ChildIt); | |||||
95 | std::swap(IncomingVal, RHS.IncomingVal); | |||||
96 | } | |||||
97 | }; | |||||
98 | } | |||||
99 | ||||||
100 | namespace llvm { | |||||
101 | /// \brief Rename a single basic block into MemorySSA form. | |||||
102 | /// Uses the standard SSA renaming algorithm. | |||||
103 | /// \returns The new incoming value. | |||||
104 | MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, | |||||
105 | MemoryAccess *IncomingVal) { | |||||
106 | auto It = PerBlockAccesses.find(BB); | |||||
107 | // Skip most processing if the list is empty. | |||||
108 | if (It != PerBlockAccesses.end()) { | |||||
109 | AccessListType *Accesses = It->second.get(); | |||||
110 | for (MemoryAccess &L : *Accesses) { | |||||
111 | switch (L.getValueID()) { | |||||
112 | case Value::MemoryUseVal: | |||||
113 | cast<MemoryUse>(&L)->setDefiningAccess(IncomingVal); | |||||
114 | break; | |||||
115 | case Value::MemoryDefVal: | |||||
116 | // We can't legally optimize defs, because we only allow single | |||||
117 | // memory phis/uses on operations, and if we optimize these, we can | |||||
118 | // end up with multiple reaching defs. Uses do not have this | |||||
119 | // problem, since they do not produce a value | |||||
120 | cast<MemoryDef>(&L)->setDefiningAccess(IncomingVal); | |||||
121 | IncomingVal = &L; | |||||
122 | break; | |||||
123 | case Value::MemoryPhiVal: | |||||
124 | IncomingVal = &L; | |||||
125 | break; | |||||
126 | } | |||||
127 | } | |||||
128 | } | |||||
129 | ||||||
130 | // Pass through values to our successors | |||||
131 | for (const BasicBlock *S : successors(BB)) { | |||||
132 | auto It = PerBlockAccesses.find(S); | |||||
133 | // Rename the phi nodes in our successor block | |||||
134 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) | |||||
135 | continue; | |||||
136 | AccessListType *Accesses = It->second.get(); | |||||
137 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); | |||||
138 | assert(std::find(succ_begin(BB), succ_end(BB), S) != succ_end(BB) &&
139 |        "Must be at least one edge from Succ to BB!");
140 | Phi->addIncoming(IncomingVal, BB); | |||||
141 | } | |||||
142 | ||||||
143 | return IncomingVal; | |||||
144 | } | |||||
145 | ||||||
146 | /// \brief This is the standard SSA renaming algorithm. | |||||
147 | /// | |||||
148 | /// We walk the dominator tree in preorder, renaming accesses, and then filling | |||||
149 | /// in phi nodes in our successors. | |||||
150 | void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal, | |||||
151 | SmallPtrSet<BasicBlock *, 16> &Visited) { | |||||
152 | SmallVector<RenamePassData, 32> WorkStack; | |||||
153 | IncomingVal = renameBlock(Root->getBlock(), IncomingVal); | |||||
154 | WorkStack.push_back({Root, Root->begin(), IncomingVal}); | |||||
155 | Visited.insert(Root->getBlock()); | |||||
156 | ||||||
157 | while (!WorkStack.empty()) { | |||||
158 | DomTreeNode *Node = WorkStack.back().DTN; | |||||
159 | DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt; | |||||
160 | IncomingVal = WorkStack.back().IncomingVal; | |||||
161 | ||||||
162 | if (ChildIt == Node->end()) { | |||||
163 | WorkStack.pop_back(); | |||||
164 | } else { | |||||
165 | DomTreeNode *Child = *ChildIt; | |||||
166 | ++WorkStack.back().ChildIt; | |||||
167 | BasicBlock *BB = Child->getBlock(); | |||||
168 | Visited.insert(BB); | |||||
169 | IncomingVal = renameBlock(BB, IncomingVal); | |||||
170 | WorkStack.push_back({Child, Child->begin(), IncomingVal}); | |||||
171 | } | |||||
172 | } | |||||
173 | } | |||||
174 | ||||||
175 | /// \brief Compute dominator levels, used by the phi insertion algorithm above. | |||||
176 | void MemorySSA::computeDomLevels(DenseMap<DomTreeNode *, unsigned> &DomLevels) { | |||||
177 | for (auto DFI = df_begin(DT->getRootNode()), DFE = df_end(DT->getRootNode()); | |||||
178 | DFI != DFE; ++DFI) | |||||
179 | DomLevels[*DFI] = DFI.getPathLength() - 1; | |||||
180 | } | |||||
181 | ||||||
182 | /// \brief This handles unreachable block accesses by deleting phi nodes in
183 | /// unreachable blocks, and marking all other unreachable MemoryAccess's as | |||||
184 | /// being uses of the live on entry definition. | |||||
185 | void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { | |||||
186 | assert(!DT->isReachableFromEntry(BB) &&
187 |        "Reachable block found while handling unreachable blocks");
188 | ||||||
189 | auto It = PerBlockAccesses.find(BB); | |||||
190 | if (It == PerBlockAccesses.end()) | |||||
191 | return; | |||||
192 | ||||||
193 | auto &Accesses = It->second; | |||||
194 | for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) { | |||||
195 | auto Next = std::next(AI); | |||||
196 | // If we have a phi, just remove it. We are going to replace all | |||||
197 | // users with live on entry. | |||||
198 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI)) | |||||
199 | UseOrDef->setDefiningAccess(LiveOnEntryDef.get()); | |||||
200 | else | |||||
201 | Accesses->erase(AI); | |||||
202 | AI = Next; | |||||
203 | } | |||||
204 | } | |||||
205 | ||||||
206 | MemorySSA::MemorySSA(Function &Func) | |||||
207 | : AA(nullptr), DT(nullptr), F(Func), LiveOnEntryDef(nullptr), | |||||
208 | Walker(nullptr), NextID(0) {} | |||||
209 | ||||||
210 | MemorySSA::~MemorySSA() { | |||||
211 | // Drop all our references | |||||
212 | for (const auto &Pair : PerBlockAccesses) | |||||
213 | for (MemoryAccess &MA : *Pair.second) | |||||
214 | MA.dropAllReferences(); | |||||
215 | } | |||||
216 | ||||||
217 | MemorySSA::AccessListType *MemorySSA::getOrCreateAccessList(BasicBlock *BB) { | |||||
218 | auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr)); | |||||
219 | ||||||
220 | if (Res.second) | |||||
221 | Res.first->second = make_unique<AccessListType>(); | |||||
222 | return Res.first->second.get(); | |||||
223 | } | |||||
224 | ||||||
225 | MemorySSAWalker *MemorySSA::buildMemorySSA(AliasAnalysis *AA, | |||||
226 | DominatorTree *DT) { | |||||
227 | if (Walker) | |||||
228 | return Walker; | |||||
229 | ||||||
230 | assert(!this->AA && !this->DT &&
231 |        "MemorySSA without a walker already has AA or DT?");
232 | ||||||
233 | Walker = new CachingMemorySSAWalker(this, AA, DT); | |||||
234 | this->AA = AA; | |||||
235 | this->DT = DT; | |||||
236 | ||||||
237 | // We create an access to represent "live on entry", for things like | |||||
238 | // arguments or users of globals, where the memory they use is defined before | |||||
239 | // the beginning of the function. We do not actually insert it into the IR. | |||||
240 | // We do not define a live on exit for the immediate uses, and thus our | |||||
241 | // semantics do *not* imply that something with no immediate uses can simply | |||||
242 | // be removed. | |||||
243 | BasicBlock &StartingPoint = F.getEntryBlock(); | |||||
244 | LiveOnEntryDef = make_unique<MemoryDef>(F.getContext(), nullptr, nullptr, | |||||
245 | &StartingPoint, NextID++); | |||||
246 | ||||||
247 | // We maintain lists of memory accesses per-block, trading memory for time. We | |||||
248 | // could just look up the memory access for every possible instruction in the | |||||
249 | // stream. | |||||
250 | SmallPtrSet<BasicBlock *, 32> DefiningBlocks; | |||||
251 | SmallPtrSet<BasicBlock *, 32> DefUseBlocks; | |||||
252 | // Go through each block, figure out where defs occur, and chain together all | |||||
253 | // the accesses. | |||||
254 | for (BasicBlock &B : F) { | |||||
255 | bool InsertIntoDef = false; | |||||
256 | AccessListType *Accesses = nullptr; | |||||
257 | for (Instruction &I : B) { | |||||
258 | MemoryUseOrDef *MUD = createNewAccess(&I); | |||||
259 | if (!MUD) | |||||
260 | continue; | |||||
261 | InsertIntoDef |= isa<MemoryDef>(MUD); | |||||
262 | ||||||
263 | if (!Accesses) | |||||
264 | Accesses = getOrCreateAccessList(&B); | |||||
265 | Accesses->push_back(MUD); | |||||
266 | } | |||||
267 | if (InsertIntoDef) | |||||
268 | DefiningBlocks.insert(&B); | |||||
269 | if (Accesses) | |||||
270 | DefUseBlocks.insert(&B); | |||||
271 | } | |||||
272 | ||||||
273 | // Compute live-in. | |||||
274 | // Live in is normally defined as "all the blocks on the path from each def to | |||||
275 | // each of its uses".
276 | // MemoryDef's are implicit uses of previous state, so they are also uses. | |||||
277 | // This means we don't really have def-only instructions. The only | |||||
278 | // MemoryDef's that are not really uses are those that are of the LiveOnEntry | |||||
279 | // variable (because LiveOnEntry can reach anywhere, and every def is a | |||||
280 | // must-kill of LiveOnEntry). | |||||
281 | // In theory, you could precisely compute live-in by using alias-analysis to | |||||
282 | // disambiguate defs and uses to see which really pair up with which. | |||||
283 | // In practice, this would be really expensive and difficult. So we simply | |||||
284 | // assume all defs are also uses that need to be kept live. | |||||
285 | // Because of this, the end result of this live-in computation will be "the | |||||
286 | // entire set of basic blocks that reach any use". | |||||
287 | ||||||
288 | SmallPtrSet<BasicBlock *, 32> LiveInBlocks; | |||||
289 | SmallVector<BasicBlock *, 64> LiveInBlockWorklist(DefUseBlocks.begin(), | |||||
290 | DefUseBlocks.end()); | |||||
291 | // Now that we have a set of blocks where a value is live-in, recursively add | |||||
292 | // predecessors until we find the full region the value is live. | |||||
293 | while (!LiveInBlockWorklist.empty()) { | |||||
294 | BasicBlock *BB = LiveInBlockWorklist.pop_back_val(); | |||||
295 | ||||||
296 | // The block really is live in here, insert it into the set. If already in | |||||
297 | // the set, then it has already been processed. | |||||
298 | if (!LiveInBlocks.insert(BB).second) | |||||
299 | continue; | |||||
300 | ||||||
301 | // Since the value is live into BB, it is either defined in a predecessor or | |||||
302 | // live into it too.
303 | LiveInBlockWorklist.append(pred_begin(BB), pred_end(BB)); | |||||
304 | } | |||||
305 | ||||||
306 | // Determine where our MemoryPhi's should go | |||||
307 | ForwardIDFCalculator IDFs(*DT); | |||||
308 | IDFs.setDefiningBlocks(DefiningBlocks); | |||||
309 | IDFs.setLiveInBlocks(LiveInBlocks); | |||||
310 | SmallVector<BasicBlock *, 32> IDFBlocks; | |||||
311 | IDFs.calculate(IDFBlocks); | |||||
312 | ||||||
313 | // Now place MemoryPhi nodes. | |||||
314 | for (auto &BB : IDFBlocks) { | |||||
315 | // Insert phi node | |||||
316 | AccessListType *Accesses = getOrCreateAccessList(BB); | |||||
317 | MemoryPhi *Phi = new MemoryPhi(F.getContext(), BB, NextID++); | |||||
318 | ValueToMemoryAccess.insert(std::make_pair(BB, Phi)); | |||||
319 | // Phis are always placed at the front of the block.
320 | Accesses->push_front(Phi); | |||||
321 | } | |||||
322 | ||||||
323 | // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get | |||||
324 | // filled in with all blocks. | |||||
325 | SmallPtrSet<BasicBlock *, 16> Visited; | |||||
326 | renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited); | |||||
327 | ||||||
328 | // Now optimize the MemoryUse's defining access to point to the nearest | |||||
329 | // dominating clobbering def. | |||||
330 | // This ensures that MemoryUse's that are killed by the same store are | |||||
331 | // immediate users of that store, one of the invariants we guarantee. | |||||
332 | for (auto DomNode : depth_first(DT)) { | |||||
333 | BasicBlock *BB = DomNode->getBlock(); | |||||
334 | auto AI = PerBlockAccesses.find(BB); | |||||
335 | if (AI == PerBlockAccesses.end()) | |||||
336 | continue; | |||||
337 | AccessListType *Accesses = AI->second.get(); | |||||
338 | for (auto &MA : *Accesses) { | |||||
339 | if (auto *MU = dyn_cast<MemoryUse>(&MA)) { | |||||
340 | Instruction *Inst = MU->getMemoryInst(); | |||||
341 | MU->setDefiningAccess(Walker->getClobberingMemoryAccess(Inst)); | |||||
342 | } | |||||
343 | } | |||||
344 | } | |||||
345 | ||||||
346 | // Mark the uses in unreachable blocks as live on entry, so that they go | |||||
347 | // somewhere. | |||||
348 | for (auto &BB : F) | |||||
349 | if (!Visited.count(&BB)) | |||||
350 | markUnreachableAsLiveOnEntry(&BB); | |||||
351 | ||||||
352 | return Walker; | |||||
353 | } | |||||
354 | ||||||
355 | /// \brief Helper function to create new memory accesses | |||||
356 | MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) { | |||||
357 | // The assume intrinsic has a control dependency which we model by claiming | |||||
358 | // that it writes arbitrarily. Ignore that fake memory dependency here. | |||||
359 | // FIXME: Replace this special casing with a more accurate modelling of | |||||
360 | // assume's control dependency. | |||||
361 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) | |||||
362 | if (II->getIntrinsicID() == Intrinsic::assume) | |||||
363 | return nullptr; | |||||
364 | ||||||
365 | // Find out what effect this instruction has on memory.
366 | ModRefInfo ModRef = AA->getModRefInfo(I); | |||||
367 | bool Def = bool(ModRef & MRI_Mod); | |||||
368 | bool Use = bool(ModRef & MRI_Ref); | |||||
369 | ||||||
370 | // It's possible for an instruction not to modify memory at all; such
371 | // instructions are ignored during construction.
372 | if (!Def && !Use) | |||||
373 | return nullptr; | |||||
374 | ||||||
375 | assert((Def || Use) &&
376 |        "Trying to create a memory access with a non-memory instruction");
377 | ||||||
378 | MemoryUseOrDef *MUD; | |||||
379 | if (Def) | |||||
380 | MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++); | |||||
381 | else | |||||
382 | MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent()); | |||||
383 | ValueToMemoryAccess.insert(std::make_pair(I, MUD)); | |||||
384 | return MUD; | |||||
385 | } | |||||
386 | ||||||
387 | MemoryAccess *MemorySSA::findDominatingDef(BasicBlock *UseBlock, | |||||
388 | enum InsertionPlace Where) { | |||||
389 | // Handle the initial case | |||||
390 | if (Where == Beginning) | |||||
391 | // The only thing that could define us at the beginning is a phi node | |||||
392 | if (MemoryPhi *Phi = getMemoryAccess(UseBlock)) | |||||
393 | return Phi; | |||||
394 | ||||||
395 | DomTreeNode *CurrNode = DT->getNode(UseBlock); | |||||
396 | // Need to be defined by our dominator | |||||
397 | if (Where == Beginning) | |||||
398 | CurrNode = CurrNode->getIDom(); | |||||
399 | Where = End; | |||||
400 | while (CurrNode) { | |||||
401 | auto It = PerBlockAccesses.find(CurrNode->getBlock()); | |||||
402 | if (It != PerBlockAccesses.end()) { | |||||
403 | auto &Accesses = It->second; | |||||
404 | for (auto RAI = Accesses->rbegin(), RAE = Accesses->rend(); RAI != RAE; | |||||
405 | ++RAI) { | |||||
406 | if (isa<MemoryDef>(*RAI) || isa<MemoryPhi>(*RAI)) | |||||
407 | return &*RAI; | |||||
408 | } | |||||
409 | } | |||||
410 | CurrNode = CurrNode->getIDom(); | |||||
411 | } | |||||
412 | return LiveOnEntryDef.get(); | |||||
413 | } | |||||
414 | ||||||
415 | /// \brief Returns true if \p Replacer dominates \p Replacee . | |||||
416 | bool MemorySSA::dominatesUse(const MemoryAccess *Replacer, | |||||
417 | const MemoryAccess *Replacee) const { | |||||
418 | if (isa<MemoryUseOrDef>(Replacee)) | |||||
419 | return DT->dominates(Replacer->getBlock(), Replacee->getBlock()); | |||||
420 | const auto *MP = cast<MemoryPhi>(Replacee); | |||||
421 | // For a phi node, the use occurs in the predecessor block of the phi node. | |||||
422 | // Since Replacee may occur multiple times in the phi node, we have to check each
423 | // operand to ensure Replacer dominates each operand where Replacee occurs. | |||||
424 | for (const Use &Arg : MP->operands()) { | |||||
425 | if (Arg.get() != Replacee && | |||||
426 | !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg))) | |||||
427 | return false; | |||||
428 | } | |||||
429 | return true; | |||||
430 | } | |||||
431 | ||||||
432 | /// \brief If all arguments of a MemoryPHI are defined by the same incoming | |||||
433 | /// argument, return that argument. | |||||
434 | static MemoryAccess *onlySingleValue(MemoryPhi *MP) { | |||||
435 | MemoryAccess *MA = nullptr; | |||||
436 | ||||||
437 | for (auto &Arg : MP->operands()) { | |||||
438 | if (!MA) | |||||
439 | MA = cast<MemoryAccess>(Arg); | |||||
440 | else if (MA != Arg) | |||||
441 | return nullptr; | |||||
442 | } | |||||
443 | return MA; | |||||
444 | } | |||||
445 | ||||||
446 | /// \brief Properly remove \p MA from all of MemorySSA's lookup tables. | |||||
447 | /// | |||||
448 | /// Because of the way the intrusive list and use lists work, it is important to | |||||
449 | /// do removal in the right order. | |||||
450 | void MemorySSA::removeFromLookups(MemoryAccess *MA) { | |||||
451 | assert(MA->use_empty() &&
452 |        "Trying to remove memory access that still has uses");
453 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) | |||||
454 | MUD->setDefiningAccess(nullptr); | |||||
455 | // Invalidate our walker's cache if necessary | |||||
456 | if (!isa<MemoryUse>(MA)) | |||||
457 | Walker->invalidateInfo(MA); | |||||
458 | // The call below to erase will destroy MA, so we can't change the order we | |||||
459 | // are doing things here | |||||
460 | Value *MemoryInst; | |||||
461 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) { | |||||
462 | MemoryInst = MUD->getMemoryInst(); | |||||
463 | } else { | |||||
464 | MemoryInst = MA->getBlock(); | |||||
465 | } | |||||
466 | ValueToMemoryAccess.erase(MemoryInst); | |||||
467 | ||||||
468 | auto AccessIt = PerBlockAccesses.find(MA->getBlock()); | |||||
469 | std::unique_ptr<AccessListType> &Accesses = AccessIt->second; | |||||
470 | Accesses->erase(MA); | |||||
471 | if (Accesses->empty()) | |||||
472 | PerBlockAccesses.erase(AccessIt); | |||||
473 | } | |||||
474 | ||||||
475 | void MemorySSA::removeMemoryAccess(MemoryAccess *MA) { | |||||
476 | assert(!isLiveOnEntryDef(MA) && "Trying to remove the live on entry def");
477 | // We can only delete phi nodes if they have no uses, or we can replace all | |||||
478 | // uses with a single definition. | |||||
479 | MemoryAccess *NewDefTarget = nullptr; | |||||
480 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) { | |||||
481 | // Note that it is sufficient to know that all edges of the phi node have | |||||
482 | // the same argument. If they do, by the definition of dominance frontiers | |||||
483 | // (which we used to place this phi), that argument must dominate this phi, | |||||
484 | // and thus, must dominate the phi's uses, and so we will not hit the assert | |||||
485 | // below. | |||||
486 | NewDefTarget = onlySingleValue(MP); | |||||
487 | assert((NewDefTarget || MP->use_empty()) &&
488 |        "We can't delete this memory phi");
489 | } else { | |||||
490 | NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess(); | |||||
491 | } | |||||
492 | ||||||
493 | // Re-point the uses at our defining access | |||||
494 | if (!MA->use_empty()) | |||||
495 | MA->replaceAllUsesWith(NewDefTarget); | |||||
496 | ||||||
497 | // The call below to erase will destroy MA, so we can't change the order we | |||||
498 | // are doing things here | |||||
499 | removeFromLookups(MA); | |||||
500 | } | |||||
501 | ||||||
502 | void MemorySSA::print(raw_ostream &OS) const { | |||||
503 | MemorySSAAnnotatedWriter Writer(this); | |||||
504 | F.print(OS, &Writer); | |||||
505 | } | |||||
506 | ||||||
507 | void MemorySSA::dump() const { | |||||
508 | MemorySSAAnnotatedWriter Writer(this); | |||||
509 | F.print(dbgs(), &Writer); | |||||
510 | } | |||||
511 | ||||||
512 | void MemorySSA::verifyMemorySSA() const { | |||||
513 | verifyDefUses(F); | |||||
514 | verifyDomination(F); | |||||
515 | } | |||||
516 | ||||||
517 | /// \brief Verify the domination properties of MemorySSA by checking that each | |||||
518 | /// definition dominates all of its uses. | |||||
519 | void MemorySSA::verifyDomination(Function &F) const { | |||||
520 | for (BasicBlock &B : F) { | |||||
521 | // Phi nodes are attached to basic blocks | |||||
522 | if (MemoryPhi *MP = getMemoryAccess(&B)) { | |||||
523 | for (User *U : MP->users()) { | |||||
524 | BasicBlock *UseBlock; | |||||
525 | // Phi operands are used on edges; we simulate the right domination by
526 | // acting as if the use occurred at the end of the predecessor block. | |||||
527 | if (MemoryPhi *P = dyn_cast<MemoryPhi>(U)) { | |||||
528 | for (const auto &Arg : P->operands()) { | |||||
529 | if (Arg == MP) { | |||||
530 | UseBlock = P->getIncomingBlock(Arg); | |||||
531 | break; | |||||
532 | } | |||||
533 | } | |||||
534 | } else { | |||||
535 | UseBlock = cast<MemoryAccess>(U)->getBlock(); | |||||
536 | } | |||||
537 | (void)UseBlock; | |||||
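// Editor note (not part of the source): the same pattern as the flagged code
// at line 561 appears here. UseBlock (line 524) is only assigned when a
// matching phi operand is found, so the assert below relies on the same
// def-use invariant.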
538 | assert(DT->dominates(MP->getBlock(), UseBlock) &&
539 |        "Memory PHI does not dominate it's uses");
540 | } | |||||
541 | } | |||||
542 | ||||||
543 | for (Instruction &I : B) { | |||||
544 | MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I)); | |||||
545 | if (!MD) | |||||
546 | continue; | |||||
547 | ||||||
548 | for (User *U : MD->users()) { | |||||
549 | BasicBlock *UseBlock; (void)UseBlock; | |||||
550 | // Things are allowed to flow to phi nodes over their predecessor edge. | |||||
551 | if (auto *P = dyn_cast<MemoryPhi>(U)) { | |||||
552 | for (const auto &Arg : P->operands()) { | |||||
553 | if (Arg == MD) { | |||||
554 | UseBlock = P->getIncomingBlock(Arg); | |||||
555 | break; | |||||
556 | } | |||||
557 | } | |||||
558 | } else { | |||||
559 | UseBlock = cast<MemoryAccess>(U)->getBlock(); | |||||
560 | } | |||||
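// Analyzer path (editor note, not part of the source): UseBlock is declared
// uninitialized at line 549. If U is a MemoryPhi none of whose operands
// compares equal to MD, the loop above finishes without assigning UseBlock,
// and the uninitialized pointer is passed to DT->dominates() in the assert on
// line 561 below. Assuming consistent def-use lists (checked separately by
// verifyDefUses), a phi that uses MD always carries MD as an incoming value,
// so the path should be unreachable in practice; the checker cannot prove
// that from this function alone.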
561 | assert(DT->dominates(MD->getBlock(), UseBlock) &&
562 |        "Memory Def does not dominate it's uses");
563 | } | |||||
564 | } | |||||
565 | } | |||||
566 | } | |||||
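// Possible hardening (editor sketch, not the upstream fix): initializing
// UseBlock and asserting that a matching operand was found would make the
// invariant explicit for the analyzer. The first assert message below is
// illustrative only; everything else reuses names from the function above.
//
//   for (User *U : MD->users()) {
//     BasicBlock *UseBlock = nullptr;
//     if (auto *P = dyn_cast<MemoryPhi>(U)) {
//       for (const auto &Arg : P->operands())
//         if (Arg == MD) {
//           UseBlock = P->getIncomingBlock(Arg);
//           break;
//         }
//     } else {
//       UseBlock = cast<MemoryAccess>(U)->getBlock();
//     }
//     (void)UseBlock;
//     assert(UseBlock && "Use of a MemoryDef not found among phi operands");
//     assert(DT->dominates(MD->getBlock(), UseBlock) &&
//            "Memory Def does not dominate it's uses");
//   }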
567 | ||||||
568 | /// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use | |||||
569 | /// appears in the use list of \p Def. | |||||
570 | /// | |||||
571 | /// llvm_unreachable is used instead of asserts because this may be called in | |||||
572 | /// a build without asserts. In that case, we don't want this to turn into a | |||||
573 | /// nop. | |||||
574 | void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { | |||||
575 | // The live on entry use may cause us to get a NULL def here | |||||
576 | if (!Def) { | |||||
577 | if (!isLiveOnEntryDef(Use)) | |||||
578 | llvm_unreachable("Null def but use not point to live on entry def")::llvm::llvm_unreachable_internal("Null def but use not point to live on entry def" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn271203/lib/Transforms/Utils/MemorySSA.cpp" , 578); | |||||
579 | } else if (std::find(Def->user_begin(), Def->user_end(), Use) == | |||||
580 | Def->user_end()) { | |||||
581 | llvm_unreachable("Did not find use in def's use list")::llvm::llvm_unreachable_internal("Did not find use in def's use list" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn271203/lib/Transforms/Utils/MemorySSA.cpp" , 581); | |||||
582 | } | |||||
583 | } | |||||
584 | ||||||
585 | /// \brief Verify the immediate use information, by walking all the memory | |||||
586 | /// accesses and verifying that, for each use, it appears in the | |||||
587 | /// appropriate def's use list | |||||
588 | void MemorySSA::verifyDefUses(Function &F) const { | |||||
589 | for (BasicBlock &B : F) { | |||||
590 | // Phi nodes are attached to basic blocks | |||||
591 | if (MemoryPhi *Phi = getMemoryAccess(&B)) | |||||
592 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) | |||||
593 | verifyUseInDefs(Phi->getIncomingValue(I), Phi); | |||||
594 | ||||||
595 | for (Instruction &I : B) { | |||||
596 | if (MemoryAccess *MA = getMemoryAccess(&I)) { | |||||
597 | assert(isa<MemoryUseOrDef>(MA) &&
598 |        "Found a phi node not attached to a bb");
599 | verifyUseInDefs(cast<MemoryUseOrDef>(MA)->getDefiningAccess(), MA); | |||||
600 | } | |||||
601 | } | |||||
602 | } | |||||
603 | } | |||||
604 | ||||||
605 | MemoryAccess *MemorySSA::getMemoryAccess(const Value *I) const { | |||||
606 | return ValueToMemoryAccess.lookup(I); | |||||
607 | } | |||||
608 | ||||||
609 | MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const { | |||||
610 | return cast_or_null<MemoryPhi>(getMemoryAccess((const Value *)BB)); | |||||
611 | } | |||||
612 | ||||||
613 | /// \brief Determine, for two memory accesses in the same block, | |||||
614 | /// whether \p Dominator dominates \p Dominatee. | |||||
615 | /// \returns True if \p Dominator dominates \p Dominatee. | |||||
616 | bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, | |||||
617 | const MemoryAccess *Dominatee) const { | |||||
618 | ||||||
619 | assert((Dominator->getBlock() == Dominatee->getBlock()) &&
620 |        "Asking for local domination when accesses are in different blocks!");
621 | // Get the access list for the block | |||||
622 | const AccessListType *AccessList = getBlockAccesses(Dominator->getBlock()); | |||||
623 | AccessListType::const_reverse_iterator It(Dominator->getIterator()); | |||||
624 | ||||||
625 | // If we hit the beginning of the access list before we hit dominatee, we must | |||||
626 | // dominate it | |||||
627 | return std::none_of(It, AccessList->rend(), | |||||
628 | [&](const MemoryAccess &MA) { return &MA == Dominatee; }); | |||||
629 | } | |||||
630 | ||||||
631 | const static char LiveOnEntryStr[] = "liveOnEntry"; | |||||
632 | ||||||
633 | void MemoryDef::print(raw_ostream &OS) const { | |||||
634 | MemoryAccess *UO = getDefiningAccess(); | |||||
635 | ||||||
636 | OS << getID() << " = MemoryDef("; | |||||
637 | if (UO && UO->getID()) | |||||
638 | OS << UO->getID(); | |||||
639 | else | |||||
640 | OS << LiveOnEntryStr; | |||||
641 | OS << ')'; | |||||
642 | } | |||||
643 | ||||||
644 | void MemoryPhi::print(raw_ostream &OS) const { | |||||
645 | bool First = true; | |||||
646 | OS << getID() << " = MemoryPhi("; | |||||
647 | for (const auto &Op : operands()) { | |||||
648 | BasicBlock *BB = getIncomingBlock(Op); | |||||
649 | MemoryAccess *MA = cast<MemoryAccess>(Op); | |||||
650 | if (!First) | |||||
651 | OS << ','; | |||||
652 | else | |||||
653 | First = false; | |||||
654 | ||||||
655 | OS << '{'; | |||||
656 | if (BB->hasName()) | |||||
657 | OS << BB->getName(); | |||||
658 | else | |||||
659 | BB->printAsOperand(OS, false); | |||||
660 | OS << ','; | |||||
661 | if (unsigned ID = MA->getID()) | |||||
662 | OS << ID; | |||||
663 | else | |||||
664 | OS << LiveOnEntryStr; | |||||
665 | OS << '}'; | |||||
666 | } | |||||
667 | OS << ')'; | |||||
668 | } | |||||
669 | ||||||
670 | MemoryAccess::~MemoryAccess() {} | |||||
671 | ||||||
672 | void MemoryUse::print(raw_ostream &OS) const { | |||||
673 | MemoryAccess *UO = getDefiningAccess(); | |||||
674 | OS << "MemoryUse("; | |||||
675 | if (UO && UO->getID()) | |||||
676 | OS << UO->getID(); | |||||
677 | else | |||||
678 | OS << LiveOnEntryStr; | |||||
679 | OS << ')'; | |||||
680 | } | |||||
681 | ||||||
682 | void MemoryAccess::dump() const { | |||||
683 | print(dbgs()); | |||||
684 | dbgs() << "\n"; | |||||
685 | } | |||||
686 | ||||||
687 | char MemorySSAPrinterPass::ID = 0; | |||||
688 | ||||||
689 | MemorySSAPrinterPass::MemorySSAPrinterPass() : FunctionPass(ID) { | |||||
690 | initializeMemorySSAPrinterPassPass(*PassRegistry::getPassRegistry()); | |||||
691 | } | |||||
692 | ||||||
693 | void MemorySSAPrinterPass::releaseMemory() { | |||||
694 | // Subtlety: Be sure to delete the walker before MSSA, because the walker's | |||||
695 | // dtor may try to access MemorySSA. | |||||
696 | Walker.reset(); | |||||
697 | MSSA.reset(); | |||||
698 | } | |||||
699 | ||||||
700 | void MemorySSAPrinterPass::getAnalysisUsage(AnalysisUsage &AU) const { | |||||
701 | AU.setPreservesAll(); | |||||
702 | AU.addRequired<AAResultsWrapperPass>(); | |||||
703 | AU.addRequired<DominatorTreeWrapperPass>(); | |||||
704 | AU.addPreserved<DominatorTreeWrapperPass>(); | |||||
705 | AU.addPreserved<GlobalsAAWrapperPass>(); | |||||
706 | } | |||||
707 | ||||||
708 | bool MemorySSAPrinterPass::doInitialization(Module &M) { | |||||
709 | VerifyMemorySSA = M.getContext() | |||||
710 | .getOption<bool, MemorySSAPrinterPass, | |||||
711 | &MemorySSAPrinterPass::VerifyMemorySSA>(); | |||||
712 | return false; | |||||
713 | } | |||||
714 | ||||||
715 | void MemorySSAPrinterPass::registerOptions() { | |||||
716 | OptionRegistry::registerOption<bool, MemorySSAPrinterPass, | |||||
717 | &MemorySSAPrinterPass::VerifyMemorySSA>( | |||||
718 | "verify-memoryssa", "Run the Memory SSA verifier", false); | |||||
719 | } | |||||
720 | ||||||
721 | void MemorySSAPrinterPass::print(raw_ostream &OS, const Module *M) const { | |||||
722 | MSSA->print(OS); | |||||
723 | } | |||||
724 | ||||||
725 | bool MemorySSAPrinterPass::runOnFunction(Function &F) { | |||||
726 | this->F = &F; | |||||
727 | MSSA.reset(new MemorySSA(F)); | |||||
728 | AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | |||||
729 | DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | |||||
730 | Walker.reset(MSSA->buildMemorySSA(AA, DT)); | |||||
731 | ||||||
732 | if (VerifyMemorySSA) { | |||||
| ||||||
733 | MSSA->verifyMemorySSA(); | |||||
734 | } | |||||
735 | ||||||
736 | return false; | |||||
737 | } | |||||
738 | ||||||
739 | char MemorySSALazy::ID = 0; | |||||
740 | ||||||
741 | MemorySSALazy::MemorySSALazy() : FunctionPass(ID) { | |||||
742 | initializeMemorySSALazyPass(*PassRegistry::getPassRegistry()); | |||||
743 | } | |||||
744 | ||||||
745 | void MemorySSALazy::releaseMemory() { MSSA.reset(); } | |||||
746 | ||||||
747 | bool MemorySSALazy::runOnFunction(Function &F) { | |||||
748 | MSSA.reset(new MemorySSA(F)); | |||||
749 | return false; | |||||
750 | } | |||||
751 | ||||||
752 | MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {} | |||||
753 | ||||||
754 | CachingMemorySSAWalker::CachingMemorySSAWalker(MemorySSA *M, AliasAnalysis *A, | |||||
755 | DominatorTree *D) | |||||
756 | : MemorySSAWalker(M), AA(A), DT(D) {} | |||||
757 | ||||||
758 | CachingMemorySSAWalker::~CachingMemorySSAWalker() {} | |||||
759 | ||||||
760 | struct CachingMemorySSAWalker::UpwardsMemoryQuery { | |||||
761 | // True if we saw a phi whose predecessor was a backedge | |||||
762 | bool SawBackedgePhi; | |||||
763 | // True if our original query started off as a call | |||||
764 | bool IsCall; | |||||
765 | // The pointer location we started the query with. This will be empty if | |||||
766 | // IsCall is true. | |||||
767 | MemoryLocation StartingLoc; | |||||
768 | // This is the instruction we were querying about. | |||||
769 | const Instruction *Inst; | |||||
770 | // Set of visited Instructions for this query. | |||||
771 | DenseSet<MemoryAccessPair> Visited; | |||||
772 | // Vector of visited call accesses for this query. This is separated out | |||||
773 | // because you can always cache and lookup the result of call queries (IE when | |||||
774 | // IsCall == true) for every call in the chain. The calls have no AA location | |||||
775 | // associated with them, and thus, no context dependence.
776 | SmallVector<const MemoryAccess *, 32> VisitedCalls; | |||||
777 | // The MemoryAccess we actually got called with, used to test local domination | |||||
778 | const MemoryAccess *OriginalAccess; | |||||
779 | // The Datalayout for the module we started in | |||||
780 | const DataLayout *DL; | |||||
781 | ||||||
782 | UpwardsMemoryQuery() | |||||
783 | : SawBackedgePhi(false), IsCall(false), Inst(nullptr), | |||||
784 | OriginalAccess(nullptr), DL(nullptr) {} | |||||
785 | }; | |||||
786 | ||||||
787 | void CachingMemorySSAWalker::invalidateInfo(MemoryAccess *MA) { | |||||
788 | ||||||
789 | // TODO: We can do much better cache invalidation with differently stored | |||||
790 | // caches. For now, for MemoryUses, we simply remove them | |||||
791 | // from the cache, and kill the entire call/non-call cache for everything | |||||
792 | // else. The problem is for phis or defs, currently we'd need to follow use | |||||
793 | // chains down and invalidate anything below us in the chain that currently | |||||
794 | // terminates at this access. | |||||
795 | ||||||
796 | // See if this is a MemoryUse, if so, just remove the cached info. MemoryUse | |||||
797 | // is by definition never a barrier, so nothing in the cache could point to | |||||
798 | // this use. In that case, we only need to invalidate the info for the use
799 | // itself. | |||||
800 | ||||||
801 | if (MemoryUse *MU = dyn_cast<MemoryUse>(MA)) { | |||||
802 | UpwardsMemoryQuery Q; | |||||
803 | Instruction *I = MU->getMemoryInst(); | |||||
804 | Q.IsCall = bool(ImmutableCallSite(I)); | |||||
805 | Q.Inst = I; | |||||
806 | if (!Q.IsCall) | |||||
807 | Q.StartingLoc = MemoryLocation::get(I); | |||||
808 | doCacheRemove(MA, Q, Q.StartingLoc); | |||||
809 | } else { | |||||
810 | // If it is not a use, the best we can do right now is destroy the cache. | |||||
811 | CachedUpwardsClobberingCall.clear(); | |||||
812 | CachedUpwardsClobberingAccess.clear(); | |||||
813 | } | |||||
814 | ||||||
815 | #ifdef EXPENSIVE_CHECKS | |||||
816 | // Run this only when expensive checks are enabled. | |||||
817 | verifyRemoved(MA); | |||||
818 | #endif | |||||
819 | } | |||||
820 | ||||||
821 | void CachingMemorySSAWalker::doCacheRemove(const MemoryAccess *M, | |||||
822 | const UpwardsMemoryQuery &Q, | |||||
823 | const MemoryLocation &Loc) { | |||||
824 | if (Q.IsCall) | |||||
825 | CachedUpwardsClobberingCall.erase(M); | |||||
826 | else | |||||
827 | CachedUpwardsClobberingAccess.erase({M, Loc}); | |||||
828 | } | |||||
829 | ||||||
830 | void CachingMemorySSAWalker::doCacheInsert(const MemoryAccess *M, | |||||
831 | MemoryAccess *Result, | |||||
832 | const UpwardsMemoryQuery &Q, | |||||
833 | const MemoryLocation &Loc) { | |||||
834 | // This is fine for Phis, since there are times where we can't optimize them. | |||||
835 | // Making a def its own clobber is never correct, though. | |||||
836 | assert((Result != M || isa<MemoryPhi>(M)) &&
837 |        "Something can't clobber itself!");
838 | ++NumClobberCacheInserts; | |||||
839 | if (Q.IsCall) | |||||
840 | CachedUpwardsClobberingCall[M] = Result; | |||||
841 | else | |||||
842 | CachedUpwardsClobberingAccess[{M, Loc}] = Result; | |||||
843 | } | |||||
844 | ||||||
845 | MemoryAccess *CachingMemorySSAWalker::doCacheLookup(const MemoryAccess *M, | |||||
846 | const UpwardsMemoryQuery &Q, | |||||
847 | const MemoryLocation &Loc) { | |||||
848 | ++NumClobberCacheLookups; | |||||
849 | MemoryAccess *Result = nullptr; | |||||
850 | ||||||
851 | if (Q.IsCall) | |||||
852 | Result = CachedUpwardsClobberingCall.lookup(M); | |||||
853 | else | |||||
854 | Result = CachedUpwardsClobberingAccess.lookup({M, Loc}); | |||||
855 | ||||||
856 | if (Result) | |||||
857 | ++NumClobberCacheHits; | |||||
858 | return Result; | |||||
859 | } | |||||
860 | ||||||
861 | bool CachingMemorySSAWalker::instructionClobbersQuery( | |||||
862 | const MemoryDef *MD, UpwardsMemoryQuery &Q, | |||||
863 | const MemoryLocation &Loc) const { | |||||
864 | Instruction *DefMemoryInst = MD->getMemoryInst(); | |||||
865 | assert(DefMemoryInst && "Defining instruction not actually an instruction");
866 | ||||||
867 | if (!Q.IsCall) | |||||
868 | return AA->getModRefInfo(DefMemoryInst, Loc) & MRI_Mod; | |||||
869 | ||||||
870 | // If this is a call, mark it for caching | |||||
871 | if (ImmutableCallSite(DefMemoryInst)) | |||||
872 | Q.VisitedCalls.push_back(MD); | |||||
873 | ModRefInfo I = AA->getModRefInfo(DefMemoryInst, ImmutableCallSite(Q.Inst)); | |||||
874 | return I != MRI_NoModRef; | |||||
875 | } | |||||
876 | ||||||
877 | MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk( | |||||
878 | MemoryAccess *StartingAccess, const MemoryLocation &Loc, | |||||
879 | UpwardsMemoryQuery &Q, bool FollowingBackedge) { | |||||
880 | MemoryAccess *ModifyingAccess = nullptr; | |||||
881 | ||||||
882 | auto DFI = df_begin(StartingAccess); | |||||
883 | for (auto DFE = df_end(StartingAccess); DFI != DFE;) { | |||||
884 | MemoryAccess *CurrAccess = *DFI; | |||||
885 | if (MSSA->isLiveOnEntryDef(CurrAccess)) | |||||
886 | return {CurrAccess, Loc}; | |||||
887 | // If this is a MemoryDef, check whether it clobbers our current query. This | |||||
888 | // needs to be done before consulting the cache, because the cache reports | |||||
889 | // the clobber for CurrAccess. If CurrAccess is a clobber for this query, | |||||
890 | // and we ask the cache for information first, then we might skip this | |||||
891 | // clobber, which is bad. | |||||
892 | if (auto *MD = dyn_cast<MemoryDef>(CurrAccess)) { | |||||
893 | // If we hit the top, stop following this path. | |||||
894 | // While we can do lookups, we can't sanely do inserts here unless we were | |||||
895 | // to track everything we saw along the way, since we don't know where we | |||||
896 | // will stop. | |||||
897 | if (instructionClobbersQuery(MD, Q, Loc)) { | |||||
898 | ModifyingAccess = CurrAccess; | |||||
899 | break; | |||||
900 | } | |||||
901 | } | |||||
902 | if (auto CacheResult = doCacheLookup(CurrAccess, Q, Loc)) | |||||
903 | return {CacheResult, Loc}; | |||||
904 | ||||||
905 | // We need to know whether it is a phi so we can track backedges. | |||||
906 | // Otherwise, walk all upward defs. | |||||
907 | if (!isa<MemoryPhi>(CurrAccess)) { | |||||
908 | ++DFI; | |||||
909 | continue; | |||||
910 | } | |||||
911 | ||||||
912 | #ifndef NDEBUG | |||||
913 | // The loop below visits the phi's children for us. Because phis are the | |||||
914 | // only things with multiple edges, skipping the children should always lead | |||||
915 | // us to the end of the loop. | |||||
916 | // | |||||
917 | // Use a copy of DFI because skipChildren would kill our search stack, which | |||||
918 | // would make caching anything on the way back impossible. | |||||
919 | auto DFICopy = DFI; | |||||
920 | assert(DFICopy.skipChildren() == DFE &&
921 |        "Skipping phi's children doesn't end the DFS?");
922 | #endif | |||||
923 | ||||||
924 | const MemoryAccessPair PHIPair(CurrAccess, Loc); | |||||
925 | ||||||
926 | // Don't try to optimize this phi again if we've already tried to do so. | |||||
927 | if (!Q.Visited.insert(PHIPair).second) { | |||||
928 | ModifyingAccess = CurrAccess; | |||||
929 | break; | |||||
930 | } | |||||
931 | ||||||
932 | std::size_t InitialVisitedCallSize = Q.VisitedCalls.size(); | |||||
933 | ||||||
934 | // Recurse on PHI nodes, since we need to change locations. | |||||
935 | // TODO: Allow graphtraits on pairs, which would turn this whole function | |||||
936 | // into a normal single depth first walk. | |||||
937 | MemoryAccess *FirstDef = nullptr; | |||||
938 | for (auto MPI = upward_defs_begin(PHIPair), MPE = upward_defs_end(); | |||||
939 | MPI != MPE; ++MPI) { | |||||
940 | bool Backedge = | |||||
941 | !FollowingBackedge && | |||||
942 | DT->dominates(CurrAccess->getBlock(), MPI.getPhiArgBlock()); | |||||
943 | ||||||
944 | MemoryAccessPair CurrentPair = | |||||
945 | UpwardsDFSWalk(MPI->first, MPI->second, Q, Backedge); | |||||
946 | // All the phi arguments should reach the same point if we can bypass | |||||
947 | // this phi. The alternative is that they hit this phi node, which | |||||
948 | // means we can skip this argument. | |||||
949 | if (FirstDef && CurrentPair.first != PHIPair.first && | |||||
950 | CurrentPair.first != FirstDef) { | |||||
951 | ModifyingAccess = CurrAccess; | |||||
952 | break; | |||||
953 | } | |||||
954 | ||||||
955 | if (!FirstDef) | |||||
956 | FirstDef = CurrentPair.first; | |||||
957 | } | |||||
958 | ||||||
959 | // If we exited the loop early, go with the result it gave us. | |||||
960 | if (!ModifyingAccess) { | |||||
961 | assert(FirstDef && "Found a Phi with no upward defs?");
962 | ModifyingAccess = FirstDef; | |||||
963 | } else { | |||||
964 | // If we can't optimize this Phi, then we can't safely cache any of the | |||||
965 | // calls we visited when trying to optimize it. Wipe them out now. | |||||
966 | Q.VisitedCalls.resize(InitialVisitedCallSize); | |||||
967 | } | |||||
968 | break; | |||||
969 | } | |||||
970 | ||||||
971 | if (!ModifyingAccess) | |||||
972 | return {MSSA->getLiveOnEntryDef(), Q.StartingLoc}; | |||||
973 | ||||||
974 | const BasicBlock *OriginalBlock = StartingAccess->getBlock(); | |||||
975 | assert(DFI.getPathLength() > 0 && "We dropped our path?");
976 | unsigned N = DFI.getPathLength(); | |||||
977 | // If we found a clobbering def, the last element in the path will be our | |||||
978 | // clobber, so we don't want to cache that to itself. OTOH, if we optimized a | |||||
979 | // phi, we can add the last thing in the path to the cache, since that won't | |||||
980 | // be the result. | |||||
981 | if (DFI.getPath(N - 1) == ModifyingAccess) | |||||
982 | --N; | |||||
983 | for (; N > 1; --N) { | |||||
984 | MemoryAccess *CacheAccess = DFI.getPath(N - 1); | |||||
985 | BasicBlock *CurrBlock = CacheAccess->getBlock(); | |||||
986 | if (!FollowingBackedge) | |||||
987 | doCacheInsert(CacheAccess, ModifyingAccess, Q, Loc); | |||||
988 | if (DT->dominates(CurrBlock, OriginalBlock) && | |||||
989 | (CurrBlock != OriginalBlock || !FollowingBackedge || | |||||
990 | MSSA->locallyDominates(CacheAccess, StartingAccess))) | |||||
991 | break; | |||||
992 | } | |||||
993 | ||||||
994 | // Cache everything else on the way back. The caller should cache | |||||
995 | // StartingAccess for us. | |||||
996 | for (; N > 1; --N) { | |||||
997 | MemoryAccess *CacheAccess = DFI.getPath(N - 1); | |||||
998 | doCacheInsert(CacheAccess, ModifyingAccess, Q, Loc); | |||||
999 | } | |||||
1000 | assert(Q.Visited.size() < 1000 && "Visited too much"); | |||||
1001 | ||||||
1002 | return {ModifyingAccess, Loc}; | |||||
1003 | } | |||||
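// Note on the caching at the end of the walk above: the path discovered by
// the DFS is cached in two phases. The first loop walks back from the
// deepest access and inserts cache entries only when we are not following a
// backedge, stopping once it reaches an access whose block dominates the
// original block (with an extra local-dominance check for the same-block,
// backedge case); past that point the second loop caches the remaining path
// entries unconditionally. StartingAccess itself is left for the caller to
// cache.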
1004 | ||||||
1005 | /// \brief Walk the use-def chains starting at \p MA and find | |||||
1006 | /// the MemoryAccess that actually clobbers Loc. | |||||
1007 | /// | |||||
1008 | /// \returns our clobbering memory access | |||||
1009 | MemoryAccess * | |||||
1010 | CachingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *StartingAccess, | |||||
1011 | UpwardsMemoryQuery &Q) { | |||||
1012 | return UpwardsDFSWalk(StartingAccess, Q.StartingLoc, Q, false).first; | |||||
1013 | } | |||||
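// Note: the overload above only kicks off the upwards DFS for the query; it
// does not insert a cache entry for StartingAccess itself. The two overloads
// below cache the result via doCacheInsert, skipping the case where that
// would point an access at itself.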
1014 | ||||||
1015 | MemoryAccess * | |||||
1016 | CachingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *StartingAccess, | |||||
1017 | MemoryLocation &Loc) { | |||||
1018 | if (isa<MemoryPhi>(StartingAccess)) | |||||
1019 | return StartingAccess; | |||||
1020 | ||||||
1021 | auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess); | |||||
1022 | if (MSSA->isLiveOnEntryDef(StartingUseOrDef)) | |||||
1023 | return StartingUseOrDef; | |||||
1024 | ||||||
1025 | Instruction *I = StartingUseOrDef->getMemoryInst(); | |||||
1026 | ||||||
1027 | // Conservatively, fences are always clobbers, so don't perform the walk if we | |||||
1028 | // hit a fence. | |||||
1029 | if (isa<FenceInst>(I)) | |||||
1030 | return StartingUseOrDef; | |||||
1031 | ||||||
1032 | UpwardsMemoryQuery Q; | |||||
1033 | Q.OriginalAccess = StartingUseOrDef; | |||||
1034 | Q.StartingLoc = Loc; | |||||
1035 | Q.Inst = StartingUseOrDef->getMemoryInst(); | |||||
1036 | Q.IsCall = false; | |||||
1037 | Q.DL = &Q.Inst->getModule()->getDataLayout(); | |||||
1038 | ||||||
1039 | if (auto CacheResult = doCacheLookup(StartingUseOrDef, Q, Q.StartingLoc)) | |||||
1040 | return CacheResult; | |||||
1041 | ||||||
1042 | // Unlike the other function, do not walk to the def of a def, because we are | |||||
1043 | // handed something we already believe is the clobbering access. | |||||
1044 | MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef) | |||||
1045 | ? StartingUseOrDef->getDefiningAccess() | |||||
1046 | : StartingUseOrDef; | |||||
1047 | ||||||
1048 | MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q); | |||||
1049 | // Only cache this if it wouldn't make Clobber point to itself. | |||||
1050 | if (Clobber != StartingAccess) | |||||
1051 | doCacheInsert(Q.OriginalAccess, Clobber, Q, Q.StartingLoc); | |||||
1052 | DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); | |||||
1053 | DEBUG(dbgs() << *StartingUseOrDef << "\n"); | |||||
1054 | DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is "); | |||||
1055 | DEBUG(dbgs() << *Clobber << "\n"); | |||||
1056 | return Clobber; | |||||
1057 | } | |||||
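// A minimal sketch of how the location-based overload above might be used,
// with hypothetical names (`exampleLocQuery`, `Walker`, `MSSA`, `SI`); it
// assumes MemorySSA has already been built for SI's function and only
// illustrates a client-side call.
static MemoryAccess *exampleLocQuery(MemorySSA *MSSA, MemorySSAWalker *Walker,
                                     StoreInst *SI) {
  MemoryLocation Loc = MemoryLocation::get(SI);
  // Start from SI's own access and walk upward to whatever clobbers Loc; the
  // walker caches the answer unless it would make the access point at itself.
  return Walker->getClobberingMemoryAccess(MSSA->getMemoryAccess(SI), Loc);
}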
1058 | ||||||
1059 | MemoryAccess * | |||||
1060 | CachingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) { | |||||
1061 | // There should be no way to look up an instruction and get a phi as the | |||||
1062 | // access, since we only map basic blocks to phis. So this must be a use or def. | |||||
1063 | auto *StartingAccess = cast<MemoryUseOrDef>(MSSA->getMemoryAccess(I)); | |||||
1064 | ||||||
1065 | // We can't sanely do anything with a FenceInst; it conservatively clobbers | |||||
1066 | // all memory and has no memory location from which we could get a pointer | |||||
1067 | // to try to disambiguate. | |||||
1068 | if (isa<FenceInst>(I)) | |||||
1069 | return StartingAccess; | |||||
1070 | ||||||
1071 | UpwardsMemoryQuery Q; | |||||
1072 | Q.OriginalAccess = StartingAccess; | |||||
1073 | Q.IsCall = bool(ImmutableCallSite(I)); | |||||
1074 | if (!Q.IsCall) | |||||
1075 | Q.StartingLoc = MemoryLocation::get(I); | |||||
1076 | Q.Inst = I; | |||||
1077 | Q.DL = &Q.Inst->getModule()->getDataLayout(); | |||||
1078 | if (auto CacheResult = doCacheLookup(StartingAccess, Q, Q.StartingLoc)) | |||||
1079 | return CacheResult; | |||||
1080 | ||||||
1081 | // Start with the thing we already think clobbers this location | |||||
1082 | MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess(); | |||||
1083 | ||||||
1084 | // At this point, DefiningAccess may be the live on entry def. | |||||
1085 | // If it is, we will not get a better result. | |||||
1086 | if (MSSA->isLiveOnEntryDef(DefiningAccess)) | |||||
1087 | return DefiningAccess; | |||||
1088 | ||||||
1089 | MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q); | |||||
1090 | // DFS won't cache a result for DefiningAccess. So, if DefiningAccess isn't | |||||
1091 | // our clobber, be sure that it gets a cache entry, too. | |||||
1092 | if (Result != DefiningAccess) | |||||
1093 | doCacheInsert(DefiningAccess, Result, Q, Q.StartingLoc); | |||||
1094 | doCacheInsert(Q.OriginalAccess, Result, Q, Q.StartingLoc); | |||||
1095 | // TODO: When this implementation is more mature, we may want to figure out | |||||
1096 | // what this additional caching buys us. It's most likely A Good Thing. | |||||
1097 | if (Q.IsCall) | |||||
1098 | for (const MemoryAccess *MA : Q.VisitedCalls) | |||||
1099 | if (MA != Result) | |||||
1100 | doCacheInsert(MA, Result, Q, Q.StartingLoc); | |||||
1101 | ||||||
1102 | DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); | |||||
1103 | DEBUG(dbgs() << *DefiningAccess << "\n"); | |||||
1104 | DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is "); | |||||
1105 | DEBUG(dbgs() << *Result << "\n"); | |||||
1106 | ||||||
1107 | return Result; | |||||
1108 | } | |||||
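// A minimal usage sketch for the instruction-based entry point above, with
// hypothetical names (`exampleQuery`, `Walker`, `MSSA`, `LI`); it assumes
// MemorySSA has already been built for LI's function and only illustrates a
// typical client-side query.
static bool exampleQuery(MemorySSA *MSSA, MemorySSAWalker *Walker,
                         LoadInst *LI) {
  // Ask which access actually clobbers the memory LI reads; defs that
  // provably do not alias LI's location are skipped, and the answer (plus
  // much of the path used to find it) is cached for later queries.
  MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LI);
  // A live-on-entry result means nothing in this function writes LI's
  // location before LI executes.
  return MSSA->isLiveOnEntryDef(Clobber);
}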
1109 | ||||||
1110 | // Verify that MA doesn't exist in any of the caches. | |||||
1111 | void CachingMemorySSAWalker::verifyRemoved(MemoryAccess *MA) { | |||||
1112 | #ifndef NDEBUG | |||||
1113 | for (auto &P : CachedUpwardsClobberingAccess) | |||||
1114 | assert(P.first.first != MA && P.second != MA && | |||||
1115 | "Found removed MemoryAccess in cache."); | |||||
1116 | for (auto &P : CachedUpwardsClobberingCall) | |||||
1117 | assert(P.first != MA && P.second != MA && | |||||
1118 | "Found removed MemoryAccess in cache."); | |||||
1119 | #endif // !NDEBUG | |||||
1120 | } | |||||
1121 | ||||||
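// The DoNothingMemorySSAWalker below performs no alias-analysis-based
// skipping at all: each query simply returns the access's defining access,
// or the access itself when it is a MemoryPhi.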
1122 | MemoryAccess * | |||||
1123 | DoNothingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) { | |||||
1124 | MemoryAccess *MA = MSSA->getMemoryAccess(I); | |||||
1125 | if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) | |||||
1126 | return Use->getDefiningAccess(); | |||||
1127 | return MA; | |||||
1128 | } | |||||
1129 | ||||||
1130 | MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( | |||||
1131 | MemoryAccess *StartingAccess, MemoryLocation &) { | |||||
1132 | if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) | |||||
1133 | return Use->getDefiningAccess(); | |||||
1134 | return StartingAccess; | |||||
1135 | } | |||||
1136 | } |