File: llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
Warning: line 1288, column 32: Called C++ object pointer is null
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

static cl::opt<bool>
    EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(true), cl::Hidden,
                    cl::desc("Use MemorySSA-backed MemCpyOpt."));

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
STATISTIC(NumCallSlot, "Number of call slot optimizations performed");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found more than 4 stores to merge or 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
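  //
  // Worked example (illustrative numbers, not taken from the pass itself):
  // with 64-bit GPRs, MaxIntSize below is 8, so a range of Bytes = 10 lowers
  // to NumPointerStores = 10 / 8 = 1 wide store plus NumByteStores =
  // 10 % 8 = 2 byte stores, i.e. 3 stores total. Merging is therefore only
  // predicted to pay off if the range was built from 4 or more stores.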
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
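///
/// Example trace (illustrative): with existing ranges [0, 2) and [4, 8),
/// addRange(Start=2, Size=3, ...) overlaps the first range at its open end,
/// extends it to [0, 5), and then folds the now-adjacent [4, 8) into it,
/// leaving the single range [0, 8).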
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator and postdominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (!EnableMemorySSA)
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    if (EnableMemorySSA)
      AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

// Check that V is either not accessible by the caller, or unwinding cannot
// occur between Start and End.
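// For example (illustrative): if V is a caller-visible buffer and some
// instruction between Start and End may throw, reordering a write to V ahead
// of that instruction would let an exception handler observe the early write.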
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
                                         Instruction *End) {
  assert(Start->getParent() == End->getParent() && "Must be in same block");
  if (!Start->getFunction()->doesNotThrow() &&
      !isa<AllocaInst>(getUnderlyingObject(V))) {
    for (const Instruction &I :
         make_range(Start->getIterator(), End->getIterator())) {
      if (I.mayThrow())
        return true;
    }
  }
  return false;
}

void MemCpyOptPass::eraseInstruction(Instruction *I) {
  if (MSSAU)
    MSSAU->removeMemoryAccess(I);
  if (MD)
    MD->removeInstruction(I);
  I->eraseFromParent();
}

// Check for mod or ref of Loc between Start and End, excluding both
// boundaries. Start and End must be in the same block.
static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc,
                            const MemoryUseOrDef *Start,
                            const MemoryUseOrDef *End) {
  assert(Start->getBlock() == End->getBlock() && "Only local supported");
  for (const MemoryAccess &MA :
       make_range(++Start->getIterator(), End->getIterator())) {
    if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(),
                                       Loc)))
      return true;
  }
  return false;
}

// Check for mod of Loc between Start and End, excluding both boundaries.
// Start and End can be in different blocks.
static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc,
                           const MemoryUseOrDef *Start,
                           const MemoryUseOrDef *End) {
  // TODO: Only walk until we hit Start.
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      End->getDefiningAccess(), Loc);
  return !MSSA->dominates(Clobber, Start);
}

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
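///
/// For example (illustrative IR), four adjacent byte stores of zero
///   store i8 0, i8* %p
///   store i8 0, i8* %p1   ; %p+1
///   store i8 0, i8* %p2   ; %p+2
///   store i8 0, i8* %p3   ; %p+3
/// can be folded into a single
///   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4, i1 false)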
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that is splatable. Scan to find all
  // subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be inserted
  // after MemInsertPoint. It points to either LastMemDef or to the last user
  // before the insertion point of the memset, if there are any such users.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  // Keeps track of the last MemoryDef between StartInst and the insertion point
  // for the new memset. This will become the defining access of the inserted
  // memsets.
  MemoryDef *LastMemDef = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    if (MSSAU) {
      auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
      if (CurrentAcc) {
        MemInsertPoint = CurrentAcc;
        if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
          LastMemDef = CurrentDef;
      }
    }

    // Calls that only access inaccessible memory do not block merging
    // accessible stores.
    if (auto *CB = dyn_cast<CallBase>(BI)) {
      if (CB->onlyAccessesInaccessibleMemory())
        continue;
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      Value *StoredVal = NextStore->getValueOperand();

      // Don't convert stores of non-integral pointer types to memsets (which
      // stores integers).
      if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
        break;

      // Check to see if this stored value is the same byte-splattable value.
      Value *StoredByte = isBytewiseValue(StoredVal, DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n";
               for (Instruction *SI : Range.TheStores)
                 dbgs() << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    if (MSSAU) {
      assert(LastMemDef && MemInsertPoint &&
             "Both LastMemDef and MemInsertPoint need to be set");
      auto *NewDef =
          cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
                              ? MSSAU->createMemoryAccessBefore(
                                    AMemSet, LastMemDef, MemInsertPoint)
                              : MSSAU->createMemoryAccessAfter(
                                    AMemSet, LastMemDef, MemInsertPoint));
      MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      LastMemDef = NewDef;
      MemInsertPoint = NewDef;
    }

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores)
      eraseInstruction(SI);

    ++NumMemSetInfer;
  }

  return AMemSet;
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its arguments, plus anything else that
// may alias with them.
// The method returns true if it was successful.
bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction *, 8> ToLift{SI};

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    // Make sure hoisting does not perform a store that was not guaranteed to
    // happen.
    if (!isGuaranteedToTransferExecutionToSuccessor(C))
      return false;

    bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
        return isModOrRefSet(AA->getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
          return isModOrRefSet(AA->getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA->getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA->getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA->getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist user of P above P.
          if (A == P) return false;
          Args.insert(A);
        }
      }
  }

  // Find MSSA insertion point. Normally P will always have a corresponding
  // memory access before which we can insert. However, with non-standard AA
  // pipelines, there may be a mismatch between AA and MSSA, in which case we
  // will scan for a memory access before P. In either case, we know for sure
  // that at least the load will have a memory access.
  // TODO: Simplify this once P is determined by MSSA, in which case the
  // discrepancy can no longer occur.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  if (MSSAU) {
    if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) {
      MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
    } else {
      const Instruction *ConstP = P;
      for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
                                             ++LI->getReverseIterator())) {
        if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
          MemInsertPoint = MA;
          break;
        }
      }
    }
  }

  // We made it, we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
    if (MSSAU) {
      assert(MemInsertPoint && "Must have found insert point");
      if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) {
        MSSAU->moveAfter(MA, MemInsertPoint);
        MemInsertPoint = MA;
      }
    }
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach the backend how to propagate the !nontemporal
  // metadata to memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  Value *StoredVal = SI->getValueOperand();

  // Not all the transforms below are correct for non-integral pointers, bail
  // until we've audited the individual pieces.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return false;

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        // TODO: Can use MSSA for this.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          if (MSSAU) {
            auto *LastDef =
                cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
            auto *NewAccess =
                MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
            MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
          }

          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      CallInst *C = nullptr;
      if (EnableMemorySSA) {
        if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>(
                MSSA->getWalker()->getClobberingMemoryAccess(LI))) {
          // The load must post-dominate the call. Limit to the same block for
          // now. TODO: Support non-local call-slot optimization?
          if (LoadClobber->getBlock() == SI->getParent())
            C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst());
        }
      } else {
        MemDepResult ldep = MD->getDependency(LI);
        if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
          C = dyn_cast<CallInst>(ldep.getInst());
      }

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        if (EnableMemorySSA) {
          if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C),
                              MSSA->getMemoryAccess(SI)))
            C = nullptr;
        } else {
          for (BasicBlock::iterator I = --SI->getIterator(),
                                    E = C->getIterator();
               I != E; --I) {
            if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) {
              C = nullptr;
              break;
            }
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is memset'able a byte at a time,
  // like "0" or "-1" of any width, as well as things like 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
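    // For example (illustrative IR), a whole-aggregate zero store such as
    //   store %struct.S zeroinitializer, %struct.S* %p
    // becomes a single memset of sizeof(%struct.S) zero bytes at %p.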
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      if (MSSAU) {
        assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)));
        auto *LastDef =
            cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
        auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
      }

      eraseInstruction(SI);
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.
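  //
  // In C terms (an illustrative sketch, not taken from this file): for
  //   struct S tmp; f(&tmp); s = tmp;   // the assignment lowers to a memcpy
  // the optimization rewrites the call so that f writes its result directly
  // into s:
  //   f(&s);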
877 | |||||||
878 | // Lifetime marks shouldn't be operated on. | ||||||
879 | if (Function *F = C->getCalledFunction()) | ||||||
880 | if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start) | ||||||
881 | return false; | ||||||
882 | |||||||
883 | // Require that src be an alloca. This simplifies the reasoning considerably. | ||||||
884 | AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc); | ||||||
885 | if (!srcAlloca) | ||||||
886 | return false; | ||||||
887 | |||||||
888 | ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize()); | ||||||
889 | if (!srcArraySize) | ||||||
890 | return false; | ||||||
891 | |||||||
892 | const DataLayout &DL = cpyLoad->getModule()->getDataLayout(); | ||||||
893 | uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) * | ||||||
894 | srcArraySize->getZExtValue(); | ||||||
895 | |||||||
896 | if (cpyLen < srcSize) | ||||||
897 | return false; | ||||||
898 | |||||||
899 | // Check that accessing the first srcSize bytes of dest will not cause a | ||||||
900 | // trap. Otherwise the transform is invalid since it might cause a trap | ||||||
901 | // to occur earlier than it otherwise would. | ||||||
902 | if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen), | ||||||
903 | DL, C, DT)) | ||||||
904 | return false; | ||||||
905 | |||||||
906 | // Make sure that nothing can observe cpyDest being written early. There are | ||||||
907 | // a number of cases to consider: | ||||||
908 | // 1. cpyDest cannot be accessed between C and cpyStore as a precondition of | ||||||
909 | // the transform. | ||||||
910 | // 2. C itself may not access cpyDest (prior to the transform). This is | ||||||
911 | // checked further below. | ||||||
912 | // 3. If cpyDest is accessible to the caller of this function (potentially | ||||||
913 | // captured and not based on an alloca), we need to ensure that we cannot | ||||||
914 | // unwind between C and cpyStore. This is checked here. | ||||||
915 | // 4. If cpyDest is potentially captured, there may be accesses to it from | ||||||
916 | // another thread. In this case, we need to check that cpyStore is | ||||||
917 | // guaranteed to be executed if C is. As it is a non-atomic access, it | ||||||
918 | // renders accesses from other threads undefined. | ||||||
919 | // TODO: This is currently not checked. | ||||||
920 | if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore)) | ||||||
921 | return false; | ||||||
922 | |||||||
923 | // Check that dest points to memory that is at least as aligned as src. | ||||||
924 | Align srcAlign = srcAlloca->getAlign(); | ||||||
925 | bool isDestSufficientlyAligned = srcAlign <= cpyAlign; | ||||||
926 | // If dest is not aligned enough and we can't increase its alignment then | ||||||
927 | // bail out. | ||||||
928 | if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest)) | ||||||
929 | return false; | ||||||
930 | |||||||
931 | // Check that src is not accessed except via the call and the memcpy. This | ||||||
932 | // guarantees that it holds only undefined values when passed in (so the final | ||||||
933 | // memcpy can be dropped), that it is not read or written between the call and | ||||||
934 | // the memcpy, and that writing beyond the end of it is undefined. | ||||||
935 | SmallVector<User *, 8> srcUseList(srcAlloca->users()); | ||||||
936 | while (!srcUseList.empty()) { | ||||||
937 | User *U = srcUseList.pop_back_val(); | ||||||
938 | |||||||
939 | if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) { | ||||||
940 | append_range(srcUseList, U->users()); | ||||||
941 | continue; | ||||||
942 | } | ||||||
943 | if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) { | ||||||
944 | if (!G->hasAllZeroIndices()) | ||||||
945 | return false; | ||||||
946 | |||||||
947 | append_range(srcUseList, U->users()); | ||||||
948 | continue; | ||||||
949 | } | ||||||
950 | if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U)) | ||||||
951 | if (IT->isLifetimeStartOrEnd()) | ||||||
952 | continue; | ||||||
953 | |||||||
954 | if (U != C && U != cpyLoad) | ||||||
955 | return false; | ||||||
956 | } | ||||||
957 | |||||||
958 | // Check that src isn't captured by the called function since the | ||||||
959 | // transformation can cause aliasing issues in that case. | ||||||
960 | for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI) | ||||||
961 | if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI)) | ||||||
962 | return false; | ||||||
963 | |||||||
964 | // Since we're changing the parameter to the callsite, we need to make sure | ||||||
965 | // that what would be the new parameter dominates the callsite. | ||||||
966 | if (!DT->dominates(cpyDest, C)) { | ||||||
967 | // Support moving a constant index GEP before the call. | ||||||
968 | auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest); | ||||||
969 | if (GEP && GEP->hasAllConstantIndices() && | ||||||
970 | DT->dominates(GEP->getPointerOperand(), C)) | ||||||
971 | GEP->moveBefore(C); | ||||||
972 | else | ||||||
973 | return false; | ||||||
974 | } | ||||||
975 | |||||||
976 | // In addition to knowing that the call does not access src in some | ||||||
977 | // unexpected manner, for example via a global, which we deduce from | ||||||
978 | // the use analysis, we also need to know that it does not sneakily | ||||||
979 | // access dest. We rely on AA to figure this out for us. | ||||||
980 | ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize)); | ||||||
981 | // If necessary, perform additional analysis. | ||||||
982 | if (isModOrRefSet(MR)) | ||||||
983 | MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT); | ||||||
984 | if (isModOrRefSet(MR)) | ||||||
985 | return false; | ||||||
986 | |||||||
987 | // We can't create address space casts here because we don't know if they're | ||||||
988 | // safe for the target. | ||||||
989 | if (cpySrc->getType()->getPointerAddressSpace() != | ||||||
990 | cpyDest->getType()->getPointerAddressSpace()) | ||||||
991 | return false; | ||||||
992 | for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) | ||||||
993 | if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc && | ||||||
994 | cpySrc->getType()->getPointerAddressSpace() != | ||||||
995 | C->getArgOperand(ArgI)->getType()->getPointerAddressSpace()) | ||||||
996 | return false; | ||||||
997 | |||||||
998 | // All the checks have passed, so do the transformation. | ||||||
999 | bool changedArgument = false; | ||||||
1000 | for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) | ||||||
1001 | if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) { | ||||||
1002 | Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest | ||||||
1003 | : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(), | ||||||
1004 | cpyDest->getName(), C); | ||||||
1005 | changedArgument = true; | ||||||
1006 | if (C->getArgOperand(ArgI)->getType() == Dest->getType()) | ||||||
1007 | C->setArgOperand(ArgI, Dest); | ||||||
1008 | else | ||||||
1009 | C->setArgOperand(ArgI, CastInst::CreatePointerCast( | ||||||
1010 | Dest, C->getArgOperand(ArgI)->getType(), | ||||||
1011 | Dest->getName(), C)); | ||||||
1012 | } | ||||||
1013 | |||||||
1014 | if (!changedArgument) | ||||||
1015 | return false; | ||||||
1016 | |||||||
1017 | // If the destination wasn't sufficiently aligned then increase its alignment. | ||||||
1018 | if (!isDestSufficientlyAligned) { | ||||||
1019 | assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!")(static_cast <bool> (isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!") ? void (0) : __assert_fail ("isa<AllocaInst>(cpyDest) && \"Can only increase alloca alignment!\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp" , 1019, __extension__ __PRETTY_FUNCTION__)); | ||||||
1020 | cast<AllocaInst>(cpyDest)->setAlignment(srcAlign); | ||||||
1021 | } | ||||||
1022 | |||||||
1023 | // Drop any cached information about the call, because we may have changed | ||||||
1024 | // its dependence information by changing its parameter. | ||||||
1025 | if (MD) | ||||||
1026 | MD->removeInstruction(C); | ||||||
1027 | |||||||
1028 | // Update AA metadata | ||||||
1029 | // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be | ||||||
1030 | // handled here, but combineMetadata doesn't support them yet | ||||||
1031 | unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, | ||||||
1032 | LLVMContext::MD_noalias, | ||||||
1033 | LLVMContext::MD_invariant_group, | ||||||
1034 | LLVMContext::MD_access_group}; | ||||||
1035 | combineMetadata(C, cpyLoad, KnownIDs, true); | ||||||
1036 | |||||||
1037 | ++NumCallSlot; | ||||||
1038 | return true; | ||||||
1039 | } | ||||||
1040 | |||||||
1041 | /// We've found that the (upward scanning) memory dependence of memcpy 'M' is | ||||||
1042 | /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can. | ||||||
1043 | bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M, | ||||||
1044 | MemCpyInst *MDep) { | ||||||
1045 | // We can only transforms memcpy's where the dest of one is the source of the | ||||||
1046 | // other. | ||||||
1047 | if (M->getSource() != MDep->getDest() || MDep->isVolatile()) | ||||||
1048 | return false; | ||||||
1049 | |||||||
1050 | // If dep instruction is reading from our current input, then it is a noop | ||||||
1051 | // transfer and substituting the input won't change this instruction. Just | ||||||
1052 | // ignore the input and let someone else zap MDep. This handles cases like: | ||||||
1053 | // memcpy(a <- a) | ||||||
1054 | // memcpy(b <- a) | ||||||
1055 | if (M->getSource() == MDep->getSource()) | ||||||
1056 | return false; | ||||||
1057 | |||||||
1058 |   // Second, the lengths of the memcpys must be the same, or the preceding one
1059 |   // must be larger than the following one.
1060 | if (MDep->getLength() != M->getLength()) { | ||||||
1061 | ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength()); | ||||||
1062 | ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength()); | ||||||
1063 | if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue()) | ||||||
1064 | return false; | ||||||
1065 | } | ||||||
1066 | |||||||
1067 | // Verify that the copied-from memory doesn't change in between the two | ||||||
1068 | // transfers. For example, in: | ||||||
1069 | // memcpy(a <- b) | ||||||
1070 | // *b = 42; | ||||||
1071 | // memcpy(c <- a) | ||||||
1072 | // It would be invalid to transform the second memcpy into memcpy(c <- b). | ||||||
1073 | // | ||||||
1074 | // TODO: If the code between M and MDep is transparent to the destination "c", | ||||||
1075 | // then we could still perform the xform by moving M up to the first memcpy. | ||||||
1076 | if (EnableMemorySSA) { | ||||||
1077 | // TODO: It would be sufficient to check the MDep source up to the memcpy | ||||||
1078 | // size of M, rather than MDep. | ||||||
1079 | if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep), | ||||||
1080 | MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M))) | ||||||
1081 | return false; | ||||||
1082 | } else { | ||||||
1083 | // NOTE: This is conservative, it will stop on any read from the source loc, | ||||||
1084 | // not just the defining memcpy. | ||||||
1085 | MemDepResult SourceDep = | ||||||
1086 | MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false, | ||||||
1087 | M->getIterator(), M->getParent()); | ||||||
1088 | if (!SourceDep.isClobber() || SourceDep.getInst() != MDep) | ||||||
1089 | return false; | ||||||
1090 | } | ||||||
1091 | |||||||
1092 | // If the dest of the second might alias the source of the first, then the | ||||||
1093 | // source and dest might overlap. We still want to eliminate the intermediate | ||||||
1094 | // value, but we have to generate a memmove instead of memcpy. | ||||||
1095 | bool UseMemMove = false; | ||||||
1096 | if (!AA->isNoAlias(MemoryLocation::getForDest(M), | ||||||
1097 | MemoryLocation::getForSource(MDep))) | ||||||
1098 | UseMemMove = true; | ||||||
1099 | |||||||
1100 | // If all checks passed, then we can transform M. | ||||||
1101 |   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
1102 |                     << *MDep << '\n' << *M << '\n');
1103 | |||||||
1104 | // TODO: Is this worth it if we're creating a less aligned memcpy? For | ||||||
1105 | // example we could be moving from movaps -> movq on x86. | ||||||
1106 | IRBuilder<> Builder(M); | ||||||
1107 | Instruction *NewM; | ||||||
1108 | if (UseMemMove) | ||||||
1109 | NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(), | ||||||
1110 | MDep->getRawSource(), MDep->getSourceAlign(), | ||||||
1111 | M->getLength(), M->isVolatile()); | ||||||
1112 | else if (isa<MemCpyInlineInst>(M)) { | ||||||
1113 | // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is | ||||||
1114 | // never allowed since that would allow the latter to be lowered as a call | ||||||
1115 | // to an external function. | ||||||
1116 | NewM = Builder.CreateMemCpyInline( | ||||||
1117 | M->getRawDest(), M->getDestAlign(), MDep->getRawSource(), | ||||||
1118 | MDep->getSourceAlign(), M->getLength(), M->isVolatile()); | ||||||
1119 | } else | ||||||
1120 | NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(), | ||||||
1121 | MDep->getRawSource(), MDep->getSourceAlign(), | ||||||
1122 | M->getLength(), M->isVolatile()); | ||||||
1123 | |||||||
1124 | if (MSSAU) { | ||||||
1125 |     assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
1126 | auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)); | ||||||
1127 | auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); | ||||||
1128 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||
1129 | } | ||||||
1130 | |||||||
1131 | // Remove the instruction we're replacing. | ||||||
1132 | eraseInstruction(M); | ||||||
1133 | ++NumMemCpyInstr; | ||||||
1134 | return true; | ||||||
1135 | } | ||||||
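// A sketch of the forwarding just performed (illustrative operands):
//   memcpy(b <- a, n)    ; MDep
//   memcpy(c <- b, n)    ; M
// is rewritten to
//   memcpy(b <- a, n)
//   memcpy(c <- a, n)    ; or memmove, if c may alias a
// which leaves the intermediate copy through b exposed for DSE.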
1136 | |||||||
1137 | /// We've found that the (upward scanning) memory dependence of \p MemCpy is | ||||||
1138 | /// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that | ||||||
1139 | /// weren't copied over by \p MemCpy. | ||||||
1140 | /// | ||||||
1141 | /// In other words, transform: | ||||||
1142 | /// \code | ||||||
1143 | /// memset(dst, c, dst_size); | ||||||
1144 | /// memcpy(dst, src, src_size); | ||||||
1145 | /// \endcode | ||||||
1146 | /// into: | ||||||
1147 | /// \code | ||||||
1148 | /// memcpy(dst, src, src_size); | ||||||
1149 | /// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size); | ||||||
1150 | /// \endcode | ||||||
1151 | bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy, | ||||||
1152 | MemSetInst *MemSet) { | ||||||
1153 | // We can only transform memset/memcpy with the same destination. | ||||||
1154 | if (!AA->isMustAlias(MemSet->getDest(), MemCpy->getDest())) | ||||||
1155 | return false; | ||||||
1156 | |||||||
1157 | // Check that src and dst of the memcpy aren't the same. While memcpy | ||||||
1158 | // operands cannot partially overlap, exact equality is allowed. | ||||||
1159 | if (!AA->isNoAlias(MemoryLocation(MemCpy->getSource(), | ||||||
1160 | LocationSize::precise(1)), | ||||||
1161 | MemoryLocation(MemCpy->getDest(), | ||||||
1162 | LocationSize::precise(1)))) | ||||||
1163 | return false; | ||||||
1164 | |||||||
1165 | if (EnableMemorySSA) { | ||||||
1166 | // We know that dst up to src_size is not written. We now need to make sure | ||||||
1167 | // that dst up to dst_size is not accessed. (If we did not move the memset, | ||||||
1168 | // checking for reads would be sufficient.) | ||||||
1169 | if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet), | ||||||
1170 | MSSA->getMemoryAccess(MemSet), | ||||||
1171 | MSSA->getMemoryAccess(MemCpy))) { | ||||||
1172 | return false; | ||||||
1173 | } | ||||||
1174 | } else { | ||||||
1175 | // We have already checked that dst up to src_size is not accessed. We | ||||||
1176 | // need to make sure that there are no accesses up to dst_size either. | ||||||
1177 | MemDepResult DstDepInfo = MD->getPointerDependencyFrom( | ||||||
1178 | MemoryLocation::getForDest(MemSet), false, MemCpy->getIterator(), | ||||||
1179 | MemCpy->getParent()); | ||||||
1180 | if (DstDepInfo.getInst() != MemSet) | ||||||
1181 | return false; | ||||||
1182 | } | ||||||
1183 | |||||||
1184 | // Use the same i8* dest as the memcpy, killing the memset dest if different. | ||||||
1185 | Value *Dest = MemCpy->getRawDest(); | ||||||
1186 | Value *DestSize = MemSet->getLength(); | ||||||
1187 | Value *SrcSize = MemCpy->getLength(); | ||||||
1188 | |||||||
1189 | if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy)) | ||||||
1190 | return false; | ||||||
1191 | |||||||
1192 | // If the sizes are the same, simply drop the memset instead of generating | ||||||
1193 | // a replacement with zero size. | ||||||
1194 | if (DestSize == SrcSize) { | ||||||
1195 | eraseInstruction(MemSet); | ||||||
1196 | return true; | ||||||
1197 | } | ||||||
1198 | |||||||
1199 | // By default, create an unaligned memset. | ||||||
1200 | unsigned Align = 1; | ||||||
1201 | // If Dest is aligned, and SrcSize is constant, use the minimum alignment | ||||||
1202 | // of the sum. | ||||||
1203 | const unsigned DestAlign = | ||||||
1204 | std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment()); | ||||||
1205 | if (DestAlign > 1) | ||||||
1206 | if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize)) | ||||||
1207 | Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign); | ||||||
1208 | |||||||
1209 | IRBuilder<> Builder(MemCpy); | ||||||
1210 | |||||||
1211 | // If the sizes have different types, zext the smaller one. | ||||||
1212 | if (DestSize->getType() != SrcSize->getType()) { | ||||||
1213 | if (DestSize->getType()->getIntegerBitWidth() > | ||||||
1214 | SrcSize->getType()->getIntegerBitWidth()) | ||||||
1215 | SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType()); | ||||||
1216 | else | ||||||
1217 | DestSize = Builder.CreateZExt(DestSize, SrcSize->getType()); | ||||||
1218 | } | ||||||
1219 | |||||||
1220 | Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize); | ||||||
1221 | Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize); | ||||||
1222 | Value *MemsetLen = Builder.CreateSelect( | ||||||
1223 | Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff); | ||||||
1224 | unsigned DestAS = Dest->getType()->getPointerAddressSpace(); | ||||||
1225 | Instruction *NewMemSet = Builder.CreateMemSet( | ||||||
1226 | Builder.CreateGEP(Builder.getInt8Ty(), | ||||||
1227 | Builder.CreatePointerCast(Dest, | ||||||
1228 | Builder.getInt8PtrTy(DestAS)), | ||||||
1229 | SrcSize), | ||||||
1230 | MemSet->getOperand(1), MemsetLen, MaybeAlign(Align)); | ||||||
1231 | |||||||
1232 | if (MSSAU) { | ||||||
1233 |     assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
1234 |            "MemCpy must be a MemoryDef");
1235 | // The new memset is inserted after the memcpy, but it is known that its | ||||||
1236 | // defining access is the memset about to be removed which immediately | ||||||
1237 | // precedes the memcpy. | ||||||
1238 | auto *LastDef = | ||||||
1239 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); | ||||||
1240 | auto *NewAccess = MSSAU->createMemoryAccessBefore( | ||||||
1241 | NewMemSet, LastDef->getDefiningAccess(), LastDef); | ||||||
1242 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||
1243 | } | ||||||
1244 | |||||||
1245 | eraseInstruction(MemSet); | ||||||
1246 | return true; | ||||||
1247 | } | ||||||
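// A worked instance of the size computation above (illustrative values):
// with dst_size = 16 and src_size = 10, Ule is false and MemsetLen becomes
// 16 - 10 = 6, so
//   memset(dst, c, 16); memcpy(dst, src, 10);
// is rewritten to
//   memcpy(dst, src, 10); memset(dst + 10, c, 6);
// touching exactly the tail bytes the memcpy does not overwrite.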
1248 | |||||||
1249 | /// Determine whether the instruction has undefined content for the given Size, | ||||||
1250 | /// either because it was freshly alloca'd or started its lifetime. | ||||||
1251 | static bool hasUndefContents(Instruction *I, Value *Size) { | ||||||
1252 | if (isa<AllocaInst>(I)) | ||||||
1253 | return true; | ||||||
1254 | |||||||
1255 | if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) { | ||||||
1256 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) | ||||||
1257 | if (II->getIntrinsicID() == Intrinsic::lifetime_start) | ||||||
1258 | if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0))) | ||||||
1259 | if (LTSize->getZExtValue() >= CSize->getZExtValue()) | ||||||
1260 | return true; | ||||||
1261 | } | ||||||
1262 | |||||||
1263 | return false; | ||||||
1264 | } | ||||||
1265 | |||||||
1266 | static bool hasUndefContentsMSSA(MemorySSA *MSSA, AliasAnalysis *AA, Value *V, | ||||||
1267 | MemoryDef *Def, Value *Size) { | ||||||
1268 | if (MSSA->isLiveOnEntryDef(Def)) | ||||||
1269 | return isa<AllocaInst>(getUnderlyingObject(V)); | ||||||
1270 | |||||||
1271 |   if (IntrinsicInst *II =
1272 |           dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
1273 | if (II->getIntrinsicID() == Intrinsic::lifetime_start) { | ||||||
1274 | ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0)); | ||||||
1275 | |||||||
1276 |       if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
1277 | if (AA->isMustAlias(V, II->getArgOperand(1)) && | ||||||
1278 | LTSize->getZExtValue() >= CSize->getZExtValue()) | ||||||
1279 | return true; | ||||||
1280 | } | ||||||
1281 | |||||||
1282 | // If the lifetime.start covers a whole alloca (as it almost always | ||||||
1283 | // does) and we're querying a pointer based on that alloca, then we know | ||||||
1284 | // the memory is definitely undef, regardless of how exactly we alias. | ||||||
1285 | // The size also doesn't matter, as an out-of-bounds access would be UB. | ||||||
1286 | AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V)); | ||||||
1287 | if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) { | ||||||
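// Note: Alloca may be null here, since the dyn_cast above can fail; the
// dereference on the next line assumes the preceding equality check only
// succeeds for a non-null Alloca. This is the site of the analyzer warning
// at line 1288, column 32.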
1288 | const DataLayout &DL = Alloca->getModule()->getDataLayout(); | ||||||
1289 | if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL)) | ||||||
1290 | if (*AllocaSize == LTSize->getValue() * 8) | ||||||
1291 | return true; | ||||||
1292 | } | ||||||
1293 | } | ||||||
1294 | } | ||||||
1295 | |||||||
1296 | return false; | ||||||
1297 | } | ||||||
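// For example (illustrative IR), given a lifetime.start that covers the
// whole alloca:
//   %a = alloca [16 x i8]
//   call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)  ; %p derived from %a
// any read of %a before something writes it sees undef, so a memcpy whose
// source is based on %a at this point can simply be removed by the caller.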
1298 | |||||||
1299 | /// Transform memcpy to memset when its source was just memset. | ||||||
1300 | /// In other words, turn: | ||||||
1301 | /// \code | ||||||
1302 | /// memset(dst1, c, dst1_size); | ||||||
1303 | /// memcpy(dst2, dst1, dst2_size); | ||||||
1304 | /// \endcode | ||||||
1305 | /// into: | ||||||
1306 | /// \code | ||||||
1307 | /// memset(dst1, c, dst1_size); | ||||||
1308 | /// memset(dst2, c, dst2_size); | ||||||
1309 | /// \endcode | ||||||
1310 | /// When dst2_size <= dst1_size. | ||||||
1311 | bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy, | ||||||
1312 | MemSetInst *MemSet) { | ||||||
1313 |   // Make sure we have memcpy(..., memset(...), ...); that is, we are memsetting
1314 |   // and then memcpying from the same address. Otherwise it is hard to reason about.
1315 | if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource())) | ||||||
1316 | return false; | ||||||
1317 | |||||||
1318 | Value *MemSetSize = MemSet->getLength(); | ||||||
1319 | Value *CopySize = MemCpy->getLength(); | ||||||
1320 | |||||||
1321 |   if (MemSetSize != CopySize) {
1322 | // Make sure the memcpy doesn't read any more than what the memset wrote. | ||||||
1323 | // Don't worry about sizes larger than i64. | ||||||
1324 | |||||||
1325 | // A known memset size is required. | ||||||
1326 | ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize); | ||||||
1327 | if (!CMemSetSize) | ||||||
1328 | return false; | ||||||
1329 | |||||||
1330 | // A known memcpy size is also required. | ||||||
1331 | ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize); | ||||||
1332 |     if (!CCopySize)
1333 | return false; | ||||||
1334 | if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) { | ||||||
1335 | // If the memcpy is larger than the memset, but the memory was undef prior | ||||||
1336 | // to the memset, we can just ignore the tail. Technically we're only | ||||||
1337 | // interested in the bytes from MemSetSize..CopySize here, but as we can't | ||||||
1338 | // easily represent this location, we use the full 0..CopySize range. | ||||||
1339 | MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy); | ||||||
1340 | bool CanReduceSize = false; | ||||||
1341 | if (EnableMemorySSA) { | ||||||
1342 | MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet); | ||||||
1343 | MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||||
1344 | MemSetAccess->getDefiningAccess(), MemCpyLoc); | ||||||
1345 |       if (auto *MD = dyn_cast<MemoryDef>(Clobber))
1346 | if (hasUndefContentsMSSA(MSSA, AA, MemCpy->getSource(), MD, CopySize)) | ||||||
1347 | CanReduceSize = true; | ||||||
1348 | } else { | ||||||
1349 | MemDepResult DepInfo = MD->getPointerDependencyFrom( | ||||||
1350 | MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent()); | ||||||
1351 | if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize)) | ||||||
1352 | CanReduceSize = true; | ||||||
1353 | } | ||||||
1354 | |||||||
1355 | if (!CanReduceSize) | ||||||
1356 | return false; | ||||||
1357 | CopySize = MemSetSize; | ||||||
1358 | } | ||||||
1359 | } | ||||||
1360 | |||||||
1361 | IRBuilder<> Builder(MemCpy); | ||||||
1362 | Instruction *NewM = | ||||||
1363 | Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1), | ||||||
1364 | CopySize, MaybeAlign(MemCpy->getDestAlignment())); | ||||||
1365 | if (MSSAU) { | ||||||
1366 | auto *LastDef = | ||||||
1367 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); | ||||||
1368 | auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); | ||||||
1369 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||
1370 | } | ||||||
1371 | |||||||
1372 | return true; | ||||||
1373 | } | ||||||
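// Sketch of the rewrite above with illustrative sizes (dst1_size = 16,
// dst2_size = 8):
//   memset(dst1, c, 16); memcpy(dst2, dst1, 8);
// becomes
//   memset(dst1, c, 16); memset(dst2, c, 8);
// since the eight copied bytes are already known to hold the value c.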
1374 | |||||||
1375 | /// Perform simplification of memcpy's. If we have memcpy A | ||||||
1376 | /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite | ||||||
1377 | /// B to be a memcpy from X to Z (or potentially a memmove, depending on | ||||||
1378 | /// circumstances). This allows later passes to remove the first memcpy | ||||||
1379 | /// altogether. | ||||||
1380 | bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) { | ||||||
1381 | // We can only optimize non-volatile memcpy's. | ||||||
1382 | if (M->isVolatile()) return false; | ||||||
1383 | |||||||
1384 | // If the source and destination of the memcpy are the same, then zap it. | ||||||
1385 | if (M->getSource() == M->getDest()) { | ||||||
1386 | ++BBI; | ||||||
1387 | eraseInstruction(M); | ||||||
1388 | return true; | ||||||
1389 | } | ||||||
1390 | |||||||
1391 | // If copying from a constant, try to turn the memcpy into a memset. | ||||||
1392 | if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource())) | ||||||
1393 | if (GV->isConstant() && GV->hasDefinitiveInitializer()) | ||||||
1394 | if (Value *ByteVal = isBytewiseValue(GV->getInitializer(), | ||||||
1395 | M->getModule()->getDataLayout())) { | ||||||
1396 | IRBuilder<> Builder(M); | ||||||
1397 | Instruction *NewM = | ||||||
1398 | Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(), | ||||||
1399 | MaybeAlign(M->getDestAlignment()), false); | ||||||
1400 | if (MSSAU) { | ||||||
1401 | auto *LastDef = | ||||||
1402 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)); | ||||||
1403 | auto *NewAccess = | ||||||
1404 | MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); | ||||||
1405 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||||
1406 | } | ||||||
1407 | |||||||
1408 | eraseInstruction(M); | ||||||
1409 | ++NumCpyToSet; | ||||||
1410 | return true; | ||||||
1411 | } | ||||||
1412 | |||||||
1413 | if (EnableMemorySSA) { | ||||||
1414 | MemoryUseOrDef *MA = MSSA->getMemoryAccess(M); | ||||||
1415 | MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA); | ||||||
1416 | MemoryLocation DestLoc = MemoryLocation::getForDest(M); | ||||||
1417 | const MemoryAccess *DestClobber = | ||||||
1418 | MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc); | ||||||
1419 | |||||||
1420 | // Try to turn a partially redundant memset + memcpy into | ||||||
1421 | // memcpy + smaller memset. We don't need the memcpy size for this. | ||||||
1422 |     // The memcpy must post-dominate the memset, so limit this to the same
1423 |     // basic block. A non-local generalization is likely not worthwhile.
1424 | if (auto *MD = dyn_cast<MemoryDef>(DestClobber)) | ||||||
1425 | if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst())) | ||||||
1426 | if (DestClobber->getBlock() == M->getParent()) | ||||||
1427 | if (processMemSetMemCpyDependence(M, MDep)) | ||||||
1428 | return true; | ||||||
1429 | |||||||
1430 | MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||||
1431 | AnyClobber, MemoryLocation::getForSource(M)); | ||||||
1432 | |||||||
1433 | // There are four possible optimizations we can do for memcpy: | ||||||
1434 |     //   a) memcpy-memcpy xform which exposes redundancy for DSE.
1435 | // b) call-memcpy xform for return slot optimization. | ||||||
1436 | // c) memcpy from freshly alloca'd space or space that has just started | ||||||
1437 | // its lifetime copies undefined data, and we can therefore eliminate | ||||||
1438 | // the memcpy in favor of the data that was already at the destination. | ||||||
1439 | // d) memcpy from a just-memset'd source can be turned into memset. | ||||||
1440 | if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) { | ||||||
1441 | if (Instruction *MI = MD->getMemoryInst()) { | ||||||
1442 | if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) { | ||||||
1443 | if (auto *C = dyn_cast<CallInst>(MI)) { | ||||||
1444 | // The memcpy must post-dom the call. Limit to the same block for | ||||||
1445 | // now. Additionally, we need to ensure that there are no accesses | ||||||
1446 | // to dest between the call and the memcpy. Accesses to src will be | ||||||
1447 | // checked by performCallSlotOptzn(). | ||||||
1448 | // TODO: Support non-local call-slot optimization? | ||||||
1449 | if (C->getParent() == M->getParent() && | ||||||
1450 | !accessedBetween(*AA, DestLoc, MD, MA)) { | ||||||
1451 | // FIXME: Can we pass in either of dest/src alignment here instead | ||||||
1452 | // of conservatively taking the minimum? | ||||||
1453 | Align Alignment = std::min(M->getDestAlign().valueOrOne(), | ||||||
1454 | M->getSourceAlign().valueOrOne()); | ||||||
1455 | if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(), | ||||||
1456 | CopySize->getZExtValue(), Alignment, | ||||||
1457 | C)) { | ||||||
1458 |               LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
1459 |                                 << "    call: " << *C << "\n"
1460 |                                 << "    memcpy: " << *M << "\n");
1461 | eraseInstruction(M); | ||||||
1462 | ++NumMemCpyInstr; | ||||||
1463 | return true; | ||||||
1464 | } | ||||||
1465 | } | ||||||
1466 | } | ||||||
1467 | } | ||||||
1468 | if (auto *MDep = dyn_cast<MemCpyInst>(MI)) | ||||||
1469 | return processMemCpyMemCpyDependence(M, MDep); | ||||||
1470 | if (auto *MDep = dyn_cast<MemSetInst>(MI)) { | ||||||
1471 | if (performMemCpyToMemSetOptzn(M, MDep)) { | ||||||
1472 |           LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
1473 | eraseInstruction(M); | ||||||
1474 | ++NumCpyToSet; | ||||||
1475 | return true; | ||||||
1476 | } | ||||||
1477 | } | ||||||
1478 | } | ||||||
1479 | |||||||
1480 | if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, M->getLength())) { | ||||||
1481 |         LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
1482 | eraseInstruction(M); | ||||||
1483 | ++NumMemCpyInstr; | ||||||
1484 | return true; | ||||||
1485 | } | ||||||
1486 | } | ||||||
1487 | } else { | ||||||
1488 | MemDepResult DepInfo = MD->getDependency(M); | ||||||
1489 | |||||||
1490 | // Try to turn a partially redundant memset + memcpy into | ||||||
1491 | // memcpy + smaller memset. We don't need the memcpy size for this. | ||||||
1492 | if (DepInfo.isClobber()) | ||||||
1493 | if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst())) | ||||||
1494 | if (processMemSetMemCpyDependence(M, MDep)) | ||||||
1495 | return true; | ||||||
1496 | |||||||
1497 | // There are four possible optimizations we can do for memcpy: | ||||||
1498 |     //   a) memcpy-memcpy xform which exposes redundancy for DSE.
1499 | // b) call-memcpy xform for return slot optimization. | ||||||
1500 | // c) memcpy from freshly alloca'd space or space that has just started | ||||||
1501 | // its lifetime copies undefined data, and we can therefore eliminate | ||||||
1502 | // the memcpy in favor of the data that was already at the destination. | ||||||
1503 | // d) memcpy from a just-memset'd source can be turned into memset. | ||||||
1504 | if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) { | ||||||
1505 | if (DepInfo.isClobber()) { | ||||||
1506 | if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) { | ||||||
1507 | // FIXME: Can we pass in either of dest/src alignment here instead | ||||||
1508 | // of conservatively taking the minimum? | ||||||
1509 | Align Alignment = std::min(M->getDestAlign().valueOrOne(), | ||||||
1510 | M->getSourceAlign().valueOrOne()); | ||||||
1511 | if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(), | ||||||
1512 | CopySize->getZExtValue(), Alignment, C)) { | ||||||
1513 | eraseInstruction(M); | ||||||
1514 | ++NumMemCpyInstr; | ||||||
1515 | return true; | ||||||
1516 | } | ||||||
1517 | } | ||||||
1518 | } | ||||||
1519 | } | ||||||
1520 | |||||||
1521 | MemoryLocation SrcLoc = MemoryLocation::getForSource(M); | ||||||
1522 | MemDepResult SrcDepInfo = MD->getPointerDependencyFrom( | ||||||
1523 | SrcLoc, true, M->getIterator(), M->getParent()); | ||||||
1524 | |||||||
1525 | if (SrcDepInfo.isClobber()) { | ||||||
1526 | if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst())) | ||||||
1527 | return processMemCpyMemCpyDependence(M, MDep); | ||||||
1528 | } else if (SrcDepInfo.isDef()) { | ||||||
1529 | if (hasUndefContents(SrcDepInfo.getInst(), M->getLength())) { | ||||||
1530 | eraseInstruction(M); | ||||||
1531 | ++NumMemCpyInstr; | ||||||
1532 | return true; | ||||||
1533 | } | ||||||
1534 | } | ||||||
1535 | |||||||
1536 | if (SrcDepInfo.isClobber()) | ||||||
1537 | if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst())) | ||||||
1538 | if (performMemCpyToMemSetOptzn(M, MDep)) { | ||||||
1539 | eraseInstruction(M); | ||||||
1540 | ++NumCpyToSet; | ||||||
1541 | return true; | ||||||
1542 | } | ||||||
1543 | } | ||||||
1544 | |||||||
1545 | return false; | ||||||
1546 | } | ||||||
1547 | |||||||
1548 | /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed | ||||||
1549 | /// not to alias. | ||||||
1550 | bool MemCpyOptPass::processMemMove(MemMoveInst *M) { | ||||||
1551 | if (!TLI->has(LibFunc_memmove)) | ||||||
1552 | return false; | ||||||
1553 | |||||||
1554 | // See if the pointers alias. | ||||||
1555 | if (!AA->isNoAlias(MemoryLocation::getForDest(M), | ||||||
1556 | MemoryLocation::getForSource(M))) | ||||||
1557 | return false; | ||||||
1558 | |||||||
1559 |   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
1560 |                     << "\n");
1561 | |||||||
1562 | // If not, then we know we can transform this. | ||||||
1563 | Type *ArgTys[3] = { M->getRawDest()->getType(), | ||||||
1564 | M->getRawSource()->getType(), | ||||||
1565 | M->getLength()->getType() }; | ||||||
1566 | M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(), | ||||||
1567 | Intrinsic::memcpy, ArgTys)); | ||||||
1568 | |||||||
1569 | // For MemorySSA nothing really changes (except that memcpy may imply stricter | ||||||
1570 | // aliasing guarantees). | ||||||
1571 | |||||||
1572 |   // MemDep may have overly conservative information about this instruction;
1573 |   // just conservatively flush it from the cache.
1574 | if (MD) | ||||||
1575 | MD->removeInstruction(M); | ||||||
1576 | |||||||
1577 | ++NumMoveToCpy; | ||||||
1578 | return true; | ||||||
1579 | } | ||||||
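// For instance (illustrative), when AA proves dst and src cannot alias:
//   memmove(dst <- src, n)
// is turned into
//   memcpy(dst <- src, n)
// purely by swapping the called intrinsic; the operands stay untouched.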
1580 | |||||||
1581 | /// This is called on every byval argument in call sites. | ||||||
1582 | bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) { | ||||||
1583 | const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout(); | ||||||
1584 | // Find out what feeds this byval argument. | ||||||
1585 | Value *ByValArg = CB.getArgOperand(ArgNo); | ||||||
1586 | Type *ByValTy = CB.getParamByValType(ArgNo); | ||||||
1587 | uint64_t ByValSize = DL.getTypeAllocSize(ByValTy); | ||||||
1588 | MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize)); | ||||||
1589 | MemCpyInst *MDep = nullptr; | ||||||
1590 | if (EnableMemorySSA) { | ||||||
1591 | MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB); | ||||||
1592 | if (!CallAccess) | ||||||
1593 | return false; | ||||||
1594 | MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||||
1595 | CallAccess->getDefiningAccess(), Loc); | ||||||
1596 | if (auto *MD = dyn_cast<MemoryDef>(Clobber)) | ||||||
1597 | MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst()); | ||||||
1598 | } else { | ||||||
1599 | MemDepResult DepInfo = MD->getPointerDependencyFrom( | ||||||
1600 | Loc, true, CB.getIterator(), CB.getParent()); | ||||||
1601 | if (!DepInfo.isClobber()) | ||||||
1602 | return false; | ||||||
1603 | MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()); | ||||||
1604 | } | ||||||
1605 | |||||||
1606 | // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by | ||||||
1607 | // a memcpy, see if we can byval from the source of the memcpy instead of the | ||||||
1608 | // result. | ||||||
1609 | if (!MDep || MDep->isVolatile() || | ||||||
1610 | ByValArg->stripPointerCasts() != MDep->getDest()) | ||||||
1611 | return false; | ||||||
1612 | |||||||
1613 |   // The length of the memcpy must be larger than or equal to the size of the byval.
1614 | ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength()); | ||||||
1615 | if (!C1 || C1->getValue().getZExtValue() < ByValSize) | ||||||
1616 | return false; | ||||||
1617 | |||||||
1618 | // Get the alignment of the byval. If the call doesn't specify the alignment, | ||||||
1619 | // then it is some target specific value that we can't know. | ||||||
1620 | MaybeAlign ByValAlign = CB.getParamAlign(ArgNo); | ||||||
1621 | if (!ByValAlign) return false; | ||||||
1622 | |||||||
1623 | // If it is greater than the memcpy, then we check to see if we can force the | ||||||
1624 | // source of the memcpy to the alignment we need. If we fail, we bail out. | ||||||
1625 | MaybeAlign MemDepAlign = MDep->getSourceAlign(); | ||||||
1626 | if ((!MemDepAlign || *MemDepAlign < *ByValAlign) && | ||||||
1627 | getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC, | ||||||
1628 | DT) < *ByValAlign) | ||||||
1629 | return false; | ||||||
1630 | |||||||
1631 | // The address space of the memcpy source must match the byval argument | ||||||
1632 | if (MDep->getSource()->getType()->getPointerAddressSpace() != | ||||||
1633 | ByValArg->getType()->getPointerAddressSpace()) | ||||||
1634 | return false; | ||||||
1635 | |||||||
1636 | // Verify that the copied-from memory doesn't change in between the memcpy and | ||||||
1637 | // the byval call. | ||||||
1638 | // memcpy(a <- b) | ||||||
1639 | // *b = 42; | ||||||
1640 | // foo(*a) | ||||||
1641 | // It would be invalid to transform the second memcpy into foo(*b). | ||||||
1642 | if (EnableMemorySSA) { | ||||||
1643 | if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep), | ||||||
1644 | MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB))) | ||||||
1645 | return false; | ||||||
1646 | } else { | ||||||
1647 | // NOTE: This is conservative, it will stop on any read from the source loc, | ||||||
1648 | // not just the defining memcpy. | ||||||
1649 | MemDepResult SourceDep = MD->getPointerDependencyFrom( | ||||||
1650 | MemoryLocation::getForSource(MDep), false, | ||||||
1651 | CB.getIterator(), MDep->getParent()); | ||||||
1652 | if (!SourceDep.isClobber() || SourceDep.getInst() != MDep) | ||||||
1653 | return false; | ||||||
1654 | } | ||||||
1655 | |||||||
1656 | Value *TmpCast = MDep->getSource(); | ||||||
1657 | if (MDep->getSource()->getType() != ByValArg->getType()) { | ||||||
1658 | BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(), ByValArg->getType(), | ||||||
1659 | "tmpcast", &CB); | ||||||
1660 | // Set the tmpcast's DebugLoc to MDep's | ||||||
1661 | TmpBitCast->setDebugLoc(MDep->getDebugLoc()); | ||||||
1662 | TmpCast = TmpBitCast; | ||||||
1663 | } | ||||||
1664 | |||||||
1665 |   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
1666 |                     << "  " << *MDep << "\n"
1667 |                     << "  " << CB << "\n");
1668 | |||||||
1669 | // Otherwise we're good! Update the byval argument. | ||||||
1670 | CB.setArgOperand(ArgNo, TmpCast); | ||||||
1671 | ++NumMemCpyInstr; | ||||||
1672 | return true; | ||||||
1673 | } | ||||||
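// Sketch of the byval forwarding above (hypothetical names):
//   memcpy(%tmp <- %src, size)
//   call void @f(i8* byval %tmp)
// becomes
//   call void @f(i8* byval %src)
// which is safe because byval already hands the callee its own copy, and
// the checks above proved %src is unmodified, large enough, and aligned.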
1674 | |||||||
1675 | /// Executes one iteration of MemCpyOptPass. | ||||||
1676 | bool MemCpyOptPass::iterateOnFunction(Function &F) { | ||||||
1677 | bool MadeChange = false; | ||||||
1678 | |||||||
1679 |   // Walk all instructions in the function.
1680 | for (BasicBlock &BB : F) { | ||||||
1681 | // Skip unreachable blocks. For example processStore assumes that an | ||||||
1682 | // instruction in a BB can't be dominated by a later instruction in the | ||||||
1683 | // same BB (which is a scenario that can happen for an unreachable BB that | ||||||
1684 | // has itself as a predecessor). | ||||||
1685 | if (!DT->isReachableFromEntry(&BB)) | ||||||
1686 | continue; | ||||||
1687 | |||||||
1688 | for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) { | ||||||
1689 | // Avoid invalidating the iterator. | ||||||
1690 | Instruction *I = &*BI++; | ||||||
1691 | |||||||
1692 | bool RepeatInstruction = false; | ||||||
1693 | |||||||
1694 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) | ||||||
1695 | MadeChange |= processStore(SI, BI); | ||||||
1696 | else if (MemSetInst *M = dyn_cast<MemSetInst>(I)) | ||||||
1697 | RepeatInstruction = processMemSet(M, BI); | ||||||
1698 | else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I)) | ||||||
1699 | RepeatInstruction = processMemCpy(M, BI); | ||||||
1700 | else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) | ||||||
1701 | RepeatInstruction = processMemMove(M); | ||||||
1702 | else if (auto *CB = dyn_cast<CallBase>(I)) { | ||||||
1703 | for (unsigned i = 0, e = CB->arg_size(); i != e; ++i) | ||||||
1704 | if (CB->isByValArgument(i)) | ||||||
1705 | MadeChange |= processByValArgument(*CB, i); | ||||||
1706 | } | ||||||
1707 | |||||||
1708 | // Reprocess the instruction if desired. | ||||||
1709 | if (RepeatInstruction) { | ||||||
1710 | if (BI != BB.begin()) | ||||||
1711 | --BI; | ||||||
1712 | MadeChange = true; | ||||||
1713 | } | ||||||
1714 | } | ||||||
1715 | } | ||||||
1716 | |||||||
1717 | return MadeChange; | ||||||
1718 | } | ||||||
1719 | |||||||
1720 | PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) { | ||||||
1721 | auto *MD = !EnableMemorySSA ? &AM.getResult<MemoryDependenceAnalysis>(F) | ||||||
1722 | : AM.getCachedResult<MemoryDependenceAnalysis>(F); | ||||||
1723 | auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); | ||||||
1724 | auto *AA = &AM.getResult<AAManager>(F); | ||||||
1725 | auto *AC = &AM.getResult<AssumptionAnalysis>(F); | ||||||
1726 | auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); | ||||||
1727 | auto *MSSA = EnableMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F) | ||||||
1728 | : AM.getCachedResult<MemorySSAAnalysis>(F); | ||||||
1729 | |||||||
1730 | bool MadeChange = | ||||||
1731 | runImpl(F, MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr); | ||||||
1732 | if (!MadeChange) | ||||||
1733 | return PreservedAnalyses::all(); | ||||||
1734 | |||||||
1735 | PreservedAnalyses PA; | ||||||
1736 | PA.preserveSet<CFGAnalyses>(); | ||||||
1737 | if (MD) | ||||||
1738 | PA.preserve<MemoryDependenceAnalysis>(); | ||||||
1739 | if (MSSA) | ||||||
1740 | PA.preserve<MemorySSAAnalysis>(); | ||||||
1741 | return PA; | ||||||
1742 | } | ||||||
1743 | |||||||
1744 | bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_, | ||||||
1745 | TargetLibraryInfo *TLI_, AliasAnalysis *AA_, | ||||||
1746 | AssumptionCache *AC_, DominatorTree *DT_, | ||||||
1747 | MemorySSA *MSSA_) { | ||||||
1748 | bool MadeChange = false; | ||||||
1749 | MD = MD_; | ||||||
1750 | TLI = TLI_; | ||||||
1751 | AA = AA_; | ||||||
1752 | AC = AC_; | ||||||
1753 | DT = DT_; | ||||||
1754 | MSSA = MSSA_; | ||||||
1755 | MemorySSAUpdater MSSAU_(MSSA_); | ||||||
1756 | MSSAU = MSSA_ ? &MSSAU_ : nullptr; | ||||||
1757 |   // If we don't have at least memset and memcpy, there is little point in doing
1758 |   // anything here. These are required by a freestanding implementation, so if
1759 |   // even they are disabled, there is no point in trying hard.
1760 | if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy)) | ||||||
1761 | return false; | ||||||
1762 | |||||||
1763 | while (true) { | ||||||
1764 | if (!iterateOnFunction(F)) | ||||||
1765 | break; | ||||||
1766 | MadeChange = true; | ||||||
1767 | } | ||||||
1768 | |||||||
1769 | if (MSSA_ && VerifyMemorySSA) | ||||||
1770 | MSSA_->verifyMemorySSA(); | ||||||
1771 | |||||||
1772 | MD = nullptr; | ||||||
1773 | return MadeChange; | ||||||
1774 | } | ||||||
1775 | |||||||
1776 | /// This is the main transformation entry point for a function. | ||||||
1777 | bool MemCpyOptLegacyPass::runOnFunction(Function &F) { | ||||||
1778 | if (skipFunction(F)) | ||||||
1779 | return false; | ||||||
1780 | |||||||
1781 | auto *MDWP = !EnableMemorySSA | ||||||
1782 | ? &getAnalysis<MemoryDependenceWrapperPass>() | ||||||
1783 | : getAnalysisIfAvailable<MemoryDependenceWrapperPass>(); | ||||||
1784 | auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); | ||||||
1785 | auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | ||||||
1786 | auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); | ||||||
1787 | auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | ||||||
1788 | auto *MSSAWP = EnableMemorySSA | ||||||
1789 | ? &getAnalysis<MemorySSAWrapperPass>() | ||||||
1790 | : getAnalysisIfAvailable<MemorySSAWrapperPass>(); | ||||||
1791 | |||||||
1792 | return Impl.runImpl(F, MDWP ? & MDWP->getMemDep() : nullptr, TLI, AA, AC, DT, | ||||||
1793 | MSSAWP ? &MSSAWP->getMSSA() : nullptr); | ||||||
1794 | } |
1 | //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// This file exposes an interface to building/using memory SSA to |
11 | /// walk memory instructions using a use/def graph. |
12 | /// |
13 | /// Memory SSA class builds an SSA form that links together memory access |
14 | /// instructions such as loads, stores, atomics, and calls. Additionally, it |
15 | /// does a trivial form of "heap versioning": every time the memory state changes
16 | /// in the program, we generate a new heap version. It generates |
17 | /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
18 | /// |
19 | /// As a trivial example, |
20 | /// define i32 @main() #0 { |
21 | /// entry: |
22 | /// %call = call noalias i8* @_Znwm(i64 4) #2 |
23 | /// %0 = bitcast i8* %call to i32* |
24 | /// %call1 = call noalias i8* @_Znwm(i64 4) #2 |
25 | /// %1 = bitcast i8* %call1 to i32* |
26 | /// store i32 5, i32* %0, align 4 |
27 | /// store i32 7, i32* %1, align 4 |
28 | /// %2 = load i32* %0, align 4 |
29 | /// %3 = load i32* %1, align 4 |
30 | /// %add = add nsw i32 %2, %3 |
31 | /// ret i32 %add |
32 | /// } |
33 | /// |
34 | /// Will become |
35 | /// define i32 @main() #0 { |
36 | /// entry: |
37 | /// ; 1 = MemoryDef(0) |
38 | /// %call = call noalias i8* @_Znwm(i64 4) #3 |
39 | /// %2 = bitcast i8* %call to i32* |
40 | /// ; 2 = MemoryDef(1) |
41 | /// %call1 = call noalias i8* @_Znwm(i64 4) #3 |
42 | /// %4 = bitcast i8* %call1 to i32* |
43 | /// ; 3 = MemoryDef(2) |
44 | /// store i32 5, i32* %2, align 4 |
45 | /// ; 4 = MemoryDef(3) |
46 | /// store i32 7, i32* %4, align 4 |
47 | /// ; MemoryUse(3) |
48 | /// %7 = load i32* %2, align 4 |
49 | /// ; MemoryUse(4) |
50 | /// %8 = load i32* %4, align 4 |
51 | /// %add = add nsw i32 %7, %8 |
52 | /// ret i32 %add |
53 | /// } |
54 | /// |
55 | /// Given this form, all the stores that could ever affect the load at %8 can be
56 | /// gotten by using the MemoryUse associated with it, and walking from use to |
57 | /// def until you hit the top of the function. |
58 | /// |
59 | /// Each def also has a list of users associated with it, so you can walk from |
60 | /// both def to users, and users to defs. Note that we disambiguate MemoryUses, |
61 | /// but not the RHS of MemoryDefs. You can see this above at %7, which would |
62 | /// otherwise be a MemoryUse(4). Being disambiguated means that for a given |
63 | /// store, all the MemoryUses on its use lists are may-aliases of that store |
64 | /// (but the MemoryDefs on its use list may not be). |
65 | /// |
66 | /// MemoryDefs are not disambiguated because it would require multiple reaching |
67 | /// definitions, which would require multiple phis, and multiple memoryaccesses |
68 | /// per instruction. |
69 | // |
70 | //===----------------------------------------------------------------------===// |
71 | |
72 | #ifndef LLVM_ANALYSIS_MEMORYSSA_H |
73 | #define LLVM_ANALYSIS_MEMORYSSA_H |
74 | |
75 | #include "llvm/ADT/DenseMap.h" |
76 | #include "llvm/ADT/GraphTraits.h" |
77 | #include "llvm/ADT/SmallPtrSet.h" |
78 | #include "llvm/ADT/SmallVector.h" |
79 | #include "llvm/ADT/ilist.h" |
80 | #include "llvm/ADT/ilist_node.h" |
81 | #include "llvm/ADT/iterator.h" |
82 | #include "llvm/ADT/iterator_range.h" |
83 | #include "llvm/ADT/simple_ilist.h" |
84 | #include "llvm/Analysis/AliasAnalysis.h" |
85 | #include "llvm/Analysis/MemoryLocation.h" |
86 | #include "llvm/Analysis/PHITransAddr.h" |
87 | #include "llvm/IR/BasicBlock.h" |
88 | #include "llvm/IR/DerivedUser.h" |
89 | #include "llvm/IR/Dominators.h" |
90 | #include "llvm/IR/Module.h" |
91 | #include "llvm/IR/Operator.h" |
92 | #include "llvm/IR/Type.h" |
93 | #include "llvm/IR/Use.h" |
94 | #include "llvm/IR/User.h" |
95 | #include "llvm/IR/Value.h" |
96 | #include "llvm/IR/ValueHandle.h" |
97 | #include "llvm/Pass.h" |
98 | #include "llvm/Support/Casting.h" |
99 | #include "llvm/Support/CommandLine.h" |
100 | #include <algorithm> |
101 | #include <cassert> |
102 | #include <cstddef> |
103 | #include <iterator> |
104 | #include <memory> |
105 | #include <utility> |
106 | |
107 | namespace llvm { |
108 | |
109 | /// Enables memory ssa as a dependency for loop passes. |
110 | extern cl::opt<bool> EnableMSSALoopDependency; |
111 | |
112 | class AllocaInst; |
113 | class Function; |
114 | class Instruction; |
115 | class MemoryAccess; |
116 | class MemorySSAWalker; |
117 | class LLVMContext; |
118 | class raw_ostream; |
119 | |
120 | namespace MSSAHelpers { |
121 | |
122 | struct AllAccessTag {}; |
123 | struct DefsOnlyTag {}; |
124 | |
125 | } // end namespace MSSAHelpers |
126 | |
127 | enum : unsigned { |
128 | // Used to signify what the default invalid ID is for MemoryAccess's |
129 | // getID() |
130 | INVALID_MEMORYACCESS_ID = -1U |
131 | }; |
132 | |
133 | template <class T> class memoryaccess_def_iterator_base; |
134 | using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>; |
135 | using const_memoryaccess_def_iterator = |
136 | memoryaccess_def_iterator_base<const MemoryAccess>; |
137 | |
138 | // The base for all memory accesses. All memory accesses in a block are |
139 | // linked together using an intrusive list. |
140 | class MemoryAccess |
141 | : public DerivedUser, |
142 | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>, |
143 | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> { |
144 | public: |
145 | using AllAccessType = |
146 | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
147 | using DefsOnlyType = |
148 | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
149 | |
150 | MemoryAccess(const MemoryAccess &) = delete; |
151 | MemoryAccess &operator=(const MemoryAccess &) = delete; |
152 | |
153 | void *operator new(size_t) = delete; |
154 | |
155 | // Methods for support type inquiry through isa, cast, and |
156 | // dyn_cast |
157 | static bool classof(const Value *V) { |
158 | unsigned ID = V->getValueID(); |
159 | return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal; |
160 | } |
161 | |
162 | BasicBlock *getBlock() const { return Block; } |
163 | |
164 | void print(raw_ostream &OS) const; |
165 | void dump() const; |
166 | |
167 | /// The user iterators for a memory access |
168 | using iterator = user_iterator; |
169 | using const_iterator = const_user_iterator; |
170 | |
171 | /// This iterator walks over all of the defs in a given |
172 | /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For |
173 | /// MemoryUse/MemoryDef, this walks the defining access. |
174 | memoryaccess_def_iterator defs_begin(); |
175 | const_memoryaccess_def_iterator defs_begin() const; |
176 | memoryaccess_def_iterator defs_end(); |
177 | const_memoryaccess_def_iterator defs_end() const; |
178 | |
179 | /// Get the iterators for the all access list and the defs only list |
180 | /// We default to the all access list. |
181 | AllAccessType::self_iterator getIterator() { |
182 | return this->AllAccessType::getIterator(); |
183 | } |
184 | AllAccessType::const_self_iterator getIterator() const { |
185 | return this->AllAccessType::getIterator(); |
186 | } |
187 | AllAccessType::reverse_self_iterator getReverseIterator() { |
188 | return this->AllAccessType::getReverseIterator(); |
189 | } |
190 | AllAccessType::const_reverse_self_iterator getReverseIterator() const { |
191 | return this->AllAccessType::getReverseIterator(); |
192 | } |
193 | DefsOnlyType::self_iterator getDefsIterator() { |
194 | return this->DefsOnlyType::getIterator(); |
195 | } |
196 | DefsOnlyType::const_self_iterator getDefsIterator() const { |
197 | return this->DefsOnlyType::getIterator(); |
198 | } |
199 | DefsOnlyType::reverse_self_iterator getReverseDefsIterator() { |
200 | return this->DefsOnlyType::getReverseIterator(); |
201 | } |
202 | DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const { |
203 | return this->DefsOnlyType::getReverseIterator(); |
204 | } |
205 | |
206 | protected: |
207 | friend class MemoryDef; |
208 | friend class MemoryPhi; |
209 | friend class MemorySSA; |
210 | friend class MemoryUse; |
211 | friend class MemoryUseOrDef; |
212 | |
213 | /// Used by MemorySSA to change the block of a MemoryAccess when it is |
214 | /// moved. |
215 | void setBlock(BasicBlock *BB) { Block = BB; } |
216 | |
217 | /// Used for debugging and tracking things about MemoryAccesses. |
218 | /// Guaranteed unique among MemoryAccesses, no guarantees otherwise. |
219 | inline unsigned getID() const; |
220 | |
221 | MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue, |
222 | BasicBlock *BB, unsigned NumOperands) |
223 | : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue), |
224 | Block(BB) {} |
225 | |
226 | // Use deleteValue() to delete a generic MemoryAccess. |
227 | ~MemoryAccess() = default; |
228 | |
229 | private: |
230 | BasicBlock *Block; |
231 | }; |
232 | |
233 | template <> |
234 | struct ilist_alloc_traits<MemoryAccess> { |
235 | static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); } |
236 | }; |
237 | |
238 | inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) { |
239 | MA.print(OS); |
240 | return OS; |
241 | } |
242 | |
243 | /// Class that has the common methods + fields of memory uses/defs. It's |
244 | /// a little awkward to have, but there are many cases where we want either a |
245 | /// use or def, and there are many cases where uses are needed (defs aren't |
246 | /// acceptable), and vice-versa. |
247 | /// |
248 | /// This class should never be instantiated directly; make a MemoryUse or |
249 | /// MemoryDef instead. |
250 | class MemoryUseOrDef : public MemoryAccess { |
251 | public: |
252 | void *operator new(size_t) = delete; |
253 | |
254 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
255 | |
256 | /// Get the instruction that this MemoryUse represents. |
257 | Instruction *getMemoryInst() const { return MemoryInstruction; } |
258 | |
259 | /// Get the access that produces the memory state used by this Use. |
260 | MemoryAccess *getDefiningAccess() const { return getOperand(0); } |
261 | |
262 | static bool classof(const Value *MA) { |
263 | return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal; |
264 | } |
265 | |
266 | // Sadly, these have to be public because they are needed in some of the |
267 | // iterators. |
268 | inline bool isOptimized() const; |
269 | inline MemoryAccess *getOptimized() const; |
270 | inline void setOptimized(MemoryAccess *); |
271 | |
272 | // Retrieve AliasResult type of the optimized access. Ideally this would be |
273 | // returned by the caching walker and may go away in the future. |
274 | Optional<AliasResult> getOptimizedAccessType() const { |
275 | return isOptimized() ? OptimizedAccessAlias : None; |
276 | } |
277 | |
278 | /// Reset the ID of what this MemoryUse was optimized to, causing it to |
279 | /// be rewalked by the walker if necessary. |
280 | /// This really should only be called by tests. |
281 | inline void resetOptimized(); |
282 | |
283 | protected: |
284 | friend class MemorySSA; |
285 | friend class MemorySSAUpdater; |
286 | |
287 | MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, |
288 | DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB, |
289 | unsigned NumOperands) |
290 | : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands), |
291 | MemoryInstruction(MI), OptimizedAccessAlias(AliasResult::MayAlias) { |
292 | setDefiningAccess(DMA); |
293 | } |
294 | |
295 | // Use deleteValue() to delete a generic MemoryUseOrDef. |
296 | ~MemoryUseOrDef() = default; |
297 | |
298 | void setOptimizedAccessType(Optional<AliasResult> AR) { |
299 | OptimizedAccessAlias = AR; |
300 | } |
301 | |
302 | void setDefiningAccess( |
303 | MemoryAccess *DMA, bool Optimized = false, |
304 | Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias)) { |
305 | if (!Optimized) { |
306 | setOperand(0, DMA); |
307 | return; |
308 | } |
309 | setOptimized(DMA); |
310 | setOptimizedAccessType(AR); |
311 | } |
312 | |
313 | private: |
314 | Instruction *MemoryInstruction; |
315 | Optional<AliasResult> OptimizedAccessAlias; |
316 | }; |
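// A minimal usage sketch (illustrative, not part of this header): assuming
// MSSA is a MemorySSA built for the enclosing function, walk from a load to
// the access that defines the memory state it reads.
//
//   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&Load)) {
//     MemoryAccess *Def = MA->getDefiningAccess();
//     // Def is the MemoryDef or MemoryPhi providing the state read by Load.
//   }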
317 | |
318 | /// Represents read-only accesses to memory |
319 | /// |
320 | /// In particular, the set of Instructions that will be represented by |
321 | /// MemoryUse's is exactly the set of Instructions for which |
322 | /// AliasAnalysis::getModRefInfo returns "Ref". |
323 | class MemoryUse final : public MemoryUseOrDef { |
324 | public: |
325 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
326 | |
327 | MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB) |
328 | : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB, |
329 | /*NumOperands=*/1) {} |
330 | |
331 | // allocate space for exactly one operand |
332 | void *operator new(size_t S) { return User::operator new(S, 1); } |
333 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
334 | |
335 | static bool classof(const Value *MA) { |
336 | return MA->getValueID() == MemoryUseVal; |
337 | } |
338 | |
339 | void print(raw_ostream &OS) const; |
340 | |
341 | void setOptimized(MemoryAccess *DMA) { |
342 | OptimizedID = DMA->getID(); |
343 | setOperand(0, DMA); |
344 | } |
345 | |
346 | bool isOptimized() const { |
347 | return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID(); |
348 | } |
349 | |
350 | MemoryAccess *getOptimized() const { |
351 | return getDefiningAccess(); |
352 | } |
353 | |
354 | void resetOptimized() { |
355 | OptimizedID = INVALID_MEMORYACCESS_ID; |
356 | } |
357 | |
358 | protected: |
359 | friend class MemorySSA; |
360 | |
361 | private: |
362 | static void deleteMe(DerivedUser *Self); |
363 | |
364 | unsigned OptimizedID = INVALID_MEMORYACCESS_ID; |
365 | }; |
366 | |
367 | template <> |
368 | struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {}; |
369 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
370 | |
371 | /// Represents a read-write access to memory, whether it is a must-alias, |
372 | /// or a may-alias. |
373 | /// |
374 | /// In particular, the set of Instructions that will be represented by |
375 | /// MemoryDef's is exactly the set of Instructions for which |
376 | /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef". |
377 | /// Note that, in order to provide def-def chains, all defs also have a use |
378 | /// associated with them. This use points to the nearest reaching |
379 | /// MemoryDef/MemoryPhi. |
380 | class MemoryDef final : public MemoryUseOrDef { |
381 | public: |
382 | friend class MemorySSA; |
383 | |
384 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
385 | |
386 | MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, |
387 | unsigned Ver) |
388 | : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB, |
389 | /*NumOperands=*/2), |
390 | ID(Ver) {} |
391 | |
392 | // allocate space for exactly two operands |
393 | void *operator new(size_t S) { return User::operator new(S, 2); } |
394 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
395 | |
396 | static bool classof(const Value *MA) { |
397 | return MA->getValueID() == MemoryDefVal; |
398 | } |
399 | |
400 | void setOptimized(MemoryAccess *MA) { |
401 | setOperand(1, MA); |
402 | OptimizedID = MA->getID(); |
403 | } |
404 | |
405 | MemoryAccess *getOptimized() const { |
406 | return cast_or_null<MemoryAccess>(getOperand(1)); |
407 | } |
408 | |
409 | bool isOptimized() const { |
410 | return getOptimized() && OptimizedID == getOptimized()->getID(); |
411 | } |
412 | |
413 | void resetOptimized() { |
414 | OptimizedID = INVALID_MEMORYACCESS_ID; |
415 | setOperand(1, nullptr); |
416 | } |
417 | |
418 | void print(raw_ostream &OS) const; |
419 | |
420 | unsigned getID() const { return ID; } |
421 | |
422 | private: |
423 | static void deleteMe(DerivedUser *Self); |
424 | |
425 | const unsigned ID; |
426 | unsigned OptimizedID = INVALID_MEMORYACCESS_ID; |
427 | }; |
428 | |
429 | template <> |
430 | struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {}; |
431 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
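// Illustrative sketch, not part of the original header: walking the def-def
// chain described above by repeatedly following the defining access. Assumes
// `Start` was obtained from MemorySSA elsewhere; the helper name is
// hypothetical.
inline void walkDefDefChainExample(MemoryDef *Start, raw_ostream &OS) {
  // liveOnEntry is the only MemoryDef with a null defining access, so this
  // loop stops at liveOnEntry or at the first MemoryPhi.
  MemoryAccess *MA = Start;
  while (auto *MUD = dyn_cast_or_null<MemoryUseOrDef>(MA)) {
    MUD->print(OS);
    OS << "\n";
    MA = MUD->getDefiningAccess();
  }
}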
432 | |
433 | template <> |
434 | struct OperandTraits<MemoryUseOrDef> { |
435 | static Use *op_begin(MemoryUseOrDef *MUD) { |
436 | if (auto *MU = dyn_cast<MemoryUse>(MUD)) |
437 | return OperandTraits<MemoryUse>::op_begin(MU); |
438 | return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD)); |
439 | } |
440 | |
441 | static Use *op_end(MemoryUseOrDef *MUD) { |
442 | if (auto *MU = dyn_cast<MemoryUse>(MUD)) |
443 | return OperandTraits<MemoryUse>::op_end(MU); |
444 | return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD)); |
445 | } |
446 | |
447 | static unsigned operands(const MemoryUseOrDef *MUD) { |
448 | if (const auto *MU = dyn_cast<MemoryUse>(MUD)) |
449 | return OperandTraits<MemoryUse>::operands(MU); |
450 | return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD)); |
451 | } |
452 | }; |
453 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
454 | |
455 | /// Represents phi nodes for memory accesses. |
456 | /// |
457 | /// These have the same semantics as regular phi nodes, with the exception that
458 | /// only one phi will ever exist in a given basic block. |
459 | /// Guaranteeing one phi per block means guaranteeing there is only ever one |
460 | /// valid reaching MemoryDef/MemoryPHI along each path to the phi node. |
461 | /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or |
462 | /// a MemoryPhi's operands. |
463 | /// That is, given |
464 | /// if (a) { |
465 | /// store %a |
466 | /// store %b |
467 | /// } |
468 | /// it *must* be transformed into |
469 | /// if (a) { |
470 | /// 1 = MemoryDef(liveOnEntry) |
471 | /// store %a |
472 | /// 2 = MemoryDef(1) |
473 | /// store %b |
474 | /// } |
475 | /// and *not* |
476 | /// if (a) { |
477 | /// 1 = MemoryDef(liveOnEntry) |
478 | /// store %a |
479 | /// 2 = MemoryDef(liveOnEntry) |
480 | /// store %b |
481 | /// } |
482 | /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the |
483 | /// end of the branch, and if there are not two phi nodes, one will be |
484 | /// disconnected completely from the SSA graph below that point. |
485 | /// Because MemoryUse's do not generate new definitions, they do not have this |
486 | /// issue. |
487 | class MemoryPhi final : public MemoryAccess { |
488 | // allocate space for exactly zero operands |
489 | void *operator new(size_t S) { return User::operator new(S); } |
490 | |
491 | public: |
492 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
493 | |
494 | /// Provide fast operand accessors |
495 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
496 | |
497 | MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0) |
498 | : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver), |
499 | ReservedSpace(NumPreds) { |
500 | allocHungoffUses(ReservedSpace); |
501 | } |
502 | |
503 | // Block iterator interface. This provides access to the list of incoming |
504 | // basic blocks, which parallels the list of incoming values. |
505 | using block_iterator = BasicBlock **; |
506 | using const_block_iterator = BasicBlock *const *; |
507 | |
508 | block_iterator block_begin() { |
509 | return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); |
510 | } |
511 | |
512 | const_block_iterator block_begin() const { |
513 | return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); |
514 | } |
515 | |
516 | block_iterator block_end() { return block_begin() + getNumOperands(); } |
517 | |
518 | const_block_iterator block_end() const { |
519 | return block_begin() + getNumOperands(); |
520 | } |
521 | |
522 | iterator_range<block_iterator> blocks() { |
523 | return make_range(block_begin(), block_end()); |
524 | } |
525 | |
526 | iterator_range<const_block_iterator> blocks() const { |
527 | return make_range(block_begin(), block_end()); |
528 | } |
529 | |
530 | op_range incoming_values() { return operands(); } |
531 | |
532 | const_op_range incoming_values() const { return operands(); } |
533 | |
534 | /// Return the number of incoming edges |
535 | unsigned getNumIncomingValues() const { return getNumOperands(); } |
536 | |
537 | /// Return incoming value number x |
538 | MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); } |
539 | void setIncomingValue(unsigned I, MemoryAccess *V) { |
540 | assert(V && "PHI node got a null value!");
541 | setOperand(I, V); |
542 | } |
543 | |
544 | static unsigned getOperandNumForIncomingValue(unsigned I) { return I; } |
545 | static unsigned getIncomingValueNumForOperand(unsigned I) { return I; } |
546 | |
547 | /// Return incoming basic block number @p i. |
548 | BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; } |
549 | |
550 | /// Return incoming basic block corresponding |
551 | /// to an operand of the PHI. |
552 | BasicBlock *getIncomingBlock(const Use &U) const { |
553 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
554 | return getIncomingBlock(unsigned(&U - op_begin())); |
555 | } |
556 | |
557 | /// Return incoming basic block corresponding |
558 | /// to value use iterator. |
559 | BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const { |
560 | return getIncomingBlock(I.getUse()); |
561 | } |
562 | |
563 | void setIncomingBlock(unsigned I, BasicBlock *BB) { |
564 | assert(BB && "PHI node got a null basic block!");
565 | block_begin()[I] = BB; |
566 | } |
567 | |
568 | /// Add an incoming value to the end of the PHI list |
569 | void addIncoming(MemoryAccess *V, BasicBlock *BB) { |
570 | if (getNumOperands() == ReservedSpace) |
571 | growOperands(); // Get more space! |
572 | // Initialize some new operands. |
573 | setNumHungOffUseOperands(getNumOperands() + 1); |
574 | setIncomingValue(getNumOperands() - 1, V); |
575 | setIncomingBlock(getNumOperands() - 1, BB); |
576 | } |
577 | |
578 | /// Return the first index of the specified basic |
579 | /// block in the value list for this PHI. Returns -1 if no instance. |
580 | int getBasicBlockIndex(const BasicBlock *BB) const { |
581 | for (unsigned I = 0, E = getNumOperands(); I != E; ++I) |
582 | if (block_begin()[I] == BB) |
583 | return I; |
584 | return -1; |
585 | } |
586 | |
587 | MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const { |
588 | int Idx = getBasicBlockIndex(BB); |
589 | assert(Idx >= 0 && "Invalid basic block argument!");
590 | return getIncomingValue(Idx); |
591 | } |
592 | |
593 | // After deleting incoming position I, the order of incoming values may change.
594 | void unorderedDeleteIncoming(unsigned I) { |
595 | unsigned E = getNumOperands(); |
596 | assert(I < E && "Cannot remove out of bounds Phi entry.");
597 | // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi |
598 | // itself should be deleted. |
599 | assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with "
600 |        "at least 2 values.");
601 | setIncomingValue(I, getIncomingValue(E - 1)); |
602 | setIncomingBlock(I, block_begin()[E - 1]); |
603 | setOperand(E - 1, nullptr); |
604 | block_begin()[E - 1] = nullptr; |
605 | setNumHungOffUseOperands(getNumOperands() - 1); |
606 | } |
607 | |
608 | // After deleting entries that satisfy Pred, remaining entries may have |
609 | // changed order. |
610 | template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) { |
611 | for (unsigned I = 0, E = getNumOperands(); I != E; ++I) |
612 | if (Pred(getIncomingValue(I), getIncomingBlock(I))) { |
613 | unorderedDeleteIncoming(I); |
614 | E = getNumOperands(); |
615 | --I; |
616 | } |
617 | assert(getNumOperands() >= 1 &&
618 |        "Cannot remove all incoming blocks in a MemoryPhi.");
619 | } |
620 | |
621 | // After deleting incoming block BB, the order of incoming blocks may change.
622 | void unorderedDeleteIncomingBlock(const BasicBlock *BB) { |
623 | unorderedDeleteIncomingIf( |
624 | [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; }); |
625 | } |
626 | |
627 | // After deleting incoming memory access MA, the order of incoming accesses
628 | // may change.
629 | void unorderedDeleteIncomingValue(const MemoryAccess *MA) { |
630 | unorderedDeleteIncomingIf( |
631 | [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; }); |
632 | } |
633 | |
634 | static bool classof(const Value *V) { |
635 | return V->getValueID() == MemoryPhiVal; |
636 | } |
637 | |
638 | void print(raw_ostream &OS) const; |
639 | |
640 | unsigned getID() const { return ID; } |
641 | |
642 | protected: |
643 | friend class MemorySSA; |
644 | |
645 | /// This is more complicated than the generic
646 | /// User::allocHungoffUses, because we have to allocate Uses for the incoming |
647 | /// values and pointers to the incoming blocks, all in one allocation. |
648 | void allocHungoffUses(unsigned N) { |
649 | User::allocHungoffUses(N, /* IsPhi */ true); |
650 | } |
651 | |
652 | private: |
653 | // For debugging only |
654 | const unsigned ID; |
655 | unsigned ReservedSpace; |
656 | |
657 | /// This grows the operand list in response to a push_back style of |
658 | /// operation. This grows the number of ops by 1.5 times. |
659 | void growOperands() { |
660 | unsigned E = getNumOperands(); |
661 | // 2 op PHI nodes are VERY common, so reserve at least enough for that. |
662 | ReservedSpace = std::max(E + E / 2, 2u); |
663 | growHungoffUses(ReservedSpace, /* IsPhi */ true); |
664 | } |
665 | |
666 | static void deleteMe(DerivedUser *Self); |
667 | }; |
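// Illustrative sketch (assumption: `Phi` was retrieved elsewhere, e.g. via a
// MemorySSA lookup for its block; the helper name is hypothetical): visiting
// each incoming (block, value) pair, just like an IR PHINode.
inline void dumpMemoryPhiIncomingExample(const MemoryPhi *Phi,
                                         raw_ostream &OS) {
  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
    OS << Phi->getIncomingBlock(I)->getName() << " -> ";
    Phi->getIncomingValue(I)->print(OS);
    OS << "\n";
  }
}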
668 | |
669 | inline unsigned MemoryAccess::getID() const { |
670 | assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
671 |        "only memory defs and phis have ids");
672 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
673 | return MD->getID(); |
674 | return cast<MemoryPhi>(this)->getID(); |
675 | } |
676 | |
677 | inline bool MemoryUseOrDef::isOptimized() const { |
678 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
679 | return MD->isOptimized(); |
680 | return cast<MemoryUse>(this)->isOptimized(); |
681 | } |
682 | |
683 | inline MemoryAccess *MemoryUseOrDef::getOptimized() const { |
684 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
685 | return MD->getOptimized(); |
686 | return cast<MemoryUse>(this)->getOptimized(); |
687 | } |
688 | |
689 | inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) { |
690 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
691 | MD->setOptimized(MA); |
692 | else |
693 | cast<MemoryUse>(this)->setOptimized(MA); |
694 | } |
695 | |
696 | inline void MemoryUseOrDef::resetOptimized() { |
697 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
698 | MD->resetOptimized(); |
699 | else |
700 | cast<MemoryUse>(this)->resetOptimized(); |
701 | } |
702 | |
703 | template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {}; |
704 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
705 | |
706 | /// Encapsulates MemorySSA, including all data associated with memory |
707 | /// accesses. |
708 | class MemorySSA { |
709 | public: |
710 | MemorySSA(Function &, AliasAnalysis *, DominatorTree *); |
711 | |
712 | // MemorySSA must remain where it's constructed; Walkers it creates store |
713 | // pointers to it. |
714 | MemorySSA(MemorySSA &&) = delete; |
715 | |
716 | ~MemorySSA(); |
717 | |
718 | MemorySSAWalker *getWalker(); |
719 | MemorySSAWalker *getSkipSelfWalker(); |
720 | |
721 | /// Given a memory Mod/Ref'ing instruction, get the MemorySSA |
722 | /// access associated with it. If passed a basic block, this returns the
723 | /// memory phi node for that block, if one exists. Otherwise, it returns a
724 | /// MemoryUseOrDef.
725 | MemoryUseOrDef *getMemoryAccess(const Instruction *I) const { |
726 | return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I)); |
727 | } |
728 | |
729 | MemoryPhi *getMemoryAccess(const BasicBlock *BB) const { |
730 | return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB))); |
731 | } |
732 | |
733 | DominatorTree &getDomTree() const { return *DT; } |
734 | |
735 | void dump() const; |
736 | void print(raw_ostream &) const; |
737 | |
738 | /// Return true if \p MA represents the live on entry value |
739 | /// |
740 | /// Loads and stores from pointer arguments and other global values may be |
741 | /// defined by memory operations that do not occur in the current function, so |
742 | /// they may be live on entry to the function. MemorySSA represents such |
743 | /// memory state by the live on entry definition, which is guaranteed to occur |
744 | /// before any other memory access in the function. |
745 | inline bool isLiveOnEntryDef(const MemoryAccess *MA) const { |
746 | return MA == LiveOnEntryDef.get(); |
747 | } |
748 | |
749 | inline MemoryAccess *getLiveOnEntryDef() const { |
750 | return LiveOnEntryDef.get(); |
751 | } |
752 | |
753 | // Sadly, iplists by default own and delete the pointers added to the
754 | // list. It's not currently possible to have two iplists for the same type, |
755 | // where one owns the pointers, and one does not. This is because the traits |
756 | // are per-type, not per-tag. If this ever changes, we should make the |
757 | // DefList an iplist. |
758 | using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
759 | using DefsList = |
760 | simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
761 | |
762 | /// Return the list of MemoryAccess's for a given basic block. |
763 | /// |
764 | /// This list is not modifiable by the user. |
765 | const AccessList *getBlockAccesses(const BasicBlock *BB) const { |
766 | return getWritableBlockAccesses(BB); |
767 | } |
768 | |
769 | /// Return the list of MemoryDef's and MemoryPhi's for a given basic |
770 | /// block. |
771 | /// |
772 | /// This list is not modifiable by the user. |
773 | const DefsList *getBlockDefs(const BasicBlock *BB) const { |
774 | return getWritableBlockDefs(BB); |
775 | } |
776 | |
777 | /// Given two memory accesses in the same basic block, determine |
778 | /// whether MemoryAccess \p A dominates MemoryAccess \p B. |
779 | bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const; |
780 | |
781 | /// Given two memory accesses in potentially different blocks, |
782 | /// determine whether MemoryAccess \p A dominates MemoryAccess \p B. |
783 | bool dominates(const MemoryAccess *A, const MemoryAccess *B) const; |
784 | |
785 | /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A |
786 | /// dominates Use \p B. |
787 | bool dominates(const MemoryAccess *A, const Use &B) const; |
788 | |
789 | /// Verify that MemorySSA is self-consistent (i.e., definitions dominate
790 | /// all uses, uses appear in the right places). This is used by unit tests. |
791 | void verifyMemorySSA() const; |
792 | |
793 | /// Used in various insertion functions to specify whether we are talking |
794 | /// about the beginning or end of a block. |
795 | enum InsertionPlace { Beginning, End, BeforeTerminator }; |
796 | |
797 | protected: |
798 | // Used by Memory SSA annotator, dumpers, and wrapper pass
799 | friend class MemorySSAAnnotatedWriter; |
800 | friend class MemorySSAPrinterLegacyPass; |
801 | friend class MemorySSAUpdater; |
802 | |
803 | void verifyOrderingDominationAndDefUses(Function &F) const; |
804 | void verifyDominationNumbers(const Function &F) const; |
805 | void verifyPrevDefInPhis(Function &F) const; |
806 | |
807 | // This is used by the use optimizer and updater. |
808 | AccessList *getWritableBlockAccesses(const BasicBlock *BB) const { |
809 | auto It = PerBlockAccesses.find(BB); |
810 | return It == PerBlockAccesses.end() ? nullptr : It->second.get(); |
811 | } |
812 | |
813 | // This is used by the use optimizer and updater. |
814 | DefsList *getWritableBlockDefs(const BasicBlock *BB) const { |
815 | auto It = PerBlockDefs.find(BB); |
816 | return It == PerBlockDefs.end() ? nullptr : It->second.get(); |
817 | } |
818 | |
819 | // These are used by the updater to perform various internal MemorySSA
820 | // machinations. They do not always leave the IR in a correct state, and
821 | // rely on the updater to fix up what they break, so they are not public.
822 | |
823 | void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where); |
824 | void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point); |
825 | |
826 | // Rename the dominator tree branch rooted at BB. |
827 | void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, |
828 | SmallPtrSetImpl<BasicBlock *> &Visited) { |
829 | renamePass(DT->getNode(BB), IncomingVal, Visited, true, true); |
830 | } |
831 | |
832 | void removeFromLookups(MemoryAccess *); |
833 | void removeFromLists(MemoryAccess *, bool ShouldDelete = true); |
834 | void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, |
835 | InsertionPlace); |
836 | void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, |
837 | AccessList::iterator); |
838 | MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *, |
839 | const MemoryUseOrDef *Template = nullptr, |
840 | bool CreationMustSucceed = true); |
841 | |
842 | private: |
843 | template <class AliasAnalysisType> class ClobberWalkerBase; |
844 | template <class AliasAnalysisType> class CachingWalker; |
845 | template <class AliasAnalysisType> class SkipSelfWalker; |
846 | class OptimizeUses; |
847 | |
848 | CachingWalker<AliasAnalysis> *getWalkerImpl(); |
849 | void buildMemorySSA(BatchAAResults &BAA); |
850 | |
851 | void prepareForMoveTo(MemoryAccess *, BasicBlock *); |
852 | void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const; |
853 | |
854 | using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>; |
855 | using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>; |
856 | |
857 | void markUnreachableAsLiveOnEntry(BasicBlock *BB); |
858 | MemoryPhi *createMemoryPhi(BasicBlock *BB); |
859 | template <typename AliasAnalysisType> |
860 | MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *, |
861 | const MemoryUseOrDef *Template = nullptr); |
862 | void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &); |
863 | MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool); |
864 | void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool); |
865 | void renamePass(DomTreeNode *, MemoryAccess *IncomingVal, |
866 | SmallPtrSetImpl<BasicBlock *> &Visited, |
867 | bool SkipVisited = false, bool RenameAllUses = false); |
868 | AccessList *getOrCreateAccessList(const BasicBlock *); |
869 | DefsList *getOrCreateDefsList(const BasicBlock *); |
870 | void renumberBlock(const BasicBlock *) const; |
871 | AliasAnalysis *AA; |
872 | DominatorTree *DT; |
873 | Function &F; |
874 | |
875 | // Memory SSA mappings |
876 | DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess; |
877 | |
878 | // These two mappings contain the main block to access/def mappings for |
879 | // MemorySSA. The list contained in PerBlockAccesses really owns all the |
880 | // MemoryAccesses. |
881 | // Both maps maintain the invariant that if a block is found in them, the |
882 | // corresponding list is not empty, and if a block is not found in them, the |
883 | // corresponding list is empty. |
884 | AccessMap PerBlockAccesses; |
885 | DefsMap PerBlockDefs; |
886 | std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef; |
887 | |
888 | // Domination mappings |
889 | // Note that the numbering is local to a block, even though the map is |
890 | // global. |
891 | mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid; |
892 | mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering; |
893 | |
894 | // Memory SSA building info |
895 | std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase; |
896 | std::unique_ptr<CachingWalker<AliasAnalysis>> Walker; |
897 | std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker; |
898 | unsigned NextID; |
899 | }; |
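// Illustrative sketch (assumptions: MemorySSA has already been built for `F`;
// the helper name is hypothetical): querying the per-instruction and
// per-block accesses through the lookup API above.
inline void printFunctionAccessesExample(MemorySSA &MSSA, Function &F,
                                         raw_ostream &OS) {
  for (BasicBlock &BB : F) {
    // A block has at most one MemoryPhi.
    if (MemoryPhi *Phi = MSSA.getMemoryAccess(&BB)) {
      Phi->print(OS);
      OS << "\n";
    }
    for (Instruction &I : BB)
      if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I)) {
        MA->print(OS);
        if (MSSA.isLiveOnEntryDef(MA->getDefiningAccess()))
          OS << "  ; defined by liveOnEntry";
        OS << "\n";
      }
  }
}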
900 | |
901 | // Internal MemorySSA utils, for use by MemorySSA classes and walkers |
902 | class MemorySSAUtil { |
903 | protected: |
904 | friend class GVNHoist; |
905 | friend class MemorySSAWalker; |
906 | |
907 | // This function should not be used by new passes. |
908 | static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, |
909 | AliasAnalysis &AA); |
910 | }; |
911 | |
912 | // This pass does eager building and then printing of MemorySSA. It is used by |
913 | // the tests to build, dump, and verify MemorySSA.
914 | class MemorySSAPrinterLegacyPass : public FunctionPass { |
915 | public: |
916 | MemorySSAPrinterLegacyPass(); |
917 | |
918 | bool runOnFunction(Function &) override; |
919 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
920 | |
921 | static char ID; |
922 | }; |
923 | |
924 | /// An analysis that produces \c MemorySSA for a function. |
925 | /// |
926 | class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> { |
927 | friend AnalysisInfoMixin<MemorySSAAnalysis>; |
928 | |
929 | static AnalysisKey Key; |
930 | |
931 | public: |
932 | // Wrap MemorySSA result to ensure address stability of internal MemorySSA |
933 | // pointers after construction. Use a wrapper class instead of plain |
934 | // unique_ptr<MemorySSA> to avoid build breakage on MSVC. |
935 | struct Result { |
936 | Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {} |
937 | |
938 | MemorySSA &getMSSA() { return *MSSA.get(); } |
939 | |
940 | std::unique_ptr<MemorySSA> MSSA; |
941 | |
942 | bool invalidate(Function &F, const PreservedAnalyses &PA, |
943 | FunctionAnalysisManager::Invalidator &Inv); |
944 | }; |
945 | |
946 | Result run(Function &F, FunctionAnalysisManager &AM); |
947 | }; |
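// Illustrative sketch (the pass name is hypothetical): consuming the analysis
// result above from a new-pass-manager function pass.
struct ExampleMSSAConsumerPass : PassInfoMixin<ExampleMSSAConsumerPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    MSSA.verifyMemorySSA(); // Example use only; a real pass would transform F.
    return PreservedAnalyses::all();
  }
};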
948 | |
949 | /// Printer pass for \c MemorySSA. |
950 | class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> { |
951 | raw_ostream &OS; |
952 | |
953 | public: |
954 | explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {} |
955 | |
956 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
957 | }; |
958 | |
959 | /// Verifier pass for \c MemorySSA. |
960 | struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> { |
961 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
962 | }; |
963 | |
964 | /// Legacy analysis pass which computes \c MemorySSA. |
965 | class MemorySSAWrapperPass : public FunctionPass { |
966 | public: |
967 | MemorySSAWrapperPass(); |
968 | |
969 | static char ID; |
970 | |
971 | bool runOnFunction(Function &) override; |
972 | void releaseMemory() override; |
973 | MemorySSA &getMSSA() { return *MSSA; } |
974 | const MemorySSA &getMSSA() const { return *MSSA; } |
975 | |
976 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
977 | |
978 | void verifyAnalysis() const override; |
979 | void print(raw_ostream &OS, const Module *M = nullptr) const override; |
980 | |
981 | private: |
982 | std::unique_ptr<MemorySSA> MSSA; |
983 | }; |
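// Illustrative sketch (comment only): a legacy pass that declared a
// dependency on MemorySSAWrapperPass in getAnalysisUsage would retrieve the
// analysis in runOnFunction with:
//   MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();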
984 | |
985 | /// This is the generic walker interface for walkers of MemorySSA. |
986 | /// Walkers are used to further disambiguate the def-use chains
987 | /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives |
988 | /// you. |
989 | /// In particular, while the def-use chains provide basic information, and are |
990 | /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a |
991 | /// MemoryUse as AliasAnalysis considers it, a user may want better or other
992 | /// information. In particular, they may want to use SCEV info to further |
993 | /// disambiguate memory accesses, or they may want the nearest dominating |
994 | /// may-aliasing MemoryDef for a call or a store. This API enables a |
995 | /// standardized interface to getting and using that info. |
996 | class MemorySSAWalker { |
997 | public: |
998 | MemorySSAWalker(MemorySSA *); |
999 | virtual ~MemorySSAWalker() = default; |
1000 | |
1001 | using MemoryAccessSet = SmallVector<MemoryAccess *, 8>; |
1002 | |
1003 | /// Given a memory Mod/Ref/ModRef'ing instruction, calling this |
1004 | /// will give you the nearest dominating MemoryAccess that Mod's the location |
1005 | /// the instruction accesses (by skipping any def which AA can prove does not |
1006 | /// alias the location(s) accessed by the instruction given). |
1007 | /// |
1008 | /// Note that this will return a single access, and it must dominate the |
1009 | /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction, |
1010 | /// this will return the MemoryPhi, not the operand. This means that |
1011 | /// given: |
1012 | /// if (a) { |
1013 | /// 1 = MemoryDef(liveOnEntry) |
1014 | /// store %a |
1015 | /// } else { |
1016 | /// 2 = MemoryDef(liveOnEntry) |
1017 | /// store %b |
1018 | /// } |
1019 | /// 3 = MemoryPhi(2, 1) |
1020 | /// MemoryUse(3) |
1021 | /// load %a |
1022 | /// |
1023 | /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef |
1024 | /// in the if (a) branch. |
1025 | MemoryAccess *getClobberingMemoryAccess(const Instruction *I) { |
1026 | MemoryAccess *MA = MSSA->getMemoryAccess(I); |
1027 | assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
1028 | return getClobberingMemoryAccess(MA); |
1029 | } |
1030 | |
1031 | /// Does the same thing as getClobberingMemoryAccess(const Instruction *I), |
1032 | /// but takes a MemoryAccess instead of an Instruction. |
1033 | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0; |
1034 | |
1035 | /// Given a potentially clobbering memory access and a new location, |
1036 | /// calling this will give you the nearest dominating clobbering MemoryAccess |
1037 | /// (by skipping non-aliasing def links). |
1038 | /// |
1039 | /// This version of the function is mainly used to disambiguate phi translated |
1040 | /// pointers, where the value of a pointer may have changed from the initial |
1041 | /// memory access. Note that this expects to be handed either a MemoryUse, |
1042 | /// or an already potentially clobbering access. Unlike the above API, if |
1043 | /// given a MemoryDef that clobbers the pointer as the starting access, it |
1044 | /// will return that MemoryDef, whereas the above would return the clobber |
1045 | /// starting from the use side of the memory def. |
1046 | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1047 | const MemoryLocation &) = 0; |
1048 | |
1049 | /// Given a memory access, invalidate anything this walker knows about |
1050 | /// that access. |
1051 | /// This API is used by walkers that store information to perform basic cache |
1052 | /// invalidation. This will be called by MemorySSA at appropriate times for |
1053 | /// the walker it uses or returns. |
1054 | virtual void invalidateInfo(MemoryAccess *) {} |
1055 | |
1056 | protected: |
1057 | friend class MemorySSA; // For updating MSSA pointer in MemorySSA move |
1058 | // constructor. |
1059 | MemorySSA *MSSA; |
1060 | }; |
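// Illustrative sketch (assumption: `I` is a load or store that MemorySSA
// knows about; the helper name is hypothetical): asking the default walker
// for the nearest dominating clobber, as described above.
inline MemoryAccess *nearestClobberExample(MemorySSA &MSSA, Instruction *I) {
  MemorySSAWalker *Walker = MSSA.getWalker();
  // May return a MemoryDef, a MemoryPhi, or the liveOnEntry access.
  return Walker->getClobberingMemoryAccess(I);
}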
1061 | |
1062 | /// A MemorySSAWalker that does no alias queries, or anything else. It |
1063 | /// simply returns the links as they were constructed by the builder. |
1064 | class DoNothingMemorySSAWalker final : public MemorySSAWalker { |
1065 | public: |
1066 | // Keep the overrides below from hiding the Instruction overload of |
1067 | // getClobberingMemoryAccess. |
1068 | using MemorySSAWalker::getClobberingMemoryAccess; |
1069 | |
1070 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override; |
1071 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1072 | const MemoryLocation &) override; |
1073 | }; |
1074 | |
1075 | using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>; |
1076 | using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>; |
1077 | |
1078 | /// Iterator base class used to implement const and non-const iterators |
1079 | /// over the defining accesses of a MemoryAccess. |
1080 | template <class T> |
1081 | class memoryaccess_def_iterator_base |
1082 | : public iterator_facade_base<memoryaccess_def_iterator_base<T>, |
1083 | std::forward_iterator_tag, T, ptrdiff_t, T *, |
1084 | T *> { |
1085 | using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base; |
1086 | |
1087 | public: |
1088 | memoryaccess_def_iterator_base(T *Start) : Access(Start) {} |
1089 | memoryaccess_def_iterator_base() = default; |
1090 | |
1091 | bool operator==(const memoryaccess_def_iterator_base &Other) const { |
1092 | return Access == Other.Access && (!Access || ArgNo == Other.ArgNo); |
1093 | } |
1094 | |
1095 | // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the |
1096 | // block from the operand in constant time (In a PHINode, the uselist has |
1097 | // both, so it's just subtraction). We provide it as part of the |
1098 | // iterator to avoid callers having to do a linear walk to get the block.
1099 | // If the operation becomes constant time on MemoryPHI's, this bit of |
1100 | // abstraction breaking should be removed. |
1101 | BasicBlock *getPhiArgBlock() const { |
1102 | MemoryPhi *MP = dyn_cast<MemoryPhi>(Access); |
1103 | assert(MP && "Tried to get phi arg block when not iterating over a PHI");
1104 | return MP->getIncomingBlock(ArgNo); |
1105 | } |
1106 | |
1107 | typename std::iterator_traits<BaseT>::pointer operator*() const { |
1108 | assert(Access && "Tried to access past the end of our iterator")(static_cast <bool> (Access && "Tried to access past the end of our iterator" ) ? void (0) : __assert_fail ("Access && \"Tried to access past the end of our iterator\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/Analysis/MemorySSA.h" , 1108, __extension__ __PRETTY_FUNCTION__)); |
1109 | // Go to the first argument for phis, and the defining access for everything |
1110 | // else. |
1111 | if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) |
1112 | return MP->getIncomingValue(ArgNo); |
1113 | return cast<MemoryUseOrDef>(Access)->getDefiningAccess(); |
1114 | } |
1115 | |
1116 | using BaseT::operator++; |
1117 | memoryaccess_def_iterator_base &operator++() { |
1118 | assert(Access && "Hit end of iterator")(static_cast <bool> (Access && "Hit end of iterator" ) ? void (0) : __assert_fail ("Access && \"Hit end of iterator\"" , "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/Analysis/MemorySSA.h" , 1118, __extension__ __PRETTY_FUNCTION__)); |
1119 | if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) { |
1120 | if (++ArgNo >= MP->getNumIncomingValues()) { |
1121 | ArgNo = 0; |
1122 | Access = nullptr; |
1123 | } |
1124 | } else { |
1125 | Access = nullptr; |
1126 | } |
1127 | return *this; |
1128 | } |
1129 | |
1130 | private: |
1131 | T *Access = nullptr; |
1132 | unsigned ArgNo = 0; |
1133 | }; |
1134 | |
1135 | inline memoryaccess_def_iterator MemoryAccess::defs_begin() { |
1136 | return memoryaccess_def_iterator(this); |
1137 | } |
1138 | |
1139 | inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const { |
1140 | return const_memoryaccess_def_iterator(this); |
1141 | } |
1142 | |
1143 | inline memoryaccess_def_iterator MemoryAccess::defs_end() { |
1144 | return memoryaccess_def_iterator(); |
1145 | } |
1146 | |
1147 | inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const { |
1148 | return const_memoryaccess_def_iterator(); |
1149 | } |
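// Illustrative sketch (helper name hypothetical): counting defining accesses
// with the iterators above; one step for a MemoryUseOrDef, one step per
// incoming value for a MemoryPhi.
inline unsigned countDefiningAccessesExample(MemoryAccess *MA) {
  unsigned N = 0;
  for (auto It = MA->defs_begin(), E = MA->defs_end(); It != E; ++It)
    ++N;
  return N;
}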
1150 | |
1151 | /// GraphTraits for a MemoryAccess, which walks defs in the normal case, |
1152 | /// and uses in the inverse case. |
1153 | template <> struct GraphTraits<MemoryAccess *> { |
1154 | using NodeRef = MemoryAccess *; |
1155 | using ChildIteratorType = memoryaccess_def_iterator; |
1156 | |
1157 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1158 | static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); } |
1159 | static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); } |
1160 | }; |
1161 | |
1162 | template <> struct GraphTraits<Inverse<MemoryAccess *>> { |
1163 | using NodeRef = MemoryAccess *; |
1164 | using ChildIteratorType = MemoryAccess::iterator; |
1165 | |
1166 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1167 | static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); } |
1168 | static ChildIteratorType child_end(NodeRef N) { return N->user_end(); } |
1169 | }; |
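// Illustrative sketch (assumes llvm/ADT/DepthFirstIterator.h is available;
// the helper name is hypothetical): the GraphTraits specializations above let
// generic graph algorithms walk upward through the def graph.
inline void visitUpwardDefGraphExample(MemoryAccess *Root, raw_ostream &OS) {
  for (MemoryAccess *MA : depth_first(Root)) {
    MA->print(OS);
    OS << "\n";
  }
}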
1170 | |
1171 | /// Provide an iterator that walks defs, giving both the memory access
1172 | /// and the current pointer location, updating the pointer location as it |
1173 | /// changes due to phi node translation. |
1174 | /// |
1175 | /// This iterator, while somewhat specialized, is what most clients actually |
1176 | /// want when walking upwards through MemorySSA def chains. It takes a pair of |
1177 | /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the |
1178 | /// memory location through phi nodes for the user. |
1179 | class upward_defs_iterator |
1180 | : public iterator_facade_base<upward_defs_iterator, |
1181 | std::forward_iterator_tag, |
1182 | const MemoryAccessPair> { |
1183 | using BaseT = upward_defs_iterator::iterator_facade_base; |
1184 | |
1185 | public: |
1186 | upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT, |
1187 | bool *PerformedPhiTranslation = nullptr) |
1188 | : DefIterator(Info.first), Location(Info.second), |
1189 | OriginalAccess(Info.first), DT(DT), |
1190 | PerformedPhiTranslation(PerformedPhiTranslation) { |
1191 | CurrentPair.first = nullptr; |
1192 | |
1193 | WalkingPhi = Info.first && isa<MemoryPhi>(Info.first); |
1194 | fillInCurrentPair(); |
1195 | } |
1196 | |
1197 | upward_defs_iterator() { CurrentPair.first = nullptr; } |
1198 | |
1199 | bool operator==(const upward_defs_iterator &Other) const { |
1200 | return DefIterator == Other.DefIterator; |
1201 | } |
1202 | |
1203 | typename std::iterator_traits<BaseT>::reference operator*() const { |
1204 | assert(DefIterator != OriginalAccess->defs_end() &&
1205 |        "Tried to access past the end of our iterator");
1206 | return CurrentPair; |
1207 | } |
1208 | |
1209 | using BaseT::operator++; |
1210 | upward_defs_iterator &operator++() { |
1211 | assert(DefIterator != OriginalAccess->defs_end() &&
1212 |        "Tried to access past the end of the iterator");
1213 | ++DefIterator; |
1214 | if (DefIterator != OriginalAccess->defs_end()) |
1215 | fillInCurrentPair(); |
1216 | return *this; |
1217 | } |
1218 | |
1219 | BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); } |
1220 | |
1221 | private: |
1222 | /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible |
1223 | /// loop. In particular, this guarantees that it only references a single |
1224 | /// MemoryLocation during execution of the containing function. |
1225 | bool IsGuaranteedLoopInvariant(Value *Ptr) const; |
1226 | |
1227 | void fillInCurrentPair() { |
1228 | CurrentPair.first = *DefIterator; |
1229 | CurrentPair.second = Location; |
1230 | if (WalkingPhi && Location.Ptr) { |
1231 | // Mark size as unknown, if the location is not guaranteed to be |
1232 | // loop-invariant for any possible loop in the function. Setting the size |
1233 | // to unknown guarantees that any memory accesses that access locations |
1234 | // after the pointer are considered as clobbers, which is important to |
1235 | // catch loop carried dependences. |
1236 | if (Location.Ptr && |
1237 | !IsGuaranteedLoopInvariant(const_cast<Value *>(Location.Ptr))) |
1238 | CurrentPair.second = |
1239 | Location.getWithNewSize(LocationSize::beforeOrAfterPointer()); |
1240 | PHITransAddr Translator( |
1241 | const_cast<Value *>(Location.Ptr), |
1242 | OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr); |
1243 | |
1244 | if (!Translator.PHITranslateValue(OriginalAccess->getBlock(), |
1245 | DefIterator.getPhiArgBlock(), DT, |
1246 | true)) { |
1247 | Value *TransAddr = Translator.getAddr(); |
1248 | if (TransAddr != Location.Ptr) { |
1249 | CurrentPair.second = CurrentPair.second.getWithNewPtr(TransAddr); |
1250 | |
1251 | if (TransAddr && |
1252 | !IsGuaranteedLoopInvariant(const_cast<Value *>(TransAddr))) |
1253 | CurrentPair.second = CurrentPair.second.getWithNewSize( |
1254 | LocationSize::beforeOrAfterPointer()); |
1255 | |
1256 | if (PerformedPhiTranslation) |
1257 | *PerformedPhiTranslation = true; |
1258 | } |
1259 | } |
1260 | } |
1261 | } |
1262 | |
1263 | MemoryAccessPair CurrentPair; |
1264 | memoryaccess_def_iterator DefIterator; |
1265 | MemoryLocation Location; |
1266 | MemoryAccess *OriginalAccess = nullptr; |
1267 | DominatorTree *DT = nullptr; |
1268 | bool WalkingPhi = false; |
1269 | bool *PerformedPhiTranslation = nullptr; |
1270 | }; |
1271 | |
1272 | inline upward_defs_iterator |
1273 | upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT, |
1274 | bool *PerformedPhiTranslation = nullptr) { |
1275 | return upward_defs_iterator(Pair, &DT, PerformedPhiTranslation); |
1276 | } |
1277 | |
1278 | inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); } |
1279 | |
1280 | inline iterator_range<upward_defs_iterator> |
1281 | upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) { |
1282 | return make_range(upward_defs_begin(Pair, DT), upward_defs_end()); |
1283 | } |
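// Illustrative sketch (assumptions: `Start` and `Loc` describe the access and
// location to walk from; the helper name is hypothetical): each step yields
// the access plus the location, rewritten across MemoryPhi edges as needed.
inline void walkUpwardDefsExample(MemoryAccess *Start,
                                  const MemoryLocation &Loc, DominatorTree &DT,
                                  raw_ostream &OS) {
  for (const MemoryAccessPair &P : upward_defs({Start, Loc}, DT)) {
    P.first->print(OS);
    OS << "\n";
  }
}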
1284 | |
1285 | /// Walks the defining accesses of MemoryDefs. Stops after we hit something that |
1286 | /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when |
1287 | /// comparing against a null def_chain_iterator, this will compare equal only |
1288 | /// after walking said Phi/liveOnEntry. |
1289 | /// |
1290 | /// The UseOptimizedChain flag specifies whether to walk the clobbering |
1291 | /// access chain, or all the accesses. |
1292 | /// |
1293 | /// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
1294 | /// a MemoryDef will walk all MemoryDefs above it in the program until it hits |
1295 | /// a phi node. The optimized chain walks the clobbering access of a store. |
1296 | /// So if you are just trying to find, given a store, what the next |
1297 | /// thing that would clobber the same memory is, you want the optimized chain. |
1298 | template <class T, bool UseOptimizedChain = false> |
1299 | struct def_chain_iterator |
1300 | : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>, |
1301 | std::forward_iterator_tag, MemoryAccess *> { |
1302 | def_chain_iterator() : MA(nullptr) {} |
1303 | def_chain_iterator(T MA) : MA(MA) {} |
1304 | |
1305 | T operator*() const { return MA; } |
1306 | |
1307 | def_chain_iterator &operator++() { |
1308 | // N.B. liveOnEntry has a null defining access. |
1309 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) { |
1310 | if (UseOptimizedChain && MUD->isOptimized()) |
1311 | MA = MUD->getOptimized(); |
1312 | else |
1313 | MA = MUD->getDefiningAccess(); |
1314 | } else { |
1315 | MA = nullptr; |
1316 | } |
1317 | |
1318 | return *this; |
1319 | } |
1320 | |
1321 | bool operator==(const def_chain_iterator &O) const { return MA == O.MA; } |
1322 | |
1323 | private: |
1324 | T MA; |
1325 | }; |
1326 | |
1327 | template <class T> |
1328 | inline iterator_range<def_chain_iterator<T>> |
1329 | def_chain(T MA, MemoryAccess *UpTo = nullptr) { |
1330 | #ifdef EXPENSIVE_CHECKS |
1331 | assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1332 |        "UpTo isn't in the def chain!");
1333 | #endif |
1334 | return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo)); |
1335 | } |
1336 | |
1337 | template <class T> |
1338 | inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) { |
1339 | return make_range(def_chain_iterator<T, true>(MA), |
1340 | def_chain_iterator<T, true>(nullptr)); |
1341 | } |
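// Illustrative sketch (helper name hypothetical): measuring how many accesses
// sit between `MA` and the first MemoryPhi or liveOnEntry via def_chain.
inline unsigned defChainLengthExample(MemoryAccess *MA) {
  unsigned Len = 0;
  for (MemoryAccess *D : def_chain(MA)) {
    (void)D;
    ++Len;
  }
  return Len;
}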
1342 | |
1343 | } // end namespace llvm |
1344 | |
1345 | #endif // LLVM_ANALYSIS_MEMORYSSA_H |
1 | //===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the generic AliasAnalysis interface, which is used as the |
10 | // common interface used by all clients of alias analysis information, and |
11 | // implemented by all alias analysis implementations. Mod/Ref information is |
12 | // also captured by this interface. |
13 | // |
14 | // Implementations of this interface must implement the various virtual methods, |
15 | // which automatically provide functionality for the entire suite of client
16 | // APIs. |
17 | // |
18 | // This API identifies memory regions with the MemoryLocation class. The pointer |
19 | // component specifies the base memory address of the region. The Size specifies |
20 | // the maximum size (in address units) of the memory region, or |
21 | // MemoryLocation::UnknownSize if the size is not known. The TBAA tag |
22 | // identifies the "type" of the memory reference; see the |
23 | // TypeBasedAliasAnalysis class for details. |
24 | // |
25 | // Some non-obvious details include: |
26 | // - Pointers that point to two completely different objects in memory never |
27 | // alias, regardless of the value of the Size component. |
28 | // - NoAlias doesn't imply unequal pointers. The most obvious example of this
29 | // is two pointers to constant memory. Even if they are equal, constant |
30 | // memory is never stored to, so there will never be any dependencies. |
31 | // In this and other situations, the pointers may be both NoAlias and |
32 | // MustAlias at the same time. The current API can only return one result, |
33 | // though this is rarely a problem in practice. |
34 | // |
35 | //===----------------------------------------------------------------------===// |
36 | |
37 | #ifndef LLVM_ANALYSIS_ALIASANALYSIS_H |
38 | #define LLVM_ANALYSIS_ALIASANALYSIS_H |
39 | |
40 | #include "llvm/ADT/DenseMap.h" |
41 | #include "llvm/ADT/None.h" |
42 | #include "llvm/ADT/Optional.h" |
43 | #include "llvm/ADT/SmallVector.h" |
44 | #include "llvm/Analysis/MemoryLocation.h" |
45 | #include "llvm/IR/PassManager.h" |
46 | #include "llvm/Pass.h" |
47 | #include <cstdint> |
48 | #include <functional> |
49 | #include <memory> |
50 | #include <vector> |
51 | |
52 | namespace llvm { |
53 | |
54 | class AnalysisUsage; |
55 | class AtomicCmpXchgInst; |
56 | class BasicAAResult; |
57 | class BasicBlock; |
58 | class CatchPadInst; |
59 | class CatchReturnInst; |
60 | class DominatorTree; |
61 | class FenceInst; |
62 | class Function; |
63 | class InvokeInst; |
64 | class PreservedAnalyses; |
65 | class TargetLibraryInfo; |
66 | class Value; |
67 | |
68 | /// The possible results of an alias query. |
69 | /// |
70 | /// These results are always computed between two MemoryLocation objects as |
71 | /// a query to some alias analysis. |
72 | /// |
73 | /// Note that these are unscoped enumerations because we would like to support |
74 | /// implicitly testing a result for the existence of any possible aliasing with |
75 | /// a conversion to bool, but an "enum class" doesn't support this. The |
76 | /// canonical names from the literature are suffixed and unique anyway, and so
77 | /// they serve as global constants in LLVM for these results. |
78 | /// |
79 | /// See docs/AliasAnalysis.html for more information on the specific meanings |
80 | /// of these values. |
81 | class AliasResult { |
82 | private: |
83 | static const int OffsetBits = 23; |
84 | static const int AliasBits = 8; |
85 | static_assert(AliasBits + 1 + OffsetBits <= 32, |
86 | "AliasResult size is intended to be 4 bytes!"); |
87 | |
88 | unsigned int Alias : AliasBits; |
89 | unsigned int HasOffset : 1; |
90 | signed int Offset : OffsetBits; |
91 | |
92 | public: |
93 | enum Kind : uint8_t { |
94 | /// The two locations do not alias at all. |
95 | /// |
96 | /// This value is arranged to convert to false, while all other values |
97 | /// convert to true. This allows a boolean context to convert the result to |
98 | /// a binary flag indicating whether there is the possibility of aliasing. |
99 | NoAlias = 0, |
100 | /// The two locations may or may not alias. This is the least precise |
101 | /// result. |
102 | MayAlias, |
103 | /// The two locations alias, but only due to a partial overlap. |
104 | PartialAlias, |
105 | /// The two locations precisely alias each other. |
106 | MustAlias, |
107 | }; |
108 | static_assert(MustAlias < (1 << AliasBits), |
109 | "Not enough bit field size for the enum!"); |
110 | |
111 | explicit AliasResult() = delete; |
112 | constexpr AliasResult(const Kind &Alias) |
113 | : Alias(Alias), HasOffset(false), Offset(0) {} |
114 | |
115 | operator Kind() const { return static_cast<Kind>(Alias); } |
116 | |
117 | constexpr bool hasOffset() const { return HasOffset; } |
118 | constexpr int32_t getOffset() const { |
119 | assert(HasOffset && "No offset!");
120 | return Offset; |
121 | } |
122 | void setOffset(int32_t NewOffset) { |
123 | if (isInt<OffsetBits>(NewOffset)) { |
124 | HasOffset = true; |
125 | Offset = NewOffset; |
126 | } |
127 | } |
128 | |
129 | /// Helper for processing AliasResult for swapped memory location pairs. |
130 | void swap(bool DoSwap = true) { |
131 | if (DoSwap && hasOffset()) |
132 | setOffset(-getOffset()); |
133 | } |
134 | }; |
135 | |
136 | static_assert(sizeof(AliasResult) == 4, |
137 | "AliasResult size is intended to be 4 bytes!"); |
138 | |
139 | /// << operator for AliasResult. |
140 | raw_ostream &operator<<(raw_ostream &OS, AliasResult AR); |
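// [Illustrative sketch] The boolean conversion and offset accessors in use;
// NoAlias is the only value that converts to false. The function name is
// hypothetical.
inline bool demoAliasResult() {
  AliasResult AR = AliasResult::PartialAlias;
  AR.setOffset(8); // Record a known byte offset between the two locations.
  if (AR && AR.hasOffset()) // PartialAlias converts to true.
    return AR.getOffset() == 8;
  return false;
}
// [End sketch]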
141 | |
142 | /// Flags indicating whether a memory access modifies or references memory. |
143 | /// |
144 | /// This is no access at all, a modification, a reference, or both |
145 | /// a modification and a reference. These are specifically structured such that |
146 | /// they form a three bit matrix and bit-tests for 'mod' or 'ref' or 'must' |
147 | /// work with any of the possible values. |
148 | enum class ModRefInfo : uint8_t { |
149 | /// Must is provided for completeness, but no routines will return only |
150 | /// Must today. See definition of Must below. |
151 | Must = 0, |
152 | /// The access may reference the value stored in memory, |
153 | /// a mustAlias relation was found, and no mayAlias or partialAlias found. |
154 | MustRef = 1, |
155 | /// The access may modify the value stored in memory, |
156 | /// a mustAlias relation was found, and no mayAlias or partialAlias found. |
157 | MustMod = 2, |
158 | /// The access may reference, modify or both the value stored in memory, |
159 | /// a mustAlias relation was found, and no mayAlias or partialAlias found. |
160 | MustModRef = MustRef | MustMod, |
161 | /// The access neither references nor modifies the value stored in memory. |
162 | NoModRef = 4, |
163 | /// The access may reference the value stored in memory. |
164 | Ref = NoModRef | MustRef, |
165 | /// The access may modify the value stored in memory. |
166 | Mod = NoModRef | MustMod, |
167 | /// The access may reference and may modify the value stored in memory. |
168 | ModRef = Ref | Mod, |
169 | |
170 | /// About Must: |
171 | /// Must is set in a best effort manner. |
172 | /// We usually do not try hard to infer Must; instead, it is merely
173 | /// another piece of "free" information that is presented when available. |
174 | /// Must being set means a MustAlias was certainly found. For calls,
175 | /// where multiple arguments are checked (argmemonly), this means that
176 | /// only MustAlias or NoAlias results were found.
177 | /// Must is not set for RAR accesses, even if the two locations must |
178 | /// alias. The reason is that two read accesses translate to an early return |
179 | /// of NoModRef. An additional alias check to set Must may be |
180 | /// expensive. Other cases may also not set Must (e.g. callCapturesBefore).
181 | /// We refer to Must being *set* when the most significant bit is *cleared*. |
182 | /// Conversely we *clear* Must information by *setting* the Must bit to 1. |
183 | }; |
184 | |
185 | LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) {
186 | return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) == |
187 | static_cast<int>(ModRefInfo::Must); |
188 | } |
189 | LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) {
190 | return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef); |
191 | } |
192 | LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) {
193 | return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) == |
194 | static_cast<int>(ModRefInfo::MustModRef); |
195 | } |
196 | LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) {
197 | return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustMod); |
198 | } |
199 | LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) {
200 | return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustRef); |
201 | } |
202 | LLVM_NODISCARD inline bool isMustSet(const ModRefInfo MRI) {
203 | return !(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::NoModRef)); |
204 | } |
205 | |
206 | LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) {
207 | return ModRefInfo(static_cast<int>(MRI) | |
208 | static_cast<int>(ModRefInfo::MustMod)); |
209 | } |
210 | LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
211 | return ModRefInfo(static_cast<int>(MRI) | |
212 | static_cast<int>(ModRefInfo::MustRef)); |
213 | } |
214 | LLVM_NODISCARD inline ModRefInfo setMust(const ModRefInfo MRI) {
215 | return ModRefInfo(static_cast<int>(MRI) & |
216 | static_cast<int>(ModRefInfo::MustModRef)); |
217 | } |
218 | LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
219 | return ModRefInfo(static_cast<int>(MRI) | |
220 | static_cast<int>(ModRefInfo::MustModRef)); |
221 | } |
222 | LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
223 | return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref)); |
224 | } |
225 | LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) {
226 | return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod)); |
227 | } |
228 | LLVM_NODISCARD inline ModRefInfo clearMust(const ModRefInfo MRI) {
229 | return ModRefInfo(static_cast<int>(MRI) | |
230 | static_cast<int>(ModRefInfo::NoModRef)); |
231 | } |
232 | LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1,
233 | const ModRefInfo MRI2) { |
234 | return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2)); |
235 | } |
236 | LLVM_NODISCARD inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
237 | const ModRefInfo MRI2) { |
238 | return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2)); |
239 | } |
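// [Illustrative sketch] Identities that follow from the encoding above; note
// the inverted Must bit (Must is set exactly when the NoModRef bit is clear).
// The function name is hypothetical.
inline void demoModRefInfo() {
  assert(isMustSet(ModRefInfo::MustMod));   // NoModRef bit clear => Must set.
  assert(!isMustSet(ModRefInfo::Mod));      // Mod == NoModRef | MustMod.
  assert(clearMust(ModRefInfo::MustMod) == ModRefInfo::Mod);
  assert(isNoModRef(ModRefInfo::NoModRef)); // Neither Mod nor Ref bit set.
  assert(unionModRef(ModRefInfo::Ref, ModRefInfo::Mod) == ModRefInfo::ModRef);
}
// [End sketch]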
240 | |
241 | /// The locations at which a function might access memory. |
242 | /// |
243 | /// These are primarily used in conjunction with the \c AccessKind bits to |
244 | /// describe both the nature of access and the locations of access for a |
245 | /// function call. |
246 | enum FunctionModRefLocation { |
247 | /// Base case is no access to memory. |
248 | FMRL_Nowhere = 0, |
249 | /// Access to memory via argument pointers. |
250 | FMRL_ArgumentPointees = 8, |
251 | /// Memory that is inaccessible via LLVM IR. |
252 | FMRL_InaccessibleMem = 16, |
253 | /// Access to any memory. |
254 | FMRL_Anywhere = 32 | FMRL_InaccessibleMem | FMRL_ArgumentPointees |
255 | }; |
256 | |
257 | /// Summary of how a function affects memory in the program. |
258 | /// |
259 | /// Loads from constant globals are not considered memory accesses for this |
260 | /// interface. Also, functions may freely modify stack space local to their |
261 | /// invocation without having to report it through these interfaces. |
262 | enum FunctionModRefBehavior { |
263 | /// This function does not perform any non-local loads or stores to memory. |
264 | /// |
265 | /// This property corresponds to the GCC 'const' attribute. |
266 | /// This property corresponds to the LLVM IR 'readnone' attribute. |
267 | /// This property corresponds to the IntrNoMem LLVM intrinsic flag. |
268 | FMRB_DoesNotAccessMemory = |
269 | FMRL_Nowhere | static_cast<int>(ModRefInfo::NoModRef), |
270 | |
271 | /// The only memory references in this function (if it has any) are |
272 | /// non-volatile loads from objects pointed to by its pointer-typed |
273 | /// arguments, with arbitrary offsets. |
274 | /// |
275 | /// This property corresponds to the combination of the IntrReadMem |
276 | /// and IntrArgMemOnly LLVM intrinsic flags. |
277 | FMRB_OnlyReadsArgumentPointees = |
278 | FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Ref), |
279 | |
280 | /// The only memory references in this function (if it has any) are |
281 | /// non-volatile stores to objects pointed to by its pointer-typed
282 | /// arguments, with arbitrary offsets. |
283 | /// |
284 | /// This property corresponds to the combination of the IntrWriteMem |
285 | /// and IntrArgMemOnly LLVM intrinsic flags. |
286 | FMRB_OnlyWritesArgumentPointees = |
287 | FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Mod), |
288 | |
289 | /// The only memory references in this function (if it has any) are |
290 | /// non-volatile loads and stores from objects pointed to by its |
291 | /// pointer-typed arguments, with arbitrary offsets. |
292 | /// |
293 | /// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag. |
294 | FMRB_OnlyAccessesArgumentPointees = |
295 | FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::ModRef), |
296 | |
297 | /// The only memory references in this function (if it has any) are |
298 | /// reads of memory that is otherwise inaccessible via LLVM IR. |
299 | /// |
300 | /// This property corresponds to the LLVM IR inaccessiblememonly attribute. |
301 | FMRB_OnlyReadsInaccessibleMem = |
302 | FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::Ref), |
303 | |
304 | /// The only memory references in this function (if it has any) are |
305 | /// writes to memory that is otherwise inaccessible via LLVM IR. |
306 | /// |
307 | /// This property corresponds to the LLVM IR inaccessiblememonly attribute. |
308 | FMRB_OnlyWritesInaccessibleMem = |
309 | FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::Mod), |
310 | |
311 | /// The only memory references in this function (if it has any) are |
312 | /// references of memory that is otherwise inaccessible via LLVM IR. |
313 | /// |
314 | /// This property corresponds to the LLVM IR inaccessiblememonly attribute. |
315 | FMRB_OnlyAccessesInaccessibleMem = |
316 | FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::ModRef), |
317 | |
318 | /// The function may perform non-volatile loads from objects pointed |
319 | /// to by its pointer-typed arguments, with arbitrary offsets, and |
320 | /// it may also perform loads of memory that is otherwise |
321 | /// inaccessible via LLVM IR. |
322 | /// |
323 | /// This property corresponds to the LLVM IR |
324 | /// inaccessiblemem_or_argmemonly attribute. |
325 | FMRB_OnlyReadsInaccessibleOrArgMem = FMRL_InaccessibleMem | |
326 | FMRL_ArgumentPointees | |
327 | static_cast<int>(ModRefInfo::Ref), |
328 | |
329 | /// The function may perform non-volatile stores to objects pointed |
330 | /// to by its pointer-typed arguments, with arbitrary offsets, and |
331 | /// it may also perform stores to memory that is otherwise
332 | /// inaccessible via LLVM IR. |
333 | /// |
334 | /// This property corresponds to the LLVM IR |
335 | /// inaccessiblemem_or_argmemonly attribute. |
336 | FMRB_OnlyWritesInaccessibleOrArgMem = FMRL_InaccessibleMem | |
337 | FMRL_ArgumentPointees | |
338 | static_cast<int>(ModRefInfo::Mod), |
339 | |
340 | /// The function may perform non-volatile loads and stores of objects |
341 | /// pointed to by its pointer-typed arguments, with arbitrary offsets, and |
342 | /// it may also perform loads and stores of memory that is otherwise |
343 | /// inaccessible via LLVM IR. |
344 | /// |
345 | /// This property corresponds to the LLVM IR |
346 | /// inaccessiblemem_or_argmemonly attribute. |
347 | FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem | |
348 | FMRL_ArgumentPointees | |
349 | static_cast<int>(ModRefInfo::ModRef), |
350 | |
351 | /// This function does not perform any non-local stores or volatile loads, |
352 | /// but may read from any memory location. |
353 | /// |
354 | /// This property corresponds to the GCC 'pure' attribute. |
355 | /// This property corresponds to the LLVM IR 'readonly' attribute. |
356 | /// This property corresponds to the IntrReadMem LLVM intrinsic flag. |
357 | FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Ref), |
358 | |
359 | /// This function does not read from memory anywhere, but may write to any
360 | /// memory location.
361 | ///
362 | /// This property corresponds to the LLVM IR 'writeonly' attribute.
363 | /// This property corresponds to the IntrWriteMem LLVM intrinsic flag.
364 | FMRB_OnlyWritesMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Mod), |
365 | |
366 | /// This indicates that the function could not be classified into one of the |
367 | /// behaviors above. |
368 | FMRB_UnknownModRefBehavior = |
369 | FMRL_Anywhere | static_cast<int>(ModRefInfo::ModRef) |
370 | }; |
371 | |
372 | // Wrapper method strips bits significant only in FunctionModRefBehavior, |
373 | // to obtain a valid ModRefInfo. The benefit of using the wrapper is that if |
374 | // ModRefInfo enum changes, the wrapper can be updated to & with the new enum |
375 | // entry with all bits set to 1. |
376 | LLVM_NODISCARD inline ModRefInfo
377 | createModRefInfo(const FunctionModRefBehavior FMRB) { |
378 | return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef)); |
379 | } |
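// [Illustrative sketch] createModRefInfo strips the FMRL_* location bits,
// leaving only the mod/ref component. The function name is hypothetical.
inline void demoCreateModRefInfo() {
  assert(createModRefInfo(FMRB_OnlyReadsArgumentPointees) == ModRefInfo::Ref);
  assert(createModRefInfo(FMRB_DoesNotAccessMemory) == ModRefInfo::NoModRef);
  assert(createModRefInfo(FMRB_UnknownModRefBehavior) == ModRefInfo::ModRef);
}
// [End sketch]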
380 | |
381 | /// Reduced version of MemoryLocation that only stores a pointer and size. |
382 | /// Used for caching AATags independent BasicAA results. |
383 | struct AACacheLoc { |
384 | const Value *Ptr; |
385 | LocationSize Size; |
386 | }; |
387 | |
388 | template <> struct DenseMapInfo<AACacheLoc> { |
389 | static inline AACacheLoc getEmptyKey() { |
390 | return {DenseMapInfo<const Value *>::getEmptyKey(), |
391 | DenseMapInfo<LocationSize>::getEmptyKey()}; |
392 | } |
393 | static inline AACacheLoc getTombstoneKey() { |
394 | return {DenseMapInfo<const Value *>::getTombstoneKey(), |
395 | DenseMapInfo<LocationSize>::getTombstoneKey()}; |
396 | } |
397 | static unsigned getHashValue(const AACacheLoc &Val) { |
398 | return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^ |
399 | DenseMapInfo<LocationSize>::getHashValue(Val.Size); |
400 | } |
401 | static bool isEqual(const AACacheLoc &LHS, const AACacheLoc &RHS) { |
402 | return LHS.Ptr == RHS.Ptr && LHS.Size == RHS.Size; |
403 | } |
404 | }; |
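// [Illustrative sketch] With the specialization above, AACacheLoc can key a
// DenseMap directly, e.g. counting queries per (pointer, size) pair. The
// function name is hypothetical.
inline void demoAACacheKey(const Value *Ptr) {
  DenseMap<AACacheLoc, unsigned> QueryCounts;
  ++QueryCounts[AACacheLoc{Ptr, LocationSize::precise(4)}];
}
// [End sketch]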
405 | |
406 | /// This class stores info we want to provide to or retain within an alias |
407 | /// query. By default, the root query is stateless and starts with a freshly |
408 | /// constructed info object. Specific alias analyses can use this query info to |
409 | /// store per-query state that is important for recursive or nested queries to |
410 | /// avoid recomputing. To enable preserving this state across multiple queries |
411 | /// where safe (due to the IR not changing), use a `BatchAAResults` wrapper. |
412 | /// The information stored in an `AAQueryInfo` is currently limited to the
413 | /// caches used by BasicAA, but can further be extended to fit other AA needs. |
414 | class AAQueryInfo { |
415 | public: |
416 | using LocPair = std::pair<AACacheLoc, AACacheLoc>; |
417 | struct CacheEntry { |
418 | AliasResult Result; |
419 | /// Number of times a NoAlias assumption has been used. |
420 | /// 0 for assumptions that have not been used, -1 for definitive results. |
421 | int NumAssumptionUses; |
422 | /// Whether this is a definitive (non-assumption) result. |
423 | bool isDefinitive() const { return NumAssumptionUses < 0; } |
424 | }; |
425 | using AliasCacheT = SmallDenseMap<LocPair, CacheEntry, 8>; |
426 | AliasCacheT AliasCache; |
427 | |
428 | using IsCapturedCacheT = SmallDenseMap<const Value *, bool, 8>; |
429 | IsCapturedCacheT IsCapturedCache; |
430 | |
431 | /// Query depth used to distinguish recursive queries. |
432 | unsigned Depth = 0; |
433 | |
434 | /// How many active NoAlias assumption uses there are. |
435 | int NumAssumptionUses = 0; |
436 | |
437 | /// Location pairs for which an assumption based result is currently stored. |
438 | /// Used to remove all potentially incorrect results from the cache if an |
439 | /// assumption is disproven. |
440 | SmallVector<AAQueryInfo::LocPair, 4> AssumptionBasedResults; |
441 | |
442 | AAQueryInfo() : AliasCache(), IsCapturedCache() {} |
443 | |
444 | /// Create a new AAQueryInfo based on this one, but with the cache cleared. |
445 | /// This is used for recursive queries across phis, where cache results may |
446 | /// not be valid. |
447 | AAQueryInfo withEmptyCache() { |
448 | AAQueryInfo NewAAQI; |
449 | NewAAQI.Depth = Depth; |
450 | return NewAAQI; |
451 | } |
452 | }; |
453 | |
454 | class BatchAAResults; |
455 | |
456 | class AAResults { |
457 | public: |
458 | // Make these results default constructible and movable. We have to spell
459 | // these out because MSVC won't synthesize them. |
460 | AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {} |
461 | AAResults(AAResults &&Arg); |
462 | ~AAResults(); |
463 | |
464 | /// Register a specific AA result. |
465 | template <typename AAResultT> void addAAResult(AAResultT &AAResult) { |
466 | // FIXME: We should use a much lighter weight system than the usual |
467 | // polymorphic pattern because we don't own AAResult. It should |
468 | // ideally involve two pointers and no separate allocation. |
469 | AAs.emplace_back(new Model<AAResultT>(AAResult, *this)); |
470 | } |
471 | |
472 | /// Register a function analysis ID that the results aggregation depends on. |
473 | /// |
474 | /// This is used in the new pass manager to implement the invalidation logic |
475 | /// where we must invalidate the results aggregation if any of our component |
476 | /// analyses become invalid. |
477 | void addAADependencyID(AnalysisKey *ID) { AADeps.push_back(ID); } |
478 | |
479 | /// Handle invalidation events in the new pass manager. |
480 | /// |
481 | /// The aggregation is invalidated if any of the underlying analyses is |
482 | /// invalidated. |
483 | bool invalidate(Function &F, const PreservedAnalyses &PA, |
484 | FunctionAnalysisManager::Invalidator &Inv); |
485 | |
486 | //===--------------------------------------------------------------------===// |
487 | /// \name Alias Queries |
488 | /// @{ |
489 | |
490 | /// The main low level interface to the alias analysis implementation. |
491 | /// Returns an AliasResult indicating whether the two pointers are aliased to |
492 | /// each other. This is the interface that must be implemented by specific |
493 | /// alias analysis implementations. |
494 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB); |
495 | |
496 | /// A convenience wrapper around the primary \c alias interface. |
497 | AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2, |
498 | LocationSize V2Size) { |
499 | return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size)); |
500 | } |
501 | |
502 | /// A convenience wrapper around the primary \c alias interface. |
503 | AliasResult alias(const Value *V1, const Value *V2) { |
504 | return alias(MemoryLocation::getBeforeOrAfter(V1), |
505 | MemoryLocation::getBeforeOrAfter(V2)); |
506 | } |
507 | |
508 | /// A trivial helper function to check to see if the specified pointers are |
509 | /// no-alias. |
510 | bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) { |
511 | return alias(LocA, LocB) == AliasResult::NoAlias; |
512 | } |
513 | |
514 | /// A convenience wrapper around the \c isNoAlias helper interface. |
515 | bool isNoAlias(const Value *V1, LocationSize V1Size, const Value *V2, |
516 | LocationSize V2Size) { |
517 | return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size)); |
518 | } |
519 | |
520 | /// A convenience wrapper around the \c isNoAlias helper interface. |
521 | bool isNoAlias(const Value *V1, const Value *V2) { |
522 | return isNoAlias(MemoryLocation::getBeforeOrAfter(V1), |
523 | MemoryLocation::getBeforeOrAfter(V2)); |
524 | } |
525 | |
526 | /// A trivial helper function to check to see if the specified pointers are |
527 | /// must-alias. |
528 | bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) { |
529 | return alias(LocA, LocB) == AliasResult::MustAlias; |
530 | } |
531 | |
532 | /// A convenience wrapper around the \c isMustAlias helper interface. |
533 | bool isMustAlias(const Value *V1, const Value *V2) { |
534 | return alias(V1, LocationSize::precise(1), V2, LocationSize::precise(1)) == |
535 | AliasResult::MustAlias; |
536 | } |
537 | |
538 | /// Checks whether the given location points to constant memory, or if |
539 | /// \p OrLocal is true whether it points to a local alloca. |
540 | bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false); |
541 | |
542 | /// A convenience wrapper around the primary \c pointsToConstantMemory |
543 | /// interface. |
544 | bool pointsToConstantMemory(const Value *P, bool OrLocal = false) { |
545 | return pointsToConstantMemory(MemoryLocation::getBeforeOrAfter(P), OrLocal); |
546 | } |
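/// [Illustrative sketch] Typical use of the wrappers above, e.g. from a pass
/// holding a prepared AAResults `AA` (the caller below is hypothetical):
/// \code
///   bool mayOverlap(AAResults &AA, const Value *P, const Value *Q) {
///     // NoAlias is the only result that converts to false.
///     return AA.alias(P, Q) != AliasResult::NoAlias;
///   }
/// \endcode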
547 | |
548 | /// @} |
549 | //===--------------------------------------------------------------------===// |
550 | /// \name Simple mod/ref information |
551 | /// @{ |
552 | |
553 | /// Get the ModRef info associated with a pointer argument of a call. The |
554 | /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note |
555 | /// that these bits do not necessarily account for the overall behavior of |
556 | /// the function, but rather only provide additional per-argument |
557 | /// information. This never sets ModRefInfo::Must. |
558 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx); |
559 | |
560 | /// Return the behavior of the given call site. |
561 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call); |
562 | |
563 | /// Return the behavior when calling the given function. |
564 | FunctionModRefBehavior getModRefBehavior(const Function *F); |
565 | |
566 | /// Checks if the specified call is known to never read or write memory. |
567 | /// |
568 | /// Note that if the call only reads from known-constant memory, it is also |
569 | /// legal to return true. Also, calls that unwind the stack are legal for |
570 | /// this predicate. |
571 | /// |
572 | /// Many optimizations (such as CSE and LICM) can be performed on such calls |
573 | /// without worrying about aliasing properties, and many calls have this |
574 | /// property (e.g. calls to 'sin' and 'cos'). |
575 | /// |
576 | /// This property corresponds to the GCC 'const' attribute. |
577 | bool doesNotAccessMemory(const CallBase *Call) { |
578 | return getModRefBehavior(Call) == FMRB_DoesNotAccessMemory; |
579 | } |
580 | |
581 | /// Checks if the specified function is known to never read or write memory. |
582 | /// |
583 | /// Note that if the function only reads from known-constant memory, it is |
584 | /// also legal to return true. Also, functions that unwind the stack are legal
585 | /// for this predicate. |
586 | /// |
587 | /// Many optimizations (such as CSE and LICM) can be performed on such calls |
588 | /// to such functions without worrying about aliasing properties, and many |
589 | /// functions have this property (e.g. 'sin' and 'cos'). |
590 | /// |
591 | /// This property corresponds to the GCC 'const' attribute. |
592 | bool doesNotAccessMemory(const Function *F) { |
593 | return getModRefBehavior(F) == FMRB_DoesNotAccessMemory; |
594 | } |
595 | |
596 | /// Checks if the specified call is known to only read from non-volatile |
597 | /// memory (or not access memory at all). |
598 | /// |
599 | /// Calls that unwind the stack are legal for this predicate. |
600 | /// |
601 | /// This property allows many common optimizations to be performed in the |
602 | /// absence of interfering store instructions, such as CSE of strlen calls. |
603 | /// |
604 | /// This property corresponds to the GCC 'pure' attribute. |
605 | bool onlyReadsMemory(const CallBase *Call) { |
606 | return onlyReadsMemory(getModRefBehavior(Call)); |
607 | } |
608 | |
609 | /// Checks if the specified function is known to only read from non-volatile |
610 | /// memory (or not access memory at all). |
611 | /// |
612 | /// Functions that unwind the stack are legal for this predicate. |
613 | /// |
614 | /// This property allows many common optimizations to be performed in the |
615 | /// absence of interfering store instructions, such as CSE of strlen calls. |
616 | /// |
617 | /// This property corresponds to the GCC 'pure' attribute. |
618 | bool onlyReadsMemory(const Function *F) { |
619 | return onlyReadsMemory(getModRefBehavior(F)); |
620 | } |
621 | |
622 | /// Checks if functions with the specified behavior are known to only read |
623 | /// from non-volatile memory (or not access memory at all). |
624 | static bool onlyReadsMemory(FunctionModRefBehavior MRB) { |
625 | return !isModSet(createModRefInfo(MRB)); |
626 | } |
627 | |
628 | /// Checks if functions with the specified behavior are known to only write |
629 | /// memory (or not access memory at all). |
630 | static bool doesNotReadMemory(FunctionModRefBehavior MRB) { |
631 | return !isRefSet(createModRefInfo(MRB)); |
632 | } |
633 | |
634 | /// Checks if functions with the specified behavior are known to read and |
635 | /// write at most from objects pointed to by their pointer-typed arguments |
636 | /// (with arbitrary offsets). |
637 | static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB) { |
638 | return !((unsigned)MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees); |
639 | } |
640 | |
641 | /// Checks if functions with the specified behavior are known to potentially |
642 | /// read or write from objects pointed to by their pointer-typed arguments
643 | /// (with arbitrary offsets). |
644 | static bool doesAccessArgPointees(FunctionModRefBehavior MRB) { |
645 | return isModOrRefSet(createModRefInfo(MRB)) && |
646 | ((unsigned)MRB & FMRL_ArgumentPointees); |
647 | } |
648 | |
649 | /// Checks if functions with the specified behavior are known to read and |
650 | /// write at most from memory that is inaccessible from LLVM IR. |
651 | static bool onlyAccessesInaccessibleMem(FunctionModRefBehavior MRB) { |
652 | return !((unsigned)MRB & FMRL_Anywhere & ~FMRL_InaccessibleMem); |
653 | } |
654 | |
655 | /// Checks if functions with the specified behavior are known to potentially |
656 | /// read or write from memory that is inaccessible from LLVM IR. |
657 | static bool doesAccessInaccessibleMem(FunctionModRefBehavior MRB) { |
658 | return isModOrRefSet(createModRefInfo(MRB)) && |
659 | ((unsigned)MRB & FMRL_InaccessibleMem); |
660 | } |
661 | |
662 | /// Checks if functions with the specified behavior are known to read and |
663 | /// write at most from memory that is inaccessible from LLVM IR or objects |
664 | /// pointed to by their pointer-typed arguments (with arbitrary offsets). |
665 | static bool onlyAccessesInaccessibleOrArgMem(FunctionModRefBehavior MRB) { |
666 | return !((unsigned)MRB & FMRL_Anywhere & |
667 | ~(FMRL_InaccessibleMem | FMRL_ArgumentPointees)); |
668 | } |
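/// [Illustrative sketch] Classifying a call site with the static helpers
/// above (`AA` and `Call` are hypothetical values in scope):
/// \code
///   FunctionModRefBehavior MRB = AA.getModRefBehavior(Call);
///   if (AAResults::onlyAccessesArgPointees(MRB))
///     ; // Call can only touch memory reachable from its arguments.
/// \endcode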
669 | |
670 | /// getModRefInfo (for call sites) - Return information about whether |
671 | /// a particular call site modifies or reads the specified memory location. |
672 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc); |
673 | |
674 | /// getModRefInfo (for call sites) - A convenience wrapper. |
675 | ModRefInfo getModRefInfo(const CallBase *Call, const Value *P, |
676 | LocationSize Size) { |
677 | return getModRefInfo(Call, MemoryLocation(P, Size)); |
678 | } |
679 | |
680 | /// getModRefInfo (for loads) - Return information about whether |
681 | /// a particular load modifies or reads the specified memory location. |
682 | ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc); |
683 | |
684 | /// getModRefInfo (for loads) - A convenience wrapper. |
685 | ModRefInfo getModRefInfo(const LoadInst *L, const Value *P, |
686 | LocationSize Size) { |
687 | return getModRefInfo(L, MemoryLocation(P, Size)); |
688 | } |
689 | |
690 | /// getModRefInfo (for stores) - Return information about whether |
691 | /// a particular store modifies or reads the specified memory location. |
692 | ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc); |
693 | |
694 | /// getModRefInfo (for stores) - A convenience wrapper. |
695 | ModRefInfo getModRefInfo(const StoreInst *S, const Value *P, |
696 | LocationSize Size) { |
697 | return getModRefInfo(S, MemoryLocation(P, Size)); |
698 | } |
699 | |
700 | /// getModRefInfo (for fences) - Return information about whether |
701 | /// a particular fence modifies or reads the specified memory location.
702 | ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc); |
703 | |
704 | /// getModRefInfo (for fences) - A convenience wrapper. |
705 | ModRefInfo getModRefInfo(const FenceInst *S, const Value *P, |
706 | LocationSize Size) { |
707 | return getModRefInfo(S, MemoryLocation(P, Size)); |
708 | } |
709 | |
710 | /// getModRefInfo (for cmpxchges) - Return information about whether |
711 | /// a particular cmpxchg modifies or reads the specified memory location. |
712 | ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, |
713 | const MemoryLocation &Loc); |
714 | |
715 | /// getModRefInfo (for cmpxchges) - A convenience wrapper. |
716 | ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P, |
717 | LocationSize Size) { |
718 | return getModRefInfo(CX, MemoryLocation(P, Size)); |
719 | } |
720 | |
721 | /// getModRefInfo (for atomicrmws) - Return information about whether |
722 | /// a particular atomicrmw modifies or reads the specified memory location. |
723 | ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc); |
724 | |
725 | /// getModRefInfo (for atomicrmws) - A convenience wrapper. |
726 | ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P, |
727 | LocationSize Size) { |
728 | return getModRefInfo(RMW, MemoryLocation(P, Size)); |
729 | } |
730 | |
731 | /// getModRefInfo (for va_args) - Return information about whether |
732 | /// a particular va_arg modifies or reads the specified memory location. |
733 | ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc); |
734 | |
735 | /// getModRefInfo (for va_args) - A convenience wrapper. |
736 | ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P, |
737 | LocationSize Size) { |
738 | return getModRefInfo(I, MemoryLocation(P, Size)); |
739 | } |
740 | |
741 | /// getModRefInfo (for catchpads) - Return information about whether |
742 | /// a particular catchpad modifies or reads the specified memory location. |
743 | ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc); |
744 | |
745 | /// getModRefInfo (for catchpads) - A convenience wrapper. |
746 | ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P, |
747 | LocationSize Size) { |
748 | return getModRefInfo(I, MemoryLocation(P, Size)); |
749 | } |
750 | |
751 | /// getModRefInfo (for catchrets) - Return information about whether |
752 | /// a particular catchret modifies or reads the specified memory location. |
753 | ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc); |
754 | |
755 | /// getModRefInfo (for catchrets) - A convenience wrapper. |
756 | ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P, |
757 | LocationSize Size) { |
758 | return getModRefInfo(I, MemoryLocation(P, Size)); |
759 | } |
760 | |
761 | /// Check whether or not an instruction may read or write the optionally |
762 | /// specified memory location. |
763 | /// |
764 | /// |
765 | /// An instruction that doesn't read or write memory may be trivially LICM'd |
766 | /// for example. |
767 | /// |
768 | /// For function calls, this delegates to the alias-analysis specific |
769 | /// call-site mod-ref behavior queries. Otherwise it delegates to the specific |
770 | /// helpers above. |
771 | ModRefInfo getModRefInfo(const Instruction *I, |
772 | const Optional<MemoryLocation> &OptLoc) { |
773 | AAQueryInfo AAQIP; |
774 | return getModRefInfo(I, OptLoc, AAQIP); |
775 | } |
776 | |
777 | /// A convenience wrapper for constructing the memory location. |
778 | ModRefInfo getModRefInfo(const Instruction *I, const Value *P, |
779 | LocationSize Size) { |
780 | return getModRefInfo(I, MemoryLocation(P, Size)); |
781 | } |
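/// [Illustrative sketch] Testing whether an instruction `I` may write the
/// memory a load `LI` reads (hypothetical values in scope):
/// \code
///   bool MayClobber = isModSet(AA.getModRefInfo(I, MemoryLocation::get(LI)));
/// \endcode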
782 | |
783 | /// Return information about whether a call and an instruction may refer to |
784 | /// the same memory locations. |
785 | ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call); |
786 | |
787 | /// Return information about whether two call sites may refer to the same set |
788 | /// of memory locations. See the AA documentation for details: |
789 | /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo |
790 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2); |
791 | |
792 | /// Return information about whether a particular call site modifies |
793 | /// or reads the specified memory location \p MemLoc before instruction \p I |
794 | /// in a BasicBlock. |
795 | /// Early exits in callCapturesBefore may lead to ModRefInfo::Must not being |
796 | /// set. |
797 | ModRefInfo callCapturesBefore(const Instruction *I, |
798 | const MemoryLocation &MemLoc, |
799 | DominatorTree *DT) { |
800 | AAQueryInfo AAQIP; |
801 | return callCapturesBefore(I, MemLoc, DT, AAQIP); |
802 | } |
803 | |
804 | /// A convenience wrapper to synthesize a memory location. |
805 | ModRefInfo callCapturesBefore(const Instruction *I, const Value *P, |
806 | LocationSize Size, DominatorTree *DT) { |
807 | return callCapturesBefore(I, MemoryLocation(P, Size), DT); |
808 | } |
809 | |
810 | /// @} |
811 | //===--------------------------------------------------------------------===// |
812 | /// \name Higher level methods for querying mod/ref information. |
813 | /// @{ |
814 | |
815 | /// Check if it is possible for execution of the specified basic block to |
816 | /// modify the location Loc. |
817 | bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc); |
818 | |
819 | /// A convenience wrapper synthesizing a memory location. |
820 | bool canBasicBlockModify(const BasicBlock &BB, const Value *P, |
821 | LocationSize Size) { |
822 | return canBasicBlockModify(BB, MemoryLocation(P, Size)); |
823 | } |
824 | |
825 | /// Check if it is possible for the execution of the specified instructions |
826 | /// to mod/ref (according to the mode) the location Loc.
827 | /// |
828 | /// The instructions to consider are all of the instructions in the range of |
829 | /// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block. |
830 | bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2, |
831 | const MemoryLocation &Loc, |
832 | const ModRefInfo Mode); |
833 | |
834 | /// A convenience wrapper synthesizing a memory location. |
835 | bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2, |
836 | const Value *Ptr, LocationSize Size, |
837 | const ModRefInfo Mode) { |
838 | return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode); |
839 | } |
840 | |
841 | private: |
842 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, |
843 | AAQueryInfo &AAQI); |
844 | bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, |
845 | bool OrLocal = false); |
846 | ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2, |
847 | AAQueryInfo &AAQIP); |
848 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, |
849 | AAQueryInfo &AAQI); |
850 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
851 | AAQueryInfo &AAQI); |
852 | ModRefInfo getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc, |
853 | AAQueryInfo &AAQI); |
854 | ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc, |
855 | AAQueryInfo &AAQI); |
856 | ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc, |
857 | AAQueryInfo &AAQI); |
858 | ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc, |
859 | AAQueryInfo &AAQI); |
860 | ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, |
861 | const MemoryLocation &Loc, AAQueryInfo &AAQI); |
862 | ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc, |
863 | AAQueryInfo &AAQI); |
864 | ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc, |
865 | AAQueryInfo &AAQI); |
866 | ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc, |
867 | AAQueryInfo &AAQI); |
868 | ModRefInfo getModRefInfo(const Instruction *I, |
869 | const Optional<MemoryLocation> &OptLoc, |
870 | AAQueryInfo &AAQIP); |
871 | ModRefInfo callCapturesBefore(const Instruction *I, |
872 | const MemoryLocation &MemLoc, DominatorTree *DT, |
873 | AAQueryInfo &AAQIP); |
874 | |
875 | class Concept; |
876 | |
877 | template <typename T> class Model; |
878 | |
879 | template <typename T> friend class AAResultBase; |
880 | |
881 | const TargetLibraryInfo &TLI; |
882 | |
883 | std::vector<std::unique_ptr<Concept>> AAs; |
884 | |
885 | std::vector<AnalysisKey *> AADeps; |
886 | |
887 | friend class BatchAAResults; |
888 | }; |
889 | |
890 | /// This class is a wrapper over an AAResults, and it is intended to be used |
891 | /// only when there are no IR changes in between queries. BatchAAResults
892 | /// reuses the same `AAQueryInfo` to preserve the state across queries,
893 | /// essentially making AA work in "batch mode". The internal state cannot be
894 | /// cleared, so to go "out-of-batch-mode", the user must either use AAResults, |
895 | /// or create a new BatchAAResults. |
896 | class BatchAAResults { |
897 | AAResults &AA; |
898 | AAQueryInfo AAQI; |
899 | |
900 | public: |
901 | BatchAAResults(AAResults &AAR) : AA(AAR), AAQI() {} |
902 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) { |
903 | return AA.alias(LocA, LocB, AAQI); |
904 | } |
905 | bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false) { |
906 | return AA.pointsToConstantMemory(Loc, AAQI, OrLocal); |
907 | } |
908 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { |
909 | return AA.getModRefInfo(Call, Loc, AAQI); |
910 | } |
911 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) { |
912 | return AA.getModRefInfo(Call1, Call2, AAQI); |
913 | } |
914 | ModRefInfo getModRefInfo(const Instruction *I, |
915 | const Optional<MemoryLocation> &OptLoc) { |
916 | return AA.getModRefInfo(I, OptLoc, AAQI); |
917 | } |
918 | ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2) { |
919 | return AA.getModRefInfo(I, Call2, AAQI); |
920 | } |
921 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { |
922 | return AA.getArgModRefInfo(Call, ArgIdx); |
923 | } |
924 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call) { |
925 | return AA.getModRefBehavior(Call); |
926 | } |
927 | bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) { |
928 | return alias(LocA, LocB) == AliasResult::MustAlias; |
929 | } |
930 | bool isMustAlias(const Value *V1, const Value *V2) { |
931 | return alias(MemoryLocation(V1, LocationSize::precise(1)), |
932 | MemoryLocation(V2, LocationSize::precise(1))) == |
933 | AliasResult::MustAlias; |
934 | } |
935 | ModRefInfo callCapturesBefore(const Instruction *I, |
936 | const MemoryLocation &MemLoc, |
937 | DominatorTree *DT) { |
938 | return AA.callCapturesBefore(I, MemLoc, DT, AAQI); |
939 | } |
940 | }; |
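// [Illustrative sketch] Batching queries over unchanging IR; the function
// name is hypothetical.
inline AliasResult aliasTwice(AAResults &AA, const MemoryLocation &A,
                              const MemoryLocation &B) {
  BatchAAResults BatchAA(AA);
  (void)BatchAA.alias(A, B); // First query fills the shared AAQueryInfo cache.
  return BatchAA.alias(A, B); // An identical repeat query can hit that cache.
}
// [End sketch]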
941 | |
942 | /// Temporary typedef for legacy code that uses a generic \c AliasAnalysis |
943 | /// pointer or reference. |
944 | using AliasAnalysis = AAResults; |
945 | |
946 | /// A private abstract base class describing the concept of an individual alias |
947 | /// analysis implementation. |
948 | /// |
949 | /// This interface is implemented by any \c Model instantiation. It is also the |
950 | /// interface which a type used to instantiate the model must provide. |
951 | /// |
952 | /// All of these methods model the methods of the same name in the \c
953 | /// AAResults class. Only differences from, and specifics of, how the
954 | /// implementations are called are documented here.
955 | class AAResults::Concept { |
956 | public: |
957 | virtual ~Concept() = 0; |
958 | |
959 | /// An update API used internally by the AAResults to provide |
960 | /// a handle back to the top level aggregation. |
961 | virtual void setAAResults(AAResults *NewAAR) = 0; |
962 | |
963 | //===--------------------------------------------------------------------===// |
964 | /// \name Alias Queries |
965 | /// @{ |
966 | |
967 | /// The main low level interface to the alias analysis implementation. |
968 | /// Returns an AliasResult indicating whether the two pointers are aliased to |
969 | /// each other. This is the interface that must be implemented by specific |
970 | /// alias analysis implementations. |
971 | virtual AliasResult alias(const MemoryLocation &LocA, |
972 | const MemoryLocation &LocB, AAQueryInfo &AAQI) = 0; |
973 | |
974 | /// Checks whether the given location points to constant memory, or if |
975 | /// \p OrLocal is true whether it points to a local alloca. |
976 | virtual bool pointsToConstantMemory(const MemoryLocation &Loc, |
977 | AAQueryInfo &AAQI, bool OrLocal) = 0; |
978 | |
979 | /// @} |
980 | //===--------------------------------------------------------------------===// |
981 | /// \name Simple mod/ref information |
982 | /// @{ |
983 | |
984 | /// Get the ModRef info associated with a pointer argument of a callsite. The |
985 | /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note |
986 | /// that these bits do not necessarily account for the overall behavior of |
987 | /// the function, but rather only provide additional per-argument |
988 | /// information. |
989 | virtual ModRefInfo getArgModRefInfo(const CallBase *Call, |
990 | unsigned ArgIdx) = 0; |
991 | |
992 | /// Return the behavior of the given call site. |
993 | virtual FunctionModRefBehavior getModRefBehavior(const CallBase *Call) = 0; |
994 | |
995 | /// Return the behavior when calling the given function. |
996 | virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0; |
997 | |
998 | /// getModRefInfo (for call sites) - Return information about whether |
999 | /// a particular call site modifies or reads the specified memory location. |
1000 | virtual ModRefInfo getModRefInfo(const CallBase *Call, |
1001 | const MemoryLocation &Loc, |
1002 | AAQueryInfo &AAQI) = 0; |
1003 | |
1004 | /// Return information about whether two call sites may refer to the same set |
1005 | /// of memory locations. See the AA documentation for details: |
1006 | /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo |
1007 | virtual ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
1008 | AAQueryInfo &AAQI) = 0; |
1009 | |
1010 | /// @} |
1011 | }; |
1012 | |
1013 | /// A private class template which derives from \c Concept and wraps some other |
1014 | /// type. |
1015 | /// |
1016 | /// This models the concept by directly forwarding each interface point to the |
1017 | /// wrapped type which must implement a compatible interface. This provides |
1018 | /// a type erased binding. |
1019 | template <typename AAResultT> class AAResults::Model final : public Concept { |
1020 | AAResultT &Result; |
1021 | |
1022 | public: |
1023 | explicit Model(AAResultT &Result, AAResults &AAR) : Result(Result) { |
1024 | Result.setAAResults(&AAR); |
1025 | } |
1026 | ~Model() override = default; |
1027 | |
1028 | void setAAResults(AAResults *NewAAR) override { Result.setAAResults(NewAAR); } |
1029 | |
1030 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, |
1031 | AAQueryInfo &AAQI) override { |
1032 | return Result.alias(LocA, LocB, AAQI); |
1033 | } |
1034 | |
1035 | bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, |
1036 | bool OrLocal) override { |
1037 | return Result.pointsToConstantMemory(Loc, AAQI, OrLocal); |
1038 | } |
1039 | |
1040 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override { |
1041 | return Result.getArgModRefInfo(Call, ArgIdx); |
1042 | } |
1043 | |
1044 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call) override { |
1045 | return Result.getModRefBehavior(Call); |
1046 | } |
1047 | |
1048 | FunctionModRefBehavior getModRefBehavior(const Function *F) override { |
1049 | return Result.getModRefBehavior(F); |
1050 | } |
1051 | |
1052 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, |
1053 | AAQueryInfo &AAQI) override { |
1054 | return Result.getModRefInfo(Call, Loc, AAQI); |
1055 | } |
1056 | |
1057 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
1058 | AAQueryInfo &AAQI) override { |
1059 | return Result.getModRefInfo(Call1, Call2, AAQI); |
1060 | } |
1061 | }; |
1062 | |
1063 | /// A CRTP-driven "mixin" base class to help implement the function alias |
1064 | /// analysis results concept. |
1065 | /// |
1066 | /// Because of the nature of many alias analysis implementations, they often |
1067 | /// only implement a subset of the interface. This base class will attempt to |
1068 | /// implement the remaining portions of the interface in terms of simpler forms |
1069 | /// of the interface where possible, and otherwise provide conservatively |
1070 | /// correct fallback implementations. |
1071 | /// |
1072 | /// Implementors of an alias analysis should derive from this CRTP, and then |
1073 | /// override specific methods that they wish to customize. There is no need to |
1074 | /// use virtual anywhere, the CRTP base class does static dispatch to the |
1075 | /// derived type passed into it. |
1076 | template <typename DerivedT> class AAResultBase { |
1077 | // Expose some parts of the interface only to the AAResults::Model |
1078 | // for wrapping. Specifically, this allows the model to call our |
1079 | // setAAResults method without exposing it as a fully public API. |
1080 | friend class AAResults::Model<DerivedT>; |
1081 | |
1082 | /// A pointer to the AAResults object that this AAResult is |
1083 | /// aggregated within. May be null if not aggregated. |
1084 | AAResults *AAR = nullptr; |
1085 | |
1086 | /// Helper to dispatch calls back through the derived type. |
1087 | DerivedT &derived() { return static_cast<DerivedT &>(*this); } |
1088 | |
1089 | /// A setter for the AAResults pointer, which is used to satisfy the |
1090 | /// AAResults::Model contract. |
1091 | void setAAResults(AAResults *NewAAR) { AAR = NewAAR; } |
1092 | |
1093 | protected: |
1094 | /// This proxy class models a common pattern where we delegate to either the |
1095 | /// top-level \c AAResults aggregation if one is registered, or to the |
1096 | /// current result if none are registered. |
1097 | class AAResultsProxy { |
1098 | AAResults *AAR; |
1099 | DerivedT &CurrentResult; |
1100 | |
1101 | public: |
1102 | AAResultsProxy(AAResults *AAR, DerivedT &CurrentResult) |
1103 | : AAR(AAR), CurrentResult(CurrentResult) {} |
1104 | |
1105 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, |
1106 | AAQueryInfo &AAQI) { |
1107 | return AAR ? AAR->alias(LocA, LocB, AAQI) |
1108 | : CurrentResult.alias(LocA, LocB, AAQI); |
1109 | } |
1110 | |
1111 | bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, |
1112 | bool OrLocal) { |
1113 | return AAR ? AAR->pointsToConstantMemory(Loc, AAQI, OrLocal) |
1114 | : CurrentResult.pointsToConstantMemory(Loc, AAQI, OrLocal); |
1115 | } |
1116 | |
1117 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { |
1118 | return AAR ? AAR->getArgModRefInfo(Call, ArgIdx) |
1119 | : CurrentResult.getArgModRefInfo(Call, ArgIdx); |
1120 | } |
1121 | |
1122 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call) { |
1123 | return AAR ? AAR->getModRefBehavior(Call) |
1124 | : CurrentResult.getModRefBehavior(Call); |
1125 | } |
1126 | |
1127 | FunctionModRefBehavior getModRefBehavior(const Function *F) { |
1128 | return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F); |
1129 | } |
1130 | |
1131 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, |
1132 | AAQueryInfo &AAQI) { |
1133 | return AAR ? AAR->getModRefInfo(Call, Loc, AAQI) |
1134 | : CurrentResult.getModRefInfo(Call, Loc, AAQI); |
1135 | } |
1136 | |
1137 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
1138 | AAQueryInfo &AAQI) { |
1139 | return AAR ? AAR->getModRefInfo(Call1, Call2, AAQI) |
1140 | : CurrentResult.getModRefInfo(Call1, Call2, AAQI); |
1141 | } |
1142 | }; |
1143 | |
1144 | explicit AAResultBase() = default; |
1145 | |
1146 | // Provide all the copy and move constructors so that derived types aren't |
1147 | // constrained. |
1148 | AAResultBase(const AAResultBase &Arg) {} |
1149 | AAResultBase(AAResultBase &&Arg) {} |
1150 | |
1151 | /// Get a proxy for the best AA result set to query at this time. |
1152 | /// |
1153 | /// When this result is part of a larger aggregation, this will proxy to that |
1154 | /// aggregation. When this result is used in isolation, it will just delegate |
1155 | /// back to the derived class's implementation. |
1156 | /// |
1157 | /// Note that callers of this need to take considerable care not to cause
1158 | /// performance problems when they use this routine; in the case of a large
1159 | /// number of alias analyses being aggregated, it can be expensive to walk |
1160 | /// back across the chain. |
1161 | AAResultsProxy getBestAAResults() { return AAResultsProxy(AAR, derived()); } |
1162 | |
1163 | public: |
1164 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, |
1165 | AAQueryInfo &AAQI) { |
1166 | return AliasResult::MayAlias; |
1167 | } |
1168 | |
1169 | bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, |
1170 | bool OrLocal) { |
1171 | return false; |
1172 | } |
1173 | |
1174 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { |
1175 | return ModRefInfo::ModRef; |
1176 | } |
1177 | |
1178 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call) { |
1179 | return FMRB_UnknownModRefBehavior; |
1180 | } |
1181 | |
1182 | FunctionModRefBehavior getModRefBehavior(const Function *F) { |
1183 | return FMRB_UnknownModRefBehavior; |
1184 | } |
1185 | |
1186 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, |
1187 | AAQueryInfo &AAQI) { |
1188 | return ModRefInfo::ModRef; |
1189 | } |
1190 | |
1191 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
1192 | AAQueryInfo &AAQI) { |
1193 | return ModRefInfo::ModRef; |
1194 | } |
1195 | }; |
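// [Illustrative sketch] A minimal analysis built on the CRTP mixin above; the
// class name is hypothetical. Only alias() is customized, so every other
// query falls back to the conservative defaults.
class TrivialSameLocAA : public AAResultBase<TrivialSameLocAA> {
public:
  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI) {
    if (LocA.Ptr == LocB.Ptr && LocA.Size == LocB.Size)
      return AliasResult::MustAlias; // Identical location descriptions.
    return AAResultBase::alias(LocA, LocB, AAQI); // MayAlias.
  }
};
// [End sketch]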
1196 | |
1197 | /// Return true if this pointer is returned by a noalias function. |
1198 | bool isNoAliasCall(const Value *V); |
1199 | |
1200 | /// Return true if this pointer refers to a distinct and identifiable object. |
1201 | /// This returns true for: |
1202 | /// Global Variables and Functions (but not Global Aliases) |
1203 | /// Allocas |
1204 | /// ByVal and NoAlias Arguments |
1205 | /// NoAlias returns (e.g. calls to malloc) |
1206 | /// |
1207 | bool isIdentifiedObject(const Value *V); |
1208 | |
1209 | /// Return true if V is unambiguously identified at the function level.
1210 | /// Different IdentifiedFunctionLocals can't alias.
1211 | /// Further, an IdentifiedFunctionLocal cannot alias with any function
1212 | /// arguments other than itself, which is not necessarily true for |
1213 | /// IdentifiedObjects. |
1214 | bool isIdentifiedFunctionLocal(const Value *V); |
1215 | |
1216 | /// A manager for alias analyses. |
1217 | /// |
1218 | /// This class can have analyses registered with it and when run, it will run |
1219 | /// all of them and aggregate their results into single AA results interface |
1220 | /// that dispatches across all of the alias analysis results available. |
1221 | /// |
1222 | /// Note that the order in which analyses are registered is very significant. |
1223 | /// That is the order in which the results will be aggregated and queried. |
1224 | /// |
1225 | /// This manager effectively wraps the AnalysisManager for registering alias |
1226 | /// analyses. When you register your alias analysis with this manager, it will |
1227 | /// ensure the analysis itself is registered with its AnalysisManager. |
1228 | /// |
1229 | /// The result of this analysis is only invalidated if one of the particular |
1230 | /// aggregated AA results ends up being invalidated. This removes the need to
1231 | /// explicitly preserve the results of `AAManager`. Note that analyses should no |
1232 | /// longer be registered once the `AAManager` is run. |
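     | ///
     | /// For example (a minimal sketch; it assumes the BasicAA and GlobalsAA
     | /// analyses have been registered with the analysis managers, as they are
     | /// in the default pipelines, and that FAM is the FunctionAnalysisManager):
     | /// \code
     | ///   AAManager AA;
     | ///   AA.registerFunctionAnalysis<BasicAA>();
     | ///   AA.registerModuleAnalysis<GlobalsAA>();
     | ///   FAM.registerPass([&] { return std::move(AA); });
     | /// \endcode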
1233 | class AAManager : public AnalysisInfoMixin<AAManager> { |
1234 | public: |
1235 | using Result = AAResults; |
1236 | |
1237 | /// Register a specific AA result. |
1238 | template <typename AnalysisT> void registerFunctionAnalysis() { |
1239 | ResultGetters.push_back(&getFunctionAAResultImpl<AnalysisT>); |
1240 | } |
1241 | |
1242 | /// Register a specific AA result. |
1243 | template <typename AnalysisT> void registerModuleAnalysis() { |
1244 | ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>); |
1245 | } |
1246 | |
1247 | Result run(Function &F, FunctionAnalysisManager &AM); |
1248 | |
1249 | private: |
1250 | friend AnalysisInfoMixin<AAManager>; |
1251 | |
1252 | static AnalysisKey Key; |
1253 | |
1254 | SmallVector<void (*)(Function &F, FunctionAnalysisManager &AM, |
1255 | AAResults &AAResults), |
1256 | 4> ResultGetters; |
1257 | |
1258 | template <typename AnalysisT> |
1259 | static void getFunctionAAResultImpl(Function &F, |
1260 | FunctionAnalysisManager &AM, |
1261 | AAResults &AAResults) { |
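     | // Compute (or retrieve) the function-level result and record it as a
     | // dependency so that invalidating AnalysisT also invalidates this
     | // aggregate result.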
1262 | AAResults.addAAResult(AM.template getResult<AnalysisT>(F)); |
1263 | AAResults.addAADependencyID(AnalysisT::ID()); |
1264 | } |
1265 | |
1266 | template <typename AnalysisT> |
1267 | static void getModuleAAResultImpl(Function &F, FunctionAnalysisManager &AM, |
1268 | AAResults &AAResults) { |
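     | // A function analysis cannot compute a module analysis, so only an
     | // already-cached module-level result can be used here. The invalidation
     | // registration ensures that if the module analysis is invalidated, the
     | // AAManager result that depends on it is invalidated too.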
1269 | auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); |
1270 | if (auto *R = |
1271 | MAMProxy.template getCachedResult<AnalysisT>(*F.getParent())) { |
1272 | AAResults.addAAResult(*R); |
1273 | MAMProxy |
1274 | .template registerOuterAnalysisInvalidation<AnalysisT, AAManager>(); |
1275 | } |
1276 | } |
1277 | }; |
1278 | |
1279 | /// A wrapper pass to provide the legacy pass manager access to a suitably |
1280 | /// prepared AAResults object. |
1281 | class AAResultsWrapperPass : public FunctionPass { |
1282 | std::unique_ptr<AAResults> AAR; |
1283 | |
1284 | public: |
1285 | static char ID; |
1286 | |
1287 | AAResultsWrapperPass(); |
1288 | |
1289 | AAResults &getAAResults() { return *AAR; } |
1290 | const AAResults &getAAResults() const { return *AAR; } |
1291 | |
1292 | bool runOnFunction(Function &F) override; |
1293 | |
1294 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
1295 | }; |
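     |
     | // Typical legacy pass manager usage (a sketch; LI and SI stand for a
     | // hypothetical load and store being queried):
     | //
     | //   // In getAnalysisUsage():
     | //   AU.addRequired<AAResultsWrapperPass>();
     | //   // In runOnFunction():
     | //   AAResults &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
     | //   if (AA.isNoAlias(MemoryLocation::get(LI), MemoryLocation::get(SI)))
     | //     ...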
1296 | |
1297 | /// A wrapper pass for external alias analyses. This just squirrels away the |
1298 | /// callback used to run any analyses and register their results. |
1299 | struct ExternalAAWrapperPass : ImmutablePass { |
1300 | using CallbackT = std::function<void(Pass &, Function &, AAResults &)>; |
1301 | |
1302 | CallbackT CB; |
1303 | |
1304 | static char ID; |
1305 | |
1306 | ExternalAAWrapperPass(); |
1307 | |
1308 | explicit ExternalAAWrapperPass(CallbackT CB); |
1309 | |
1310 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
1311 | AU.setPreservesAll(); |
1312 | } |
1313 | }; |
1314 | |
1315 | FunctionPass *createAAResultsWrapperPass(); |
1316 | |
1317 | /// A wrapper pass around a callback which can be used to populate the |
1318 | /// AAResults in the AAResultsWrapperPass from an external AA. |
1319 | /// |
1320 | /// The callback provided here will be used each time we prepare an AAResults |
1321 | /// object, and will receive a reference to the function wrapper pass, the |
1322 | /// function, and the AAResults object to populate. This should be used when |
1323 | /// setting up a custom pass pipeline to inject a hook into the AA results. |
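     | ///
     | /// For instance, a custom pipeline could inject a hypothetical, suitably
     | /// long-lived result object MyAA like this (a sketch):
     | /// \code
     | ///   PM.add(createExternalAAWrapperPass(
     | ///       [&MyAA](Pass &, Function &, AAResults &AAR) {
     | ///         AAR.addAAResult(MyAA);
     | ///       }));
     | /// \endcode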
1324 | ImmutablePass *createExternalAAWrapperPass( |
1325 | std::function<void(Pass &, Function &, AAResults &)> Callback); |
1326 | |
1327 | /// A helper for the legacy pass manager to create an \c AAResults
1328 | /// object populated to the best of our ability for a particular function when |
1329 | /// inside of a \c ModulePass or a \c CallGraphSCCPass. |
1330 | /// |
1331 | /// If a \c ModulePass or a \c CallGraphSCCPass calls \p |
1332 | /// createLegacyPMAAResults, it also needs to call \p addUsedAAAnalyses in \p |
1333 | /// getAnalysisUsage. |
1334 | AAResults createLegacyPMAAResults(Pass &P, Function &F, BasicAAResult &BAR); |
1335 | |
1336 | /// A helper for the legacy pass manager to populate \p AU to add uses to make |
1337 | /// sure the analyses required by \p createLegacyPMAAResults are available. |
1338 | void getAAResultsAnalysisUsage(AnalysisUsage &AU); |
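     |
     | // A sketch of the pattern described above for a hypothetical ModulePass
     | // (createLegacyPMBasicAAResult is declared in BasicAliasAnalysis.h):
     | //
     | //   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
     | //     getAAResultsAnalysisUsage(AU); // make the needed analyses available
     | //   }
     | //   bool MyPass::runOnModule(Module &M) {
     | //     for (Function &F : M) {
     | //       BasicAAResult BAR = createLegacyPMBasicAAResult(*this, F);
     | //       AAResults AAR = createLegacyPMAAResults(*this, F, BAR);
     | //       // ... query AAR for F ...
     | //     }
     | //   }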
1339 | |
1340 | } // end namespace llvm |
1341 | |
1342 | #endif // LLVM_ANALYSIS_ALIASANALYSIS_H |