File: llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
Warning: line 1976, column 19: Value stored to 'Current' during its initialization is never read
1 | //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // The code below implements dead store elimination using MemorySSA. It uses |
10 | // the following general approach: given a MemoryDef, walk upwards to find |
11 | // clobbering MemoryDefs that may be killed by the starting def. Then check |
12 | // that there are no uses that may read the location of the original MemoryDef |
13 | // in between both MemoryDefs. A bit more concretely: |
14 | // |
15 | // For all MemoryDefs StartDef: |
16 | // 1. Get the next dominating clobbering MemoryDef (MaybeDeadAccess) by walking |
17 | // upwards. |
18 | // 2. Check that there are no reads between MaybeDeadAccess and the StartDef by |
19 | // checking all uses starting at MaybeDeadAccess and walking until we see |
20 | // StartDef. |
21 | // 3. For each found CurrentDef, check that: |
22 | // 1. There are no barrier instructions between CurrentDef and StartDef (like |
23 | // throws or stores with ordering constraints). |
24 | // 2. StartDef is executed whenever CurrentDef is executed. |
25 | // 3. StartDef completely overwrites CurrentDef. |
26 | // 4. Erase CurrentDef from the function and MemorySSA. |
27 | // |
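// For illustration (a hypothetical input, not from this file), the simplest
// case the steps above handle is:
//
//   void f(int *p) {
//     *p = 1; // MaybeDeadAccess: found by walking upwards from the store below
//     *p = 2; // StartDef: completely overwrites the store above
//   }
//
// No MemoryUse reads *p between the two MemoryDefs and no barrier sits
// between them, so step 4 erases the first store.
//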
28 | //===----------------------------------------------------------------------===// |
29 | |
30 | #include "llvm/Transforms/Scalar/DeadStoreElimination.h" |
31 | #include "llvm/ADT/APInt.h" |
32 | #include "llvm/ADT/DenseMap.h" |
33 | #include "llvm/ADT/MapVector.h" |
34 | #include "llvm/ADT/PostOrderIterator.h" |
35 | #include "llvm/ADT/SetVector.h" |
36 | #include "llvm/ADT/SmallPtrSet.h" |
37 | #include "llvm/ADT/SmallVector.h" |
38 | #include "llvm/ADT/Statistic.h" |
39 | #include "llvm/ADT/StringRef.h" |
40 | #include "llvm/Analysis/AliasAnalysis.h" |
41 | #include "llvm/Analysis/CaptureTracking.h" |
42 | #include "llvm/Analysis/GlobalsModRef.h" |
43 | #include "llvm/Analysis/LoopInfo.h" |
44 | #include "llvm/Analysis/MemoryBuiltins.h" |
45 | #include "llvm/Analysis/MemoryLocation.h" |
46 | #include "llvm/Analysis/MemorySSA.h" |
47 | #include "llvm/Analysis/MemorySSAUpdater.h" |
48 | #include "llvm/Analysis/MustExecute.h" |
49 | #include "llvm/Analysis/PostDominators.h" |
50 | #include "llvm/Analysis/TargetLibraryInfo.h" |
51 | #include "llvm/Analysis/ValueTracking.h" |
52 | #include "llvm/IR/Argument.h" |
53 | #include "llvm/IR/BasicBlock.h" |
54 | #include "llvm/IR/Constant.h" |
55 | #include "llvm/IR/Constants.h" |
56 | #include "llvm/IR/DataLayout.h" |
57 | #include "llvm/IR/Dominators.h" |
58 | #include "llvm/IR/Function.h" |
59 | #include "llvm/IR/IRBuilder.h" |
60 | #include "llvm/IR/InstIterator.h" |
61 | #include "llvm/IR/InstrTypes.h" |
62 | #include "llvm/IR/Instruction.h" |
63 | #include "llvm/IR/Instructions.h" |
64 | #include "llvm/IR/IntrinsicInst.h" |
65 | #include "llvm/IR/Intrinsics.h" |
66 | #include "llvm/IR/LLVMContext.h" |
67 | #include "llvm/IR/Module.h" |
68 | #include "llvm/IR/PassManager.h" |
69 | #include "llvm/IR/PatternMatch.h" |
70 | #include "llvm/IR/Value.h" |
71 | #include "llvm/InitializePasses.h" |
72 | #include "llvm/Pass.h" |
73 | #include "llvm/Support/Casting.h" |
74 | #include "llvm/Support/CommandLine.h" |
75 | #include "llvm/Support/Debug.h" |
76 | #include "llvm/Support/DebugCounter.h" |
77 | #include "llvm/Support/ErrorHandling.h" |
78 | #include "llvm/Support/MathExtras.h" |
79 | #include "llvm/Support/raw_ostream.h" |
80 | #include "llvm/Transforms/Scalar.h" |
81 | #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" |
82 | #include "llvm/Transforms/Utils/BuildLibCalls.h" |
83 | #include "llvm/Transforms/Utils/Local.h" |
84 | #include <algorithm> |
85 | #include <cassert> |
86 | #include <cstddef> |
87 | #include <cstdint> |
88 | #include <iterator> |
89 | #include <map> |
90 | #include <utility> |
91 | |
92 | using namespace llvm; |
93 | using namespace PatternMatch; |
94 | |
95 | #define DEBUG_TYPE "dse"
96 | |
97 | STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
98 | STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
99 | STATISTIC(NumFastStores, "Number of stores deleted");
100 | STATISTIC(NumFastOther, "Number of other instrs removed");
101 | STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
102 | STATISTIC(NumModifiedStores, "Number of stores modified");
103 | STATISTIC(NumCFGChecks, "Number of stores modified");
104 | STATISTIC(NumCFGTries, "Number of stores modified");
105 | STATISTIC(NumCFGSuccess, "Number of stores modified");
106 | STATISTIC(NumGetDomMemoryDefPassed,
107 |           "Number of times a valid candidate is returned from getDomMemoryDef");
108 | STATISTIC(NumDomMemDefChecks,
109 |           "Number iterations check for reads in getDomMemoryDef");
110 | |
111 | DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
112 |               "Controls which MemoryDefs are eliminated.");
113 | |
114 | static cl::opt<bool> |
115 | EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", |
116 | cl::init(true), cl::Hidden, |
117 | cl::desc("Enable partial-overwrite tracking in DSE")); |
118 | |
119 | static cl::opt<bool> |
120 | EnablePartialStoreMerging("enable-dse-partial-store-merging", |
121 | cl::init(true), cl::Hidden, |
122 | cl::desc("Enable partial store merging in DSE")); |
123 | |
124 | static cl::opt<unsigned> |
125 | MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden, |
126 | cl::desc("The number of memory instructions to scan for " |
127 | "dead store elimination (default = 150)")); |
128 | static cl::opt<unsigned> MemorySSAUpwardsStepLimit( |
129 | "dse-memoryssa-walklimit", cl::init(90), cl::Hidden, |
130 | cl::desc("The maximum number of steps while walking upwards to find " |
131 | "MemoryDefs that may be killed (default = 90)")); |
132 | |
133 | static cl::opt<unsigned> MemorySSAPartialStoreLimit( |
134 | "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden, |
135 | cl::desc("The maximum number candidates that only partially overwrite the " |
136 | "killing MemoryDef to consider" |
137 | " (default = 5)")); |
138 | |
139 | static cl::opt<unsigned> MemorySSADefsPerBlockLimit( |
140 | "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden, |
141 | cl::desc("The number of MemoryDefs we consider as candidates to eliminated " |
142 | "other stores per basic block (default = 5000)")); |
143 | |
144 | static cl::opt<unsigned> MemorySSASameBBStepCost( |
145 | "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden, |
146 | cl::desc( |
147 | "The cost of a step in the same basic block as the killing MemoryDef" |
148 | "(default = 1)")); |
149 | |
150 | static cl::opt<unsigned> |
151 | MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5), |
152 | cl::Hidden, |
153 | cl::desc("The cost of a step in a different basic " |
154 | "block than the killing MemoryDef" |
155 | "(default = 5)")); |
156 | |
157 | static cl::opt<unsigned> MemorySSAPathCheckLimit( |
158 | "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden, |
159 | cl::desc("The maximum number of blocks to check when trying to prove that " |
160 | "all paths to an exit go through a killing block (default = 50)")); |
161 | |
162 | //===----------------------------------------------------------------------===// |
163 | // Helper functions |
164 | //===----------------------------------------------------------------------===// |
165 | using OverlapIntervalsTy = std::map<int64_t, int64_t>; |
166 | using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>; |
167 | |
168 | /// Does this instruction write some memory? This only returns true for things |
169 | /// that we can analyze with other helpers below. |
170 | static bool hasAnalyzableMemoryWrite(Instruction *I, |
171 | const TargetLibraryInfo &TLI) { |
172 | if (isa<StoreInst>(I)) |
173 | return true; |
174 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { |
175 | switch (II->getIntrinsicID()) { |
176 | default: |
177 | return false; |
178 | case Intrinsic::memset: |
179 | case Intrinsic::memmove: |
180 | case Intrinsic::memcpy: |
181 | case Intrinsic::memcpy_inline: |
182 | case Intrinsic::memcpy_element_unordered_atomic: |
183 | case Intrinsic::memmove_element_unordered_atomic: |
184 | case Intrinsic::memset_element_unordered_atomic: |
185 | case Intrinsic::init_trampoline: |
186 | case Intrinsic::lifetime_end: |
187 | case Intrinsic::masked_store: |
188 | return true; |
189 | } |
190 | } |
191 | if (auto *CB = dyn_cast<CallBase>(I)) { |
192 | LibFunc LF; |
193 | if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) { |
194 | switch (LF) { |
195 | case LibFunc_strcpy: |
196 | case LibFunc_strncpy: |
197 | case LibFunc_strcat: |
198 | case LibFunc_strncat: |
199 | return true; |
200 | default: |
201 | return false; |
202 | } |
203 | } |
204 | } |
205 | return false; |
206 | } |
207 | |
208 | /// Return a Location stored to by the specified instruction. If isRemovable |
209 | /// returns true, this function and getLocForRead completely describe the memory |
210 | /// operations for this instruction. |
211 | static MemoryLocation getLocForWrite(Instruction *Inst, |
212 | const TargetLibraryInfo &TLI) { |
213 | if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) |
214 | return MemoryLocation::get(SI); |
215 | |
216 | // memcpy/memmove/memset. |
217 | if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) |
218 | return MemoryLocation::getForDest(MI); |
219 | |
220 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { |
221 | switch (II->getIntrinsicID()) { |
222 | default: |
223 | return MemoryLocation(); // Unhandled intrinsic. |
224 | case Intrinsic::init_trampoline: |
225 | return MemoryLocation::getAfter(II->getArgOperand(0)); |
226 | case Intrinsic::masked_store: |
227 | return MemoryLocation::getForArgument(II, 1, TLI); |
228 | case Intrinsic::lifetime_end: { |
229 | uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(); |
230 | return MemoryLocation(II->getArgOperand(1), Len); |
231 | } |
232 | } |
233 | } |
234 | if (auto *CB = dyn_cast<CallBase>(Inst)) |
235 | // All the supported TLI functions so far happen to have dest as their |
236 | // first argument. |
237 | return MemoryLocation::getAfter(CB->getArgOperand(0)); |
238 | return MemoryLocation(); |
239 | } |
240 | |
241 | /// If the value of this instruction and the memory it writes to is unused, may |
242 | /// we delete this instruction? |
243 | static bool isRemovable(Instruction *I) { |
244 | // Don't remove volatile/atomic stores. |
245 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) |
246 | return SI->isUnordered(); |
247 | |
248 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { |
249 | switch (II->getIntrinsicID()) { |
250 | default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
251 | case Intrinsic::lifetime_end: |
252 | // Never remove dead lifetime_end's, e.g. because it is followed by a |
253 | // free. |
254 | return false; |
255 | case Intrinsic::init_trampoline: |
256 | // Always safe to remove init_trampoline. |
257 | return true; |
258 | case Intrinsic::memset: |
259 | case Intrinsic::memmove: |
260 | case Intrinsic::memcpy: |
261 | case Intrinsic::memcpy_inline: |
262 | // Don't remove volatile memory intrinsics. |
263 | return !cast<MemIntrinsic>(II)->isVolatile(); |
264 | case Intrinsic::memcpy_element_unordered_atomic: |
265 | case Intrinsic::memmove_element_unordered_atomic: |
266 | case Intrinsic::memset_element_unordered_atomic: |
267 | case Intrinsic::masked_store: |
268 | return true; |
269 | } |
270 | } |
271 | |
272 | // note: only get here for calls with analyzable writes - i.e. libcalls |
273 | if (auto *CB = dyn_cast<CallBase>(I)) |
274 | return CB->use_empty(); |
275 | |
276 | return false; |
277 | } |
278 | |
279 | /// Returns true if the end of this instruction can be safely shortened in |
280 | /// length. |
281 | static bool isShortenableAtTheEnd(Instruction *I) { |
282 | // Don't shorten stores for now |
283 | if (isa<StoreInst>(I)) |
284 | return false; |
285 | |
286 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { |
287 | switch (II->getIntrinsicID()) { |
288 | default: return false; |
289 | case Intrinsic::memset: |
290 | case Intrinsic::memcpy: |
291 | case Intrinsic::memcpy_element_unordered_atomic: |
292 | case Intrinsic::memset_element_unordered_atomic: |
293 | // Do shorten memory intrinsics. |
294 | // FIXME: Add memmove if it's also safe to transform. |
295 | return true; |
296 | } |
297 | } |
298 | |
299 | // Don't shorten libcalls for now.
300 | |
301 | return false; |
302 | } |
303 | |
304 | /// Returns true if the beginning of this instruction can be safely shortened |
305 | /// in length. |
306 | static bool isShortenableAtTheBeginning(Instruction *I) { |
307 | // FIXME: Handle only memset for now. Supporting memcpy/memmove should be |
308 | // easily done by offsetting the source address. |
309 | return isa<AnyMemSetInst>(I); |
310 | } |
311 | |
312 | static uint64_t getPointerSize(const Value *V, const DataLayout &DL, |
313 | const TargetLibraryInfo &TLI, |
314 | const Function *F) { |
315 | uint64_t Size; |
316 | ObjectSizeOpts Opts; |
317 | Opts.NullIsUnknownSize = NullPointerIsDefined(F); |
318 | |
319 | if (getObjectSize(V, Size, DL, &TLI, Opts)) |
320 | return Size; |
321 | return MemoryLocation::UnknownSize; |
322 | } |
323 | |
324 | namespace { |
325 | |
326 | enum OverwriteResult { |
327 | OW_Begin, |
328 | OW_Complete, |
329 | OW_End, |
330 | OW_PartialEarlierWithFullLater, |
331 | OW_MaybePartial, |
332 | OW_Unknown |
333 | }; |
334 | |
335 | } // end anonymous namespace |
336 | |
337 | /// Check if two instructions are masked stores that completely
338 | /// overwrite one another. More specifically, \p KillingI has to |
339 | /// overwrite \p DeadI. |
340 | static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI, |
341 | const Instruction *DeadI, |
342 | BatchAAResults &AA) { |
343 | const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI); |
344 | const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI); |
345 | if (KillingII == nullptr || DeadII == nullptr) |
346 | return OW_Unknown; |
347 | if (KillingII->getIntrinsicID() != Intrinsic::masked_store || |
348 | DeadII->getIntrinsicID() != Intrinsic::masked_store) |
349 | return OW_Unknown; |
350 | // Pointers. |
351 | Value *KillingPtr = KillingII->getArgOperand(1)->stripPointerCasts(); |
352 | Value *DeadPtr = DeadII->getArgOperand(1)->stripPointerCasts(); |
353 | if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr)) |
354 | return OW_Unknown; |
355 | // Masks. |
356 | // TODO: check that KillingII's mask is a superset of the DeadII's mask. |
357 | if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3)) |
358 | return OW_Unknown; |
359 | return OW_Complete; |
360 | } |
361 | |
362 | /// Return 'OW_Complete' if a store to the 'KillingLoc' location completely |
363 | /// overwrites a store to the 'DeadLoc' location, 'OW_End' if the end of the |
364 | /// 'DeadLoc' location is completely overwritten by 'KillingLoc', 'OW_Begin' |
365 | /// if the beginning of the 'DeadLoc' location is overwritten by 'KillingLoc'. |
366 | /// 'OW_PartialEarlierWithFullLater' means that a dead (big) store was |
367 | /// overwritten by a killing (smaller) store which doesn't write outside the big |
368 | /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined. |
369 | /// NOTE: This function must only be called if both \p KillingLoc and \p |
370 | /// DeadLoc belong to the same underlying object with valid \p KillingOff and |
371 | /// \p DeadOff. |
372 | static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc, |
373 | const MemoryLocation &DeadLoc, |
374 | int64_t KillingOff, int64_t DeadOff, |
375 | Instruction *DeadI, |
376 | InstOverlapIntervalsTy &IOL) { |
377 | const uint64_t KillingSize = KillingLoc.Size.getValue(); |
378 | const uint64_t DeadSize = DeadLoc.Size.getValue(); |
379 | // We may now overlap, although the overlap is not complete. There might also |
380 | // be other incomplete overlaps, and together, they might cover the complete |
381 | // dead store. |
382 | // Note: The correctness of this logic depends on the fact that this function |
383 | // is never called with DepWrite when there are any intervening reads.
384 | if (EnablePartialOverwriteTracking && |
385 | KillingOff < int64_t(DeadOff + DeadSize) && |
386 | int64_t(KillingOff + KillingSize) >= DeadOff) { |
387 | |
388 | // Insert our part of the overlap into the map. |
389 | auto &IM = IOL[DeadI]; |
390 | LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", "
391 |                   << int64_t(DeadOff + DeadSize) << ") KillingLoc ["
392 |                   << KillingOff << ", " << int64_t(KillingOff + KillingSize)
393 |                   << ")\n");
394 | |
395 | // Make sure that we only insert non-overlapping intervals and combine |
396 | // adjacent intervals. The intervals are stored in the map with the ending |
397 | // offset as the key (in the half-open sense) and the starting offset as |
398 | // the value. |
399 | int64_t KillingIntStart = KillingOff; |
400 | int64_t KillingIntEnd = KillingOff + KillingSize; |
401 | |
402 | // Find any intervals ending at, or after, KillingIntStart which start |
403 | // before KillingIntEnd. |
404 | auto ILI = IM.lower_bound(KillingIntStart); |
405 | if (ILI != IM.end() && ILI->second <= KillingIntEnd) { |
406 | // This existing interval is overlapped with the current store somewhere |
407 | // in [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing |
408 | // intervals and adjusting our start and end. |
409 | KillingIntStart = std::min(KillingIntStart, ILI->second); |
410 | KillingIntEnd = std::max(KillingIntEnd, ILI->first); |
411 | ILI = IM.erase(ILI); |
412 | |
413 | // Continue erasing and adjusting our end in case other previous |
414 | // intervals are also overlapped with the current store. |
415 | // |
416 | // |--- dead 1 ---| |--- dead 2 ---| |
417 | // |------- killing---------| |
418 | // |
419 | while (ILI != IM.end() && ILI->second <= KillingIntEnd) { |
420 | assert(ILI->second > KillingIntStart && "Unexpected interval");
421 | KillingIntEnd = std::max(KillingIntEnd, ILI->first); |
422 | ILI = IM.erase(ILI); |
423 | } |
424 | } |
425 | |
426 | IM[KillingIntEnd] = KillingIntStart; |
427 | |
428 | ILI = IM.begin(); |
429 | if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) { |
430 | LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc ["
431 |                   << DeadOff << ", " << int64_t(DeadOff + DeadSize)
432 |                   << ") Composite KillingLoc [" << ILI->second << ", "
433 |                   << ILI->first << ")\n");
434 | ++NumCompletePartials; |
435 | return OW_Complete; |
436 | } |
437 | } |
438 | |
439 | // Check for a dead store which writes to all the memory locations that |
440 | // the killing store writes to. |
441 | if (EnablePartialStoreMerging && KillingOff >= DeadOff && |
442 | int64_t(DeadOff + DeadSize) > KillingOff && |
443 | uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) { |
444 | LLVM_DEBUG(dbgs() << "DSE: Partial overwrite a dead load [" << DeadOff
445 |                   << ", " << int64_t(DeadOff + DeadSize)
446 |                   << ") by a killing store [" << KillingOff << ", "
447 |                   << int64_t(KillingOff + KillingSize) << ")\n");
448 | // TODO: Maybe come up with a better name? |
449 | return OW_PartialEarlierWithFullLater; |
450 | } |
451 | |
452 | // Another interesting case is if the killing store overwrites the end of the |
453 | // dead store. |
454 | // |
455 | // |--dead--| |
456 | // |-- killing --| |
457 | // |
458 | // In this case we may want to trim the size of the dead store to avoid
459 | // generating stores to addresses which will definitely be overwritten by
460 | // the killing store.
461 | if (!EnablePartialOverwriteTracking && |
462 | (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) && |
463 | int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize))) |
464 | return OW_End; |
465 | |
466 | // Finally, we also need to check if the killing store overwrites the |
467 | // beginning of the dead store. |
468 | // |
469 | // |--dead--| |
470 | // |-- killing --| |
471 | // |
472 | // In this case we may want to move the destination address and trim the
473 | // size of the dead store to avoid generating stores to addresses which
474 | // will definitely be overwritten by the killing store.
475 | if (!EnablePartialOverwriteTracking && |
476 | (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) { |
477 | assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) &&
478 |        "Expect to be handled as OW_Complete");
479 | return OW_Begin; |
480 | } |
481 | // Otherwise, they don't completely overlap. |
482 | return OW_Unknown; |
483 | } |
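// A minimal standalone sketch (illustrative, mirroring the merging logic in
// isPartialOverwrite above) of the interval map: entries are keyed by their
// half-open end offset with the start offset as the mapped value, and
// inserting a killing interval absorbs every overlapping or adjacent entry:
//
//   std::map<int64_t, int64_t> IM; // end offset -> start offset
//   auto InsertInterval = [&IM](int64_t Start, int64_t End) {
//     auto It = IM.lower_bound(Start); // first interval ending >= Start
//     while (It != IM.end() && It->second <= End) {
//       Start = std::min(Start, It->second); // absorb overlapping interval
//       End = std::max(End, It->first);
//       It = IM.erase(It);
//     }
//     IM[End] = Start;
//   };
//
// For example, InsertInterval(0, 4) followed by InsertInterval(4, 8) leaves
// the single entry {8, 0}: the two adjacent killing stores jointly cover
// [0, 8) and can thus kill a dead 8-byte store starting at offset 0.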
484 | |
485 | /// Returns true if the memory which is accessed by the second instruction is not |
486 | /// modified between the first and the second instruction. |
487 | /// Precondition: Second instruction must be dominated by the first |
488 | /// instruction. |
489 | static bool |
490 | memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, |
491 | BatchAAResults &AA, const DataLayout &DL, |
492 | DominatorTree *DT) { |
493 | // Do a backwards scan through the CFG from SecondI to FirstI. Look for |
494 | // instructions which can modify the memory location accessed by SecondI. |
495 | // |
496 | // While doing the walk keep track of the address to check. It might be |
497 | // different in different basic blocks due to PHI translation. |
498 | using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>; |
499 | SmallVector<BlockAddressPair, 16> WorkList; |
500 | // Keep track of the address we visited each block with. Bail out if we |
501 | // visit a block with different addresses. |
502 | DenseMap<BasicBlock *, Value *> Visited; |
503 | |
504 | BasicBlock::iterator FirstBBI(FirstI); |
505 | ++FirstBBI; |
506 | BasicBlock::iterator SecondBBI(SecondI); |
507 | BasicBlock *FirstBB = FirstI->getParent(); |
508 | BasicBlock *SecondBB = SecondI->getParent(); |
509 | MemoryLocation MemLoc; |
510 | if (auto *MemSet = dyn_cast<MemSetInst>(SecondI)) |
511 | MemLoc = MemoryLocation::getForDest(MemSet); |
512 | else |
513 | MemLoc = MemoryLocation::get(SecondI); |
514 | |
515 | auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr); |
516 | |
517 | // Start checking the SecondBB. |
518 | WorkList.push_back( |
519 | std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr))); |
520 | bool isFirstBlock = true; |
521 | |
522 | // Check all blocks going backward until we reach the FirstBB. |
523 | while (!WorkList.empty()) { |
524 | BlockAddressPair Current = WorkList.pop_back_val(); |
525 | BasicBlock *B = Current.first; |
526 | PHITransAddr &Addr = Current.second; |
527 | Value *Ptr = Addr.getAddr(); |
528 | |
529 | // Ignore instructions before FirstI if this is the FirstBB. |
530 | BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin()); |
531 | |
532 | BasicBlock::iterator EI; |
533 | if (isFirstBlock) { |
534 | // Ignore instructions after SecondI if this is the first visit of SecondBB. |
535 | assert(B == SecondBB && "first block is not the store block");
536 | EI = SecondBBI; |
537 | isFirstBlock = false; |
538 | } else { |
539 | // It's not SecondBB or (in case of a loop) the second visit of SecondBB. |
540 | // In this case we also have to look at instructions after SecondI. |
541 | EI = B->end(); |
542 | } |
543 | for (; BI != EI; ++BI) { |
544 | Instruction *I = &*BI; |
545 | if (I->mayWriteToMemory() && I != SecondI) |
546 | if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr)))) |
547 | return false; |
548 | } |
549 | if (B != FirstBB) { |
550 | assert(B != &FirstBB->getParent()->getEntryBlock() &&
551 |        "Should not hit the entry block because SI must be dominated by LI");
552 | for (BasicBlock *Pred : predecessors(B)) { |
553 | PHITransAddr PredAddr = Addr; |
554 | if (PredAddr.NeedsPHITranslationFromBlock(B)) { |
555 | if (!PredAddr.IsPotentiallyPHITranslatable()) |
556 | return false; |
557 | if (PredAddr.PHITranslateValue(B, Pred, DT, false)) |
558 | return false; |
559 | } |
560 | Value *TranslatedPtr = PredAddr.getAddr(); |
561 | auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr)); |
562 | if (!Inserted.second) { |
563 | // We already visited this block before. If it was with a different |
564 | // address - bail out! |
565 | if (TranslatedPtr != Inserted.first->second) |
566 | return false; |
567 | // ... otherwise just skip it. |
568 | continue; |
569 | } |
570 | WorkList.push_back(std::make_pair(Pred, PredAddr)); |
571 | } |
572 | } |
573 | } |
574 | return true; |
575 | } |
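// A stripped-down sketch of the backwards walk above (illustrative; the real
// code additionally PHI-translates the queried address per predecessor):
//
//   SmallVector<BasicBlock *, 16> WorkList = {SecondBB};
//   SmallPtrSet<BasicBlock *, 16> Visited;
//   while (!WorkList.empty()) {
//     BasicBlock *B = WorkList.pop_back_val();
//     // ... scan B's relevant instruction range; return false on a clobber.
//     if (B != FirstBB)
//       for (BasicBlock *Pred : predecessors(B))
//         if (Visited.insert(Pred).second)
//           WorkList.push_back(Pred);
//   }
//   return true;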
576 | |
577 | static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart, |
578 | uint64_t &DeadSize, int64_t KillingStart, |
579 | uint64_t KillingSize, bool IsOverwriteEnd) { |
580 | auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI); |
581 | Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne(); |
582 | |
583 | // We assume that memset/memcpy operates in chunks of the "largest" native
584 | // type size and aligned on the same value. That means the optimal start and
585 | // size of memset/memcpy should be a multiple of the preferred alignment of
586 | // that type, i.e. there is no sense in trying to reduce the store size any
587 | // further, since any "extra" stores come for free anyway.
588 | // On the other hand, the maximum alignment we can achieve is limited by the
589 | // alignment of the initial store.
590 | |
591 | // TODO: Limit maximum alignment by preferred (or abi?) alignment of the |
592 | // "largest" native type. |
593 | // Note: What is the proper way to get that value? |
594 | // Should TargetTransformInfo::getRegisterBitWidth be used or anything else? |
595 | // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign); |
596 | |
597 | int64_t ToRemoveStart = 0; |
598 | uint64_t ToRemoveSize = 0; |
599 | // Compute start and size of the region to remove. Make sure 'PrefAlign' is |
600 | // maintained on the remaining store. |
601 | if (IsOverwriteEnd) { |
602 | // Calculate required adjustment for 'KillingStart' in order to keep the
603 | // remaining store size aligned on 'PrefAlign'.
604 | uint64_t Off = |
605 | offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign); |
606 | ToRemoveStart = KillingStart + Off; |
607 | if (DeadSize <= uint64_t(ToRemoveStart - DeadStart)) |
608 | return false; |
609 | ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart); |
610 | } else { |
611 | ToRemoveStart = DeadStart; |
612 | assert(KillingSize >= uint64_t(DeadStart - KillingStart) &&
613 |        "Not overlapping accesses?");
614 | ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart); |
615 | // Calculate required adjustment for 'ToRemoveSize' in order to keep the
616 | // start of the remaining store aligned on 'PrefAlign'.
617 | uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign); |
618 | if (Off != 0) { |
619 | if (ToRemoveSize <= (PrefAlign.value() - Off)) |
620 | return false; |
621 | ToRemoveSize -= PrefAlign.value() - Off; |
622 | } |
623 | assert(isAligned(PrefAlign, ToRemoveSize) &&
624 |        "Should preserve selected alignment");
625 | } |
626 | |
627 | assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
628 | assert(DeadSize > ToRemoveSize && "Can't remove more than original size");
629 | |
630 | uint64_t NewSize = DeadSize - ToRemoveSize; |
631 | if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) { |
632 | // When shortening an atomic memory intrinsic, the newly shortened |
633 | // length must remain an integer multiple of the element size. |
634 | const uint32_t ElementSize = AMI->getElementSizeInBytes(); |
635 | if (0 != NewSize % ElementSize) |
636 | return false; |
637 | } |
638 | |
639 | LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
640 |                   << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI
641 |                   << "\n  KILLER [" << ToRemoveStart << ", "
642 |                   << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
643 | |
644 | Value *DeadWriteLength = DeadIntrinsic->getLength(); |
645 | Value *TrimmedLength = ConstantInt::get(DeadWriteLength->getType(), NewSize); |
646 | DeadIntrinsic->setLength(TrimmedLength); |
647 | DeadIntrinsic->setDestAlignment(PrefAlign); |
648 | |
649 | if (!IsOverwriteEnd) { |
650 | Value *OrigDest = DeadIntrinsic->getRawDest(); |
651 | Type *Int8PtrTy = |
652 | Type::getInt8PtrTy(DeadIntrinsic->getContext(), |
653 | OrigDest->getType()->getPointerAddressSpace()); |
654 | Value *Dest = OrigDest; |
655 | if (OrigDest->getType() != Int8PtrTy) |
656 | Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", DeadI); |
657 | Value *Indices[1] = { |
658 | ConstantInt::get(DeadWriteLength->getType(), ToRemoveSize)}; |
659 | Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds( |
660 | Type::getInt8Ty(DeadIntrinsic->getContext()), Dest, Indices, "", DeadI); |
661 | NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc()); |
662 | if (NewDestGEP->getType() != OrigDest->getType()) |
663 | NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(), |
664 | "", DeadI); |
665 | DeadIntrinsic->setDest(NewDestGEP); |
666 | } |
667 | |
668 | // Finally update start and size of dead access. |
669 | if (!IsOverwriteEnd) |
670 | DeadStart += ToRemoveSize; |
671 | DeadSize = NewSize; |
672 | |
673 | return true; |
674 | } |
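// A worked example (hypothetical values) of the IsOverwriteEnd arithmetic in
// tryToShorten, with plain integers standing in for the intrinsic operands:
//
//   int64_t DeadStart = 0;     uint64_t DeadSize = 32;
//   int64_t KillingStart = 20; Align PrefAlign(8);
//   uint64_t Off =
//       offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign); // 4
//   int64_t ToRemoveStart = KillingStart + Off;                          // 24
//   uint64_t ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart); // 8
//   uint64_t NewSize = DeadSize - ToRemoveSize;                          // 24
//
// That is, a 32-byte memset whose tail is overwritten from offset 20 onwards
// is trimmed to 24 bytes rather than 20, keeping the remaining length a
// multiple of the preferred alignment.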
675 | |
676 | static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap, |
677 | int64_t &DeadStart, uint64_t &DeadSize) { |
678 | if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI)) |
679 | return false; |
680 | |
681 | OverlapIntervalsTy::iterator OII = --IntervalMap.end(); |
682 | int64_t KillingStart = OII->second; |
683 | uint64_t KillingSize = OII->first - KillingStart; |
684 | |
685 | assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
686 | |
687 | if (KillingStart > DeadStart && |
688 | // Note: "KillingStart - KillingStart" is known to be positive due to |
689 | // preceding check. |
690 | (uint64_t)(KillingStart - DeadStart) < DeadSize && |
691 | // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to |
692 | // be non negative due to preceding checks. |
693 | KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) { |
694 | if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize, |
695 | true)) { |
696 | IntervalMap.erase(OII); |
697 | return true; |
698 | } |
699 | } |
700 | return false; |
701 | } |
702 | |
703 | static bool tryToShortenBegin(Instruction *DeadI, |
704 | OverlapIntervalsTy &IntervalMap, |
705 | int64_t &DeadStart, uint64_t &DeadSize) { |
706 | if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI)) |
707 | return false; |
708 | |
709 | OverlapIntervalsTy::iterator OII = IntervalMap.begin(); |
710 | int64_t KillingStart = OII->second; |
711 | uint64_t KillingSize = OII->first - KillingStart; |
712 | |
713 | assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
714 | |
715 | if (KillingStart <= DeadStart && |
716 | // Note: "DeadStart - KillingStart" is known to be non negative due to |
717 | // preceding check. |
718 | KillingSize > (uint64_t)(DeadStart - KillingStart)) { |
719 | // Note: "KillingSize - (uint64_t)(DeadStart - DeadStart)" is known to |
720 | // be positive due to preceding checks. |
721 | assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize &&
722 |        "Should have been handled as OW_Complete");
723 | if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize, |
724 | false)) { |
725 | IntervalMap.erase(OII); |
726 | return true; |
727 | } |
728 | } |
729 | return false; |
730 | } |
731 | |
732 | static bool removePartiallyOverlappedStores(const DataLayout &DL, |
733 | InstOverlapIntervalsTy &IOL, |
734 | const TargetLibraryInfo &TLI) { |
735 | bool Changed = false; |
736 | for (auto OI : IOL) { |
737 | Instruction *DeadI = OI.first; |
738 | MemoryLocation Loc = getLocForWrite(DeadI, TLI); |
739 | assert(isRemovable(DeadI) && "Expect only removable instruction");
740 | |
741 | const Value *Ptr = Loc.Ptr->stripPointerCasts(); |
742 | int64_t DeadStart = 0; |
743 | uint64_t DeadSize = Loc.Size.getValue(); |
744 | GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL); |
745 | OverlapIntervalsTy &IntervalMap = OI.second; |
746 | Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize); |
747 | if (IntervalMap.empty()) |
748 | continue; |
749 | Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize); |
750 | } |
751 | return Changed; |
752 | } |
753 | |
754 | static Constant * |
755 | tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI, |
756 | int64_t KillingOffset, int64_t DeadOffset, |
757 | const DataLayout &DL, BatchAAResults &AA, |
758 | DominatorTree *DT) { |
759 | |
760 | if (DeadI && isa<ConstantInt>(DeadI->getValueOperand()) && |
761 | DL.typeSizeEqualsStoreSize(DeadI->getValueOperand()->getType()) && |
762 | KillingI && isa<ConstantInt>(KillingI->getValueOperand()) && |
763 | DL.typeSizeEqualsStoreSize(KillingI->getValueOperand()->getType()) && |
764 | memoryIsNotModifiedBetween(DeadI, KillingI, AA, DL, DT)) { |
765 | // If the store we find is: |
766 | // a) partially overwritten by the store to 'Loc' |
767 | // b) the killing store is fully contained in the dead one and |
768 | // c) they both have a constant value |
769 | // d) none of the two stores need padding |
770 | // Merge the two stores, replacing the dead store's value with a |
771 | // merge of both values. |
772 | // TODO: Deal with other constant types (vectors, etc), and probably |
773 | // some mem intrinsics (if needed) |
774 | |
775 | APInt DeadValue = cast<ConstantInt>(DeadI->getValueOperand())->getValue(); |
776 | APInt KillingValue = |
777 | cast<ConstantInt>(KillingI->getValueOperand())->getValue(); |
778 | unsigned KillingBits = KillingValue.getBitWidth(); |
779 | assert(DeadValue.getBitWidth() > KillingValue.getBitWidth());
780 | KillingValue = KillingValue.zext(DeadValue.getBitWidth()); |
781 | |
782 | // Offset of the smaller store inside the larger store |
783 | unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8; |
784 | unsigned LShiftAmount = |
785 | DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits |
786 | : BitOffsetDiff; |
787 | APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount, |
788 | LShiftAmount + KillingBits); |
789 | // Clear the bits we'll be replacing, then OR with the smaller |
790 | // store, shifted appropriately. |
791 | APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount); |
792 | LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Dead: " << *DeadI
793 |                   << "\n  Killing: " << *KillingI
794 |                   << "\n  Merged Value: " << Merged << '\n');
795 | return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged); |
796 | } |
797 | return nullptr; |
798 | } |
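// A worked example (hypothetical values, little-endian) of the merge above:
// for a dead i32 store of 0xAABBCCDD at offset 0 and a killing i16 store of
// 0x1122 at offset 1:
//
//   BitOffsetDiff = (1 - 0) * 8 = 8;  LShiftAmount = 8;
//   Mask   = APInt::getBitsSet(32, 8, 24);  // 0x00FFFF00
//   Merged = (0xAABBCCDD & ~0x00FFFF00)     // 0xAA0000DD
//          | (0x00001122 << 8);             // 0x00112200
//   // Merged == 0xAA1122DD
//
// The dead store is then rewritten to store the merged constant, making the
// killing store removable.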
799 | |
800 | namespace { |
801 | // Returns true if \p I is an intrinsic that does not read or write memory.
802 | bool isNoopIntrinsic(Instruction *I) { |
803 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { |
804 | switch (II->getIntrinsicID()) { |
805 | case Intrinsic::lifetime_start: |
806 | case Intrinsic::lifetime_end: |
807 | case Intrinsic::invariant_end: |
808 | case Intrinsic::launder_invariant_group: |
809 | case Intrinsic::assume: |
810 | return true; |
811 | case Intrinsic::dbg_addr: |
812 | case Intrinsic::dbg_declare: |
813 | case Intrinsic::dbg_label: |
814 | case Intrinsic::dbg_value: |
815 | llvm_unreachable("Intrinsic should not be modeled in MemorySSA")::llvm::llvm_unreachable_internal("Intrinsic should not be modeled in MemorySSA" , "/build/llvm-toolchain-snapshot-14~++20211014061306+0fbd3aad75f9/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp" , 815); |
816 | default: |
817 | return false; |
818 | } |
819 | } |
820 | return false; |
821 | } |
822 | |
823 | // Check if we can ignore \p D for DSE. |
824 | bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller, |
825 | const TargetLibraryInfo &TLI) { |
826 | Instruction *DI = D->getMemoryInst(); |
827 | // Calls that only access inaccessible memory cannot read or write any memory |
828 | // locations we consider for elimination. |
829 | if (auto *CB = dyn_cast<CallBase>(DI)) |
830 | if (CB->onlyAccessesInaccessibleMemory()) { |
831 | if (isAllocLikeFn(DI, &TLI)) |
832 | return false; |
833 | return true; |
834 | } |
835 | // We can eliminate stores to locations not visible to the caller across |
836 | // throwing instructions. |
837 | if (DI->mayThrow() && !DefVisibleToCaller) |
838 | return true; |
839 | |
840 | // We can remove the dead stores irrespective of the fence and its ordering
841 | // (release/acquire/seq_cst). Fences only constrain the ordering of
842 | // already visible stores; they do not make a store visible to other
843 | // threads. So skipping over a fence does not change a store from being
844 | // dead.
845 | if (isa<FenceInst>(DI)) |
846 | return true; |
847 | |
848 | // Skip intrinsics that do not really read or modify memory. |
849 | if (isNoopIntrinsic(DI)) |
850 | return true; |
851 | |
852 | return false; |
853 | } |
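// For instance (an illustrative sketch in C++ terms), in
//
//   void f() {
//     int Local = 0;                                        // dead store
//     std::atomic_thread_fence(std::memory_order_seq_cst);
//     Local = 1;                                            // killing store
//   }
//
// the fence's MemoryDef is skipped by the rule above: it only constrains the
// ordering of already visible stores, so the earlier store to 'Local' can
// still be proven dead.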
854 | |
855 | struct DSEState { |
856 | Function &F; |
857 | AliasAnalysis &AA; |
858 | EarliestEscapeInfo EI; |
859 | |
860 | /// The single BatchAA instance that is used to cache AA queries. It will |
861 | /// not be invalidated over the whole run. This is safe, because: |
862 | /// 1. Only memory writes are removed, so the alias cache for memory |
863 | /// locations remains valid. |
864 | /// 2. No new instructions are added (only instructions removed), so cached |
865 | /// information for a deleted value cannot be accessed by a re-used new |
866 | /// value pointer. |
867 | BatchAAResults BatchAA; |
868 | |
869 | MemorySSA &MSSA; |
870 | DominatorTree &DT; |
871 | PostDominatorTree &PDT; |
872 | const TargetLibraryInfo &TLI; |
873 | const DataLayout &DL; |
874 | const LoopInfo &LI; |
875 | |
876 | // Whether the function contains any irreducible control flow, needed to
877 | // detect loops accurately.
878 | bool ContainsIrreducibleLoops; |
879 | |
880 | // All MemoryDefs that potentially could kill other MemDefs. |
881 | SmallVector<MemoryDef *, 64> MemDefs; |
882 | // Any that should be skipped as they are already deleted |
883 | SmallPtrSet<MemoryAccess *, 4> SkipStores; |
884 | // Keep track of all of the objects that are invisible to the caller before |
885 | // the function returns. |
886 | // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet; |
887 | DenseMap<const Value *, bool> InvisibleToCallerBeforeRet; |
888 | // Keep track of all of the objects that are invisible to the caller after |
889 | // the function returns. |
890 | DenseMap<const Value *, bool> InvisibleToCallerAfterRet; |
891 | // Keep track of blocks with throwing instructions not modeled in MemorySSA. |
892 | SmallPtrSet<BasicBlock *, 16> ThrowingBlocks; |
893 | // Post-order numbers for each basic block. Used to figure out if memory |
894 | // accesses are executed before another access. |
895 | DenseMap<BasicBlock *, unsigned> PostOrderNumbers; |
896 | |
897 | /// Keep track of instructions (partly) overlapping with killing MemoryDefs per |
898 | /// basic block. |
899 | DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs; |
900 | |
901 | // Class contains self-reference, make sure it's not copied/moved. |
902 | DSEState(const DSEState &) = delete; |
903 | DSEState &operator=(const DSEState &) = delete; |
904 | |
905 | DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT, |
906 | PostDominatorTree &PDT, const TargetLibraryInfo &TLI, |
907 | const LoopInfo &LI) |
908 | : F(F), AA(AA), EI(DT, LI), BatchAA(AA, &EI), MSSA(MSSA), DT(DT), |
909 | PDT(PDT), TLI(TLI), DL(F.getParent()->getDataLayout()), LI(LI) { |
910 | // Collect blocks with throwing instructions not modeled in MemorySSA and |
911 | // alloc-like objects. |
912 | unsigned PO = 0; |
913 | for (BasicBlock *BB : post_order(&F)) { |
914 | PostOrderNumbers[BB] = PO++; |
915 | for (Instruction &I : *BB) { |
916 | MemoryAccess *MA = MSSA.getMemoryAccess(&I); |
917 | if (I.mayThrow() && !MA) |
918 | ThrowingBlocks.insert(I.getParent()); |
919 | |
920 | auto *MD = dyn_cast_or_null<MemoryDef>(MA); |
921 | if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit && |
922 | (getLocForWriteEx(&I) || isMemTerminatorInst(&I))) |
923 | MemDefs.push_back(MD); |
924 | } |
925 | } |
926 | |
927 | // Treat byval or inalloca arguments the same as Allocas: stores to them
928 | // are dead at the end of the function.
929 | for (Argument &AI : F.args()) |
930 | if (AI.hasPassPointeeByValueCopyAttr()) { |
931 | // For byval, the caller doesn't know the address of the allocation. |
932 | if (AI.hasByValAttr()) |
933 | InvisibleToCallerBeforeRet.insert({&AI, true}); |
934 | InvisibleToCallerAfterRet.insert({&AI, true}); |
935 | } |
936 | |
937 | // Collect whether there is any irreducible control flow in the function. |
938 | ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI); |
939 | } |
940 | |
941 | /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p |
942 | /// KillingI instruction) completely overwrites a store to the 'DeadLoc' |
943 | /// location (by \p DeadI instruction). |
944 | /// Return OW_MaybePartial if \p KillingI does not completely overwrite |
945 | /// \p DeadI, but they both write to the same underlying object. In that |
946 | /// case, use isPartialOverwrite to check if \p KillingI partially overwrites |
947 | /// \p DeadI. Returns 'OW_Unknown' if nothing can be determined. |
948 | OverwriteResult isOverwrite(const Instruction *KillingI, |
949 | const Instruction *DeadI, |
950 | const MemoryLocation &KillingLoc, |
951 | const MemoryLocation &DeadLoc, |
952 | int64_t &KillingOff, int64_t &DeadOff) { |
953 | // AliasAnalysis does not always account for loops. Limit overwrite checks |
954 | // to dependencies for which we can guarantee they are independent of any |
955 | // loops they are in. |
956 | if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc)) |
957 | return OW_Unknown; |
958 | |
959 | // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll |
960 | // get imprecise values here, though (except for unknown sizes). |
961 | if (!KillingLoc.Size.isPrecise() || !DeadLoc.Size.isPrecise()) { |
962 | // In case no constant size is known, try to use the IR values for the
963 | // number of bytes written and check if they match.
964 | const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI); |
965 | const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI); |
966 | if (KillingMemI && DeadMemI) { |
967 | const Value *KillingV = KillingMemI->getLength(); |
968 | const Value *DeadV = DeadMemI->getLength(); |
969 | if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc)) |
970 | return OW_Complete; |
971 | } |
972 | |
973 | // Masked stores have imprecise locations, but we can reason about them |
974 | // to some extent. |
975 | return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA); |
976 | } |
977 | |
978 | const uint64_t KillingSize = KillingLoc.Size.getValue(); |
979 | const uint64_t DeadSize = DeadLoc.Size.getValue(); |
980 | |
981 | // Query the alias information |
982 | AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc); |
983 | |
984 | // If the start pointers are the same, we just have to compare sizes to see
985 | // whether the killing store is at least as large as the dead store.
986 | if (AAR == AliasResult::MustAlias) { |
987 | // Make sure that KillingSize is >= DeadSize.
988 | if (KillingSize >= DeadSize) |
989 | return OW_Complete; |
990 | } |
991 | |
992 | // If we hit a partial alias we may have a full overwrite |
993 | if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) { |
994 | int32_t Off = AAR.getOffset(); |
995 | if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize) |
996 | return OW_Complete; |
997 | } |
998 | |
999 | // Check to see if the killing store is to the entire object (either a |
1000 | // global, an alloca, or a byval/inalloca argument). If so, then it clearly |
1001 | // overwrites any other store to the same object. |
1002 | const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts(); |
1003 | const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts(); |
1004 | const Value *DeadUndObj = getUnderlyingObject(DeadPtr); |
1005 | const Value *KillingUndObj = getUnderlyingObject(KillingPtr); |
1006 | |
1007 | // If we can't resolve both pointers to the same underlying object, we can't
1008 | // analyze them at all.
1009 | if (DeadUndObj != KillingUndObj) |
1010 | return OW_Unknown; |
1011 | |
1012 | // If the KillingI store is to a recognizable object, get its size. |
1013 | uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F); |
1014 | if (KillingUndObjSize != MemoryLocation::UnknownSize) |
1015 | if (KillingUndObjSize == KillingSize && KillingUndObjSize >= DeadSize) |
1016 | return OW_Complete; |
1017 | |
1018 | // Okay, we have stores through two different pointers into the same object.
1019 | // Try to decompose each pointer into a "base + constant_offset" form. If the
1020 | // base pointers are equal, then we can reason about the two stores.
1021 | DeadOff = 0; |
1022 | KillingOff = 0; |
1023 | const Value *DeadBasePtr = |
1024 | GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL); |
1025 | const Value *KillingBasePtr = |
1026 | GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL); |
1027 | |
1028 | // If the base pointers still differ, we have two completely different |
1029 | // stores. |
1030 | if (DeadBasePtr != KillingBasePtr) |
1031 | return OW_Unknown; |
1032 | |
1033 | // The killing access completely overlaps the dead store if and only if |
1034 | // both start and end of the dead one is "inside" the killing one: |
1035 | // |<->|--dead--|<->| |
1036 | // |-----killing------| |
1037 | // Accesses may overlap if and only if start of one of them is "inside" |
1038 | // another one: |
1039 | // |<->|--dead--|<-------->| |
1040 | // |-------killing--------| |
1041 | // OR |
1042 | // |-------dead-------| |
1043 | // |<->|---killing---|<----->| |
1044 | // |
1045 | // We have to be careful here as *Off is signed while *.Size is unsigned. |
1046 | |
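// Editorial note: a worked example with hypothetical values. Suppose
// KillingOff = 0, KillingSize = 16, DeadOff = 4, DeadSize = 8: the dead
// store covers bytes [4, 12) and the killing store covers [0, 16), so
// (DeadOff - KillingOff) + DeadSize = 12 <= 16 and we return OW_Complete.
// With DeadSize = 16 the dead store covers [4, 20); it is no longer fully
// covered, but it still starts inside the killing store (4 < 16), so we
// return OW_MaybePartial.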
1047 | // Check if the dead access starts "not before" the killing one. |
1048 | if (DeadOff >= KillingOff) { |
1049 | // If the dead access ends "not after" the killing access then the |
1050 | // dead one is completely overwritten by the killing one. |
1051 | if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize) |
1052 | return OW_Complete; |
1053 | // If start of the dead access is "before" end of the killing access |
1054 | // then accesses overlap. |
1055 | else if ((uint64_t)(DeadOff - KillingOff) < KillingSize) |
1056 | return OW_MaybePartial; |
1057 | } |
1058 | // If start of the killing access is "before" end of the dead access then |
1059 | // accesses overlap. |
1060 | else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) { |
1061 | return OW_MaybePartial; |
1062 | } |
1063 | |
1064 | // Can reach here only if accesses are known not to overlap. There is no |
1065 | // dedicated code to indicate no overlap so signal "unknown". |
1066 | return OW_Unknown; |
1067 | } |
1068 | |
1069 | bool isInvisibleToCallerAfterRet(const Value *V) { |
1070 | if (isa<AllocaInst>(V)) |
1071 | return true; |
1072 | auto I = InvisibleToCallerAfterRet.insert({V, false}); |
1073 | if (I.second) { |
1074 | if (!isInvisibleToCallerBeforeRet(V)) { |
1075 | I.first->second = false; |
1076 | } else { |
1077 | auto *Inst = dyn_cast<Instruction>(V); |
1078 | if (Inst && isAllocLikeFn(Inst, &TLI)) |
1079 | I.first->second = !PointerMayBeCaptured(V, true, false); |
1080 | } |
1081 | } |
1082 | return I.first->second; |
1083 | } |
1084 | |
1085 | bool isInvisibleToCallerBeforeRet(const Value *V) { |
1086 | if (isa<AllocaInst>(V)) |
1087 | return true; |
1088 | auto I = InvisibleToCallerBeforeRet.insert({V, false}); |
1089 | if (I.second) { |
1090 | auto *Inst = dyn_cast<Instruction>(V); |
1091 | if (Inst && isAllocLikeFn(Inst, &TLI)) |
1092 | // NOTE: This could be made more precise by PointerMayBeCapturedBefore |
1093 | // with the killing MemoryDef. But we refrain from doing so for now to |
1094 | // limit compile-time and this does not cause any changes to the number |
1095 | // of stores removed on a large test set in practice. |
1096 | I.first->second = !PointerMayBeCaptured(V, false, true); |
1097 | } |
1098 | return I.first->second; |
1099 | } |
1100 | |
1101 | Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const { |
1102 | if (!I->mayWriteToMemory()) |
1103 | return None; |
1104 | |
1105 | if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I)) |
1106 | return {MemoryLocation::getForDest(MTI)}; |
1107 | |
1108 | if (auto *CB = dyn_cast<CallBase>(I)) { |
1109 | // If the functions may write to memory we do not know about, bail out. |
1110 | if (!CB->onlyAccessesArgMemory() && |
1111 | !CB->onlyAccessesInaccessibleMemOrArgMem()) |
1112 | return None; |
1113 | |
1114 | LibFunc LF; |
1115 | if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) { |
1116 | switch (LF) { |
1117 | case LibFunc_strcpy: |
1118 | case LibFunc_strncpy: |
1119 | case LibFunc_strcat: |
1120 | case LibFunc_strncat: |
1121 | return {MemoryLocation::getAfter(CB->getArgOperand(0))}; |
1122 | default: |
1123 | break; |
1124 | } |
1125 | } |
1126 | switch (CB->getIntrinsicID()) { |
1127 | case Intrinsic::init_trampoline: |
1128 | return {MemoryLocation::getAfter(CB->getArgOperand(0))}; |
1129 | case Intrinsic::masked_store: |
1130 | return {MemoryLocation::getForArgument(CB, 1, TLI)}; |
1131 | default: |
1132 | break; |
1133 | } |
1134 | return None; |
1135 | } |
1136 | |
1137 | return MemoryLocation::getOrNone(I); |
1138 | } |
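// Editorial note: hypothetical examples of what getLocForWriteEx returns.
//   store i32 1, i32* %p       -> precise location (%p, 4 bytes)
//   memset(%p, 0, 32)          -> precise location (%p, 32 bytes)
//   strcpy(%dst, %src)         -> imprecise "everything after %dst", since
//                                 the number of bytes written is unknown
//   call void @unknown()       -> None; it may write memory we cannot see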
1139 | |
1140 | /// Returns true if \p UseInst completely overwrites \p DefLoc |
1141 | /// (stored by \p DefInst). |
1142 | bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst, |
1143 | Instruction *UseInst) { |
1144 | // UseInst has a MemoryDef associated in MemorySSA. It's possible for a |
1145 | // MemoryDef to not write to memory, e.g. a volatile load is modeled as a |
1146 | // MemoryDef. |
1147 | if (!UseInst->mayWriteToMemory()) |
1148 | return false; |
1149 | |
1150 | if (auto *CB = dyn_cast<CallBase>(UseInst)) |
1151 | if (CB->onlyAccessesInaccessibleMemory()) |
1152 | return false; |
1153 | |
1154 | int64_t InstWriteOffset, DepWriteOffset; |
1155 | if (auto CC = getLocForWriteEx(UseInst)) |
1156 | return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset, |
1157 | DepWriteOffset) == OW_Complete; |
1158 | return false; |
1159 | } |
1160 | |
1161 | /// Returns true if \p Def is not read before returning from the function. |
1162 | bool isWriteAtEndOfFunction(MemoryDef *Def) { |
1163 | LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
1164 | << *Def->getMemoryInst()
1165 | << ") is at the end of the function\n");
1166 | |
1167 | auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst()); |
1168 | if (!MaybeLoc) { |
1169 | LLVM_DEBUG(dbgs() << "  ... could not get location for write.\n");
1170 | return false; |
1171 | } |
1172 | |
1173 | SmallVector<MemoryAccess *, 4> WorkList; |
1174 | SmallPtrSet<MemoryAccess *, 8> Visited; |
1175 | auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) { |
1176 | if (!Visited.insert(Acc).second) |
1177 | return; |
1178 | for (Use &U : Acc->uses()) |
1179 | WorkList.push_back(cast<MemoryAccess>(U.getUser())); |
1180 | }; |
1181 | PushMemUses(Def); |
1182 | for (unsigned I = 0; I < WorkList.size(); I++) { |
1183 | if (WorkList.size() >= MemorySSAScanLimit) { |
1184 | LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
1185 | return false; |
1186 | } |
1187 | |
1188 | MemoryAccess *UseAccess = WorkList[I]; |
1189 | // Simply adding the users of MemoryPhi to the worklist is not enough, |
1190 | // because we might miss read clobbers in different iterations of a loop, |
1191 | // for example. |
1192 | // TODO: Add support for phi translation to handle the loop case. |
1193 | if (isa<MemoryPhi>(UseAccess)) |
1194 | return false; |
1195 | |
1196 | // TODO: Checking for aliasing is expensive. Consider reducing the amount |
1197 | // of times this is called and/or caching it. |
1198 | Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); |
1199 | if (isReadClobber(*MaybeLoc, UseInst)) { |
1200 | LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
1201 | return false; |
1202 | } |
1203 | |
1204 | if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) |
1205 | PushMemUses(UseDef); |
1206 | } |
1207 | return true; |
1208 | } |
1209 | |
1210 | /// If \p I is a memory terminator like llvm.lifetime.end or free, return a |
1211 | /// pair with the MemoryLocation terminated by \p I and a boolean flag |
1212 | /// indicating whether \p I is a free-like call. |
1213 | Optional<std::pair<MemoryLocation, bool>> |
1214 | getLocForTerminator(Instruction *I) const { |
1215 | uint64_t Len; |
1216 | Value *Ptr; |
1217 | if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len), |
1218 | m_Value(Ptr)))) |
1219 | return {std::make_pair(MemoryLocation(Ptr, Len), false)}; |
1220 | |
1221 | if (auto *CB = dyn_cast<CallBase>(I)) { |
1222 | if (isFreeCall(I, &TLI)) |
1223 | return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)), |
1224 | true)}; |
1225 | } |
1226 | |
1227 | return None; |
1228 | } |
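// Editorial note: two hypothetical examples of this mapping. For
//   call void @llvm.lifetime.end.p0i8(i64 16, i8* %p)
// it returns {MemoryLocation(%p, 16), false}; for
//   call void @free(i8* %p)
// it returns {"everything after %p", true}. The flag marks the free-like
// case so that isMemTerminator can match on the underlying object instead
// of on a sized location.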
1229 | |
1230 | /// Returns true if \p I is a memory terminator instruction like |
1231 | /// llvm.lifetime.end or free. |
1232 | bool isMemTerminatorInst(Instruction *I) const { |
1233 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(I); |
1234 | return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) || |
1235 | isFreeCall(I, &TLI); |
1236 | } |
1237 | |
1238 | /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from |
1239 | /// instruction \p AccessI. |
1240 | bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI, |
1241 | Instruction *MaybeTerm) { |
1242 | Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc = |
1243 | getLocForTerminator(MaybeTerm); |
1244 | |
1245 | if (!MaybeTermLoc) |
1246 | return false; |
1247 | |
1248 | // If the terminator is a free-like call, all accesses to the underlying |
1249 | // object can be considered terminated. |
1250 | if (getUnderlyingObject(Loc.Ptr) != |
1251 | getUnderlyingObject(MaybeTermLoc->first.Ptr)) |
1252 | return false; |
1253 | |
1254 | auto TermLoc = MaybeTermLoc->first; |
1255 | if (MaybeTermLoc->second) { |
1256 | const Value *LocUO = getUnderlyingObject(Loc.Ptr); |
1257 | return BatchAA.isMustAlias(TermLoc.Ptr, LocUO); |
1258 | } |
1259 | int64_t InstWriteOffset = 0; |
1260 | int64_t DepWriteOffset = 0; |
1261 | return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset, |
1262 | DepWriteOffset) == OW_Complete; |
1263 | } |
1264 | |
1265 | // Returns true if \p UseInst may read from \p DefLoc.
1266 | bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) { |
1267 | if (isNoopIntrinsic(UseInst)) |
1268 | return false; |
1269 | |
1270 | // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1271 | // treated as read clobbers.
1272 | if (auto SI = dyn_cast<StoreInst>(UseInst)) |
1273 | return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic); |
1274 | |
1275 | if (!UseInst->mayReadFromMemory()) |
1276 | return false; |
1277 | |
1278 | if (auto *CB = dyn_cast<CallBase>(UseInst)) |
1279 | if (CB->onlyAccessesInaccessibleMemory()) |
1280 | return false; |
1281 | |
1282 | return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc)); |
1283 | } |
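// Editorial note: a hypothetical illustration for DefLoc = (%p, 4 bytes):
//   %v = load i32, i32* %p                  -> read clobber (Ref is set)
//   store atomic i32 0, i32* %q seq_cst     -> treated as a read clobber,
//                                              since stores must not be
//                                              reordered across it
//   store atomic i32 0, i32* %q monotonic   -> not a read clobber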
1284 | |
1285 | /// Returns true if a dependency between \p Current and \p KillingDef is
1286 | /// guaranteed to be loop invariant for the loops that they are in, either
1287 | /// because they are known to be in the same block or on the same loop level,
1288 | /// or because \p CurrentLoc is guaranteed to only reference a single
1289 | /// MemoryLocation during execution of the containing function.
1290 | bool isGuaranteedLoopIndependent(const Instruction *Current, |
1291 | const Instruction *KillingDef, |
1292 | const MemoryLocation &CurrentLoc) { |
1293 | // If the dependency is within the same block or loop level (being careful |
1294 | // of irreducible loops), we know that AA will return a valid result for the |
1295 | // memory dependency. (Both being at the function level, outside of any loop,
1296 | // would also be valid, but we currently disable that to limit compile time.)
1297 | if (Current->getParent() == KillingDef->getParent()) |
1298 | return true; |
1299 | const Loop *CurrentLI = LI.getLoopFor(Current->getParent()); |
1300 | if (!ContainsIrreducibleLoops && CurrentLI && |
1301 | CurrentLI == LI.getLoopFor(KillingDef->getParent())) |
1302 | return true; |
1303 | // Otherwise check the memory location is invariant to any loops. |
1304 | return isGuaranteedLoopInvariant(CurrentLoc.Ptr); |
1305 | } |
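// Editorial note: why the loop check matters, with a hypothetical loop:
//   for (i = 0; i < n; i++) { a[i] = 0; ...; a[i] = 1; }
// Within a single iteration the first store is dead, but "a[i]" names a
// different location in every iteration, so a dependency that crosses the
// loop backedge must not be treated as a plain overwrite. Same-block and
// same-loop-level dependencies are safe; otherwise we fall back to
// requiring the pointer itself to be loop invariant.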
1306 | |
1307 | /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible |
1308 | /// loop. In particular, this guarantees that it only references a single |
1309 | /// MemoryLocation during execution of the containing function. |
1310 | bool isGuaranteedLoopInvariant(const Value *Ptr) { |
1311 | auto IsGuaranteedLoopInvariantBase = [this](const Value *Ptr) { |
1312 | Ptr = Ptr->stripPointerCasts(); |
1313 | if (auto *I = dyn_cast<Instruction>(Ptr)) { |
1314 | if (isa<AllocaInst>(Ptr)) |
1315 | return true; |
1316 | |
1317 | if (isAllocLikeFn(I, &TLI)) |
1318 | return true; |
1319 | |
1320 | return false; |
1321 | } |
1322 | return true; |
1323 | }; |
1324 | |
1325 | Ptr = Ptr->stripPointerCasts(); |
1326 | if (auto *I = dyn_cast<Instruction>(Ptr)) { |
1327 | if (I->getParent()->isEntryBlock()) |
1328 | return true; |
1329 | } |
1330 | if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) { |
1331 | return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) && |
1332 | GEP->hasAllConstantIndices(); |
1333 | } |
1334 | return IsGuaranteedLoopInvariantBase(Ptr); |
1335 | } |
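// Editorial note: hypothetical examples for this predicate. An alloca, an
// alloc-like call result, or a GEP on one of those with all-constant
// indices, e.g.
//   %a = alloca [4 x i32]
//   %p = getelementptr [4 x i32], [4 x i32]* %a, i64 0, i64 2
// names the same single location on every execution, so it counts as
// guaranteed loop invariant; a GEP with a variable index such as %a[%i]
// does not.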
1336 | |
1337 | // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess, |
1338 | // with no read access between them or on any other path to a function exit |
1339 | // block if \p KillingLoc is not accessible after the function returns. If |
1340 | // there is no such MemoryDef, return None. The returned value may not |
1341 | // (completely) overwrite \p KillingLoc. Currently we bail out when we |
1342 | // encounter an aliasing MemoryUse (read). |
1343 | Optional<MemoryAccess *> |
1344 | getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess, |
1345 | const MemoryLocation &KillingLoc, const Value *KillingUndObj, |
1346 | unsigned &ScanLimit, unsigned &WalkerStepLimit, |
1347 | bool IsMemTerm, unsigned &PartialLimit) { |
1348 | if (ScanLimit == 0 || WalkerStepLimit == 0) { |
1349 | LLVM_DEBUG(dbgs() << "\n    ... hit scan limit\n");
1350 | return None; |
1351 | } |
1352 | |
1353 | MemoryAccess *Current = StartAccess; |
1354 | Instruction *KillingI = KillingDef->getMemoryInst(); |
1355 | LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");
1356 | |
1357 | // Find the next clobbering Mod access for DefLoc, starting at StartAccess. |
1358 | Optional<MemoryLocation> CurrentLoc; |
1359 | for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) { |
1360 | LLVM_DEBUG({
1361 | dbgs() << "   visiting " << *Current;
1362 | if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1363 | dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1364 | << ")";
1365 | dbgs() << "\n";
1366 | });
1367 | |
1368 | // Reached TOP. |
1369 | if (MSSA.isLiveOnEntryDef(Current)) { |
1370 | LLVM_DEBUG(dbgs() << "   ... found LiveOnEntryDef\n");
1371 | return None; |
1372 | } |
1373 | |
1374 | // Cost of a step. Accesses in the same block are more likely to be valid |
1375 | // candidates for elimination, hence consider them cheaper. |
1376 | unsigned StepCost = KillingDef->getBlock() == Current->getBlock() |
1377 | ? MemorySSASameBBStepCost |
1378 | : MemorySSAOtherBBStepCost; |
1379 | if (WalkerStepLimit <= StepCost) { |
1380 | LLVM_DEBUG(dbgs() << "   ... hit walker step limit\n");
1381 | return None; |
1382 | } |
1383 | WalkerStepLimit -= StepCost; |
1384 | |
1385 | // Return for MemoryPhis. They cannot be eliminated directly and the |
1386 | // caller is responsible for traversing them. |
1387 | if (isa<MemoryPhi>(Current)) { |
1388 | LLVM_DEBUG(dbgs() << "   ... found MemoryPhi\n");
1389 | return Current; |
1390 | } |
1391 | |
1392 | // Below, check if CurrentDef is a valid candidate to be eliminated by |
1393 | // KillingDef. If it is not, check the next candidate. |
1394 | MemoryDef *CurrentDef = cast<MemoryDef>(Current); |
1395 | Instruction *CurrentI = CurrentDef->getMemoryInst(); |
1396 | |
1397 | if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(KillingUndObj), |
1398 | TLI)) |
1399 | continue; |
1400 | |
1401 | // Before we try to remove anything, check for any extra throwing |
1402 | // instructions that block us from DSEing |
1403 | if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) { |
1404 | LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
1405 | return None; |
1406 | } |
1407 | |
1408 | // Check for anything that looks like it will be a barrier to further |
1409 | // removal |
1410 | if (isDSEBarrier(KillingUndObj, CurrentI)) { |
1411 | LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
1412 | return None; |
1413 | } |
1414 | |
1415 | // If Current is known to be on a path that reads DefLoc or is a read
1416 | // clobber, bail out, as the path is not profitable. We skip this check |
1417 | // for intrinsic calls, because the code knows how to handle memcpy |
1418 | // intrinsics. |
1419 | if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI)) |
1420 | return None; |
1421 | |
1422 | // Quick check if there are direct uses that are read-clobbers. |
1423 | if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) { |
1424 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser())) |
1425 | return !MSSA.dominates(StartAccess, UseOrDef) && |
1426 | isReadClobber(KillingLoc, UseOrDef->getMemoryInst()); |
1427 | return false; |
1428 | })) { |
1429 | LLVM_DEBUG(dbgs() << "   ... found a read clobber\n");
1430 | return None; |
1431 | } |
1432 | |
1433 | // If Current cannot be analyzed or is not removable, check the next |
1434 | // candidate. |
1435 | if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI)) |
1436 | continue; |
1437 | |
1438 | // If Current does not have an analyzable write location, skip it |
1439 | CurrentLoc = getLocForWriteEx(CurrentI); |
1440 | if (!CurrentLoc) |
1441 | continue; |
1442 | |
1443 | // AliasAnalysis does not account for loops. Limit elimination to
1444 | // candidates for which we can guarantee they always store to the same
1445 | // memory location and are not located in different loops.
1446 | if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) { |
1447 | LLVM_DEBUG(dbgs() << "  ... not guaranteed loop independent\n");
1448 | WalkerStepLimit -= 1; |
1449 | continue; |
1450 | } |
1451 | |
1452 | if (IsMemTerm) { |
1453 | // If the killing def is a memory terminator (e.g. lifetime.end), check
1454 | // the next candidate if Current does not write to the same underlying
1455 | // object as the terminator.
1456 | if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) |
1457 | continue; |
1458 | } else { |
1459 | int64_t KillingOffset = 0; |
1460 | int64_t DeadOffset = 0; |
1461 | auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc, |
1462 | KillingOffset, DeadOffset); |
1463 | // If Current does not write to the same object as KillingDef, check |
1464 | // the next candidate. |
1465 | if (OR == OW_Unknown) |
1466 | continue; |
1467 | else if (OR == OW_MaybePartial) { |
1468 | // If KillingDef only partially overwrites Current, check the next |
1469 | // candidate if the partial step limit is exceeded. This aggressively |
1470 | // limits the number of candidates for partial store elimination, |
1471 | // which are less likely to be removable in the end. |
1472 | if (PartialLimit <= 1) { |
1473 | WalkerStepLimit -= 1; |
1474 | continue; |
1475 | } |
1476 | PartialLimit -= 1; |
1477 | } |
1478 | } |
1479 | break; |
1480 | }
1481 | |
1482 | // Accesses to objects accessible after the function returns can only be
1483 | // eliminated if the access is dead along all paths to the exit. Collect
1484 | // the blocks with killing (i.e. completely overwriting) MemoryDefs and check
1485 | // if they cover all paths from MaybeDeadAccess to any function exit.
1486 | SmallPtrSet<Instruction *, 16> KillingDefs; |
1487 | KillingDefs.insert(KillingDef->getMemoryInst()); |
1488 | MemoryAccess *MaybeDeadAccess = Current; |
1489 | MemoryLocation MaybeDeadLoc = *CurrentLoc; |
1490 | Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst(); |
1491 | LLVM_DEBUG(dbgs() << "  Checking for reads of " << *MaybeDeadAccess << " ("
1492 | << *MaybeDeadI << ")\n");
1493 | |
1494 | SmallSetVector<MemoryAccess *, 32> WorkList; |
1495 | auto PushMemUses = [&WorkList](MemoryAccess *Acc) { |
1496 | for (Use &U : Acc->uses()) |
1497 | WorkList.insert(cast<MemoryAccess>(U.getUser())); |
1498 | }; |
1499 | PushMemUses(MaybeDeadAccess); |
1500 | |
1501 | // Check if DeadDef may be read. |
1502 | for (unsigned I = 0; I < WorkList.size(); I++) { |
1503 | MemoryAccess *UseAccess = WorkList[I]; |
1504 | |
1505 | LLVM_DEBUG(dbgs() << "   " << *UseAccess);
1506 | // Bail out if the number of accesses to check exceeds the scan limit. |
1507 | if (ScanLimit < (WorkList.size() - I)) { |
1508 | LLVM_DEBUG(dbgs() << "\n    ... hit scan limit\n");
1509 | return None; |
1510 | } |
1511 | --ScanLimit; |
1512 | NumDomMemDefChecks++; |
1513 | |
1514 | if (isa<MemoryPhi>(UseAccess)) { |
1515 | if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) { |
1516 | return DT.properlyDominates(KI->getParent(), |
1517 | UseAccess->getBlock()); |
1518 | })) { |
1519 | LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1520 | continue; |
1521 | } |
1522 | LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
1523 | PushMemUses(UseAccess); |
1524 | continue; |
1525 | } |
1526 | |
1527 | Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst(); |
1528 | LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1529 | |
1530 | if (any_of(KillingDefs, [this, UseInst](Instruction *KI) { |
1531 | return DT.dominates(KI, UseInst); |
1532 | })) { |
1533 | LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
1534 | continue; |
1535 | } |
1536 | |
1537 | // A memory terminator kills all preceding MemoryDefs and all succeeding
1538 | // MemoryAccesses. We do not have to check its users.
1539 | if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) { |
1540 | LLVM_DEBUG(
1541 | dbgs()
1542 | << " ... skipping, memterminator invalidates following accesses\n");
1543 | continue; |
1544 | } |
1545 | |
1546 | if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) { |
1547 | LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
1548 | PushMemUses(UseAccess); |
1549 | continue; |
1550 | } |
1551 | |
1552 | if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(KillingUndObj)) { |
1553 | LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
1554 | return None; |
1555 | } |
1556 | |
1557 | // Uses which may read the original MemoryDef mean we cannot eliminate the |
1558 | // original MD. Stop walk. |
1559 | if (isReadClobber(MaybeDeadLoc, UseInst)) { |
1560 | LLVM_DEBUG(dbgs() << "  ... found read clobber\n");
1561 | return None; |
1562 | } |
1563 | |
1564 | // If this worklist walks back to the original memory access (and the
1565 | // pointer is not guaranteed loop invariant) then we cannot assume that a
1566 | // store kills itself.
1567 | if (MaybeDeadAccess == UseAccess && |
1568 | !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) { |
1569 | LLVM_DEBUG(dbgs() << "    ... found not loop invariant self access\n");
1570 | return None; |
1571 | } |
1572 | // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check |
1573 | // if it reads the memory location. |
1574 | // TODO: It would probably be better to check for self-reads before |
1575 | // calling the function. |
1576 | if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) { |
1577 | LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
1578 | continue; |
1579 | } |
1580 | |
1581 | // Check all uses for MemoryDefs, except for defs completely overwriting
1582 | // the original location. Otherwise we have to check uses of *all*
1583 | // MemoryDefs we discover, including non-aliasing ones, and might miss
1584 | // cases like the following
1585 | // 1 = Def(LoE) ; <----- DeadDef stores [0,1] |
1586 | // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3] |
1587 | // Use(2) ; MayAlias 2 *and* 1, loads [0, 3]. |
1588 | // (The Use points to the *first* Def it may alias) |
1589 | // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias, |
1590 | // stores [0,1] |
1591 | if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) { |
1592 | if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) { |
1593 | BasicBlock *MaybeKillingBlock = UseInst->getParent(); |
1594 | if (PostOrderNumbers.find(MaybeKillingBlock)->second < |
1595 | PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) { |
1596 | if (!isInvisibleToCallerAfterRet(KillingUndObj)) { |
1597 | LLVM_DEBUG(dbgs()
1598 | << " ... found killing def " << *UseInst << "\n");
1599 | KillingDefs.insert(UseInst); |
1600 | } |
1601 | } else { |
1602 | LLVM_DEBUG(dbgs()
1603 | << " ... found preceding def " << *UseInst << "\n");
1604 | return None; |
1605 | } |
1606 | } else |
1607 | PushMemUses(UseDef); |
1608 | } |
1609 | } |
1610 | |
1611 | // For accesses to locations visible after the function returns, make sure |
1612 | // that the location is dead (=overwritten) along all paths from |
1613 | // MaybeDeadAccess to the exit. |
1614 | if (!isInvisibleToCallerAfterRet(KillingUndObj)) { |
1615 | SmallPtrSet<BasicBlock *, 16> KillingBlocks; |
1616 | for (Instruction *KD : KillingDefs) |
1617 | KillingBlocks.insert(KD->getParent()); |
1618 | assert(!KillingBlocks.empty() &&
1619 | "Expected at least a single killing block");
1620 | |
1621 | // Find the common post-dominator of all killing blocks. |
1622 | BasicBlock *CommonPred = *KillingBlocks.begin(); |
1623 | for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) { |
1624 | if (!CommonPred) |
1625 | break; |
1626 | CommonPred = PDT.findNearestCommonDominator(CommonPred, BB); |
1627 | } |
1628 | |
1629 | // If CommonPred is in the set of killing blocks, just check if it |
1630 | // post-dominates MaybeDeadAccess. |
1631 | if (KillingBlocks.count(CommonPred)) { |
1632 | if (PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) |
1633 | return {MaybeDeadAccess}; |
1634 | return None; |
1635 | } |
1636 | |
1637 | // If the common post-dominator does not post-dominate MaybeDeadAccess, |
1638 | // there is a path from MaybeDeadAccess to an exit not going through a |
1639 | // killing block. |
1640 | if (PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) { |
1641 | SetVector<BasicBlock *> WorkList; |
1642 | |
1643 | // If CommonPred is null, there are multiple exits from the function. |
1644 | // They all have to be added to the worklist. |
1645 | if (CommonPred) |
1646 | WorkList.insert(CommonPred); |
1647 | else |
1648 | for (BasicBlock *R : PDT.roots()) |
1649 | WorkList.insert(R); |
1650 | |
1651 | NumCFGTries++; |
1652 | // Check if all paths starting from an exit node go through one of the |
1653 | // killing blocks before reaching MaybeDeadAccess. |
1654 | for (unsigned I = 0; I < WorkList.size(); I++) { |
1655 | NumCFGChecks++; |
1656 | BasicBlock *Current = WorkList[I]; |
1657 | if (KillingBlocks.count(Current)) |
1658 | continue; |
1659 | if (Current == MaybeDeadAccess->getBlock()) |
1660 | return None; |
1661 | |
1662 | // MaybeDeadAccess is reachable from the entry, so we don't have to |
1663 | // explore unreachable blocks further. |
1664 | if (!DT.isReachableFromEntry(Current)) |
1665 | continue; |
1666 | |
1667 | for (BasicBlock *Pred : predecessors(Current)) |
1668 | WorkList.insert(Pred); |
1669 | |
1670 | if (WorkList.size() >= MemorySSAPathCheckLimit) |
1671 | return None; |
1672 | } |
1673 | NumCFGSuccess++; |
1674 | return {MaybeDeadAccess}; |
1675 | } |
1676 | return None; |
1677 | } |
1678 | |
1679 | // No aliasing MemoryUses of MaybeDeadAccess found, so MaybeDeadAccess is
1680 | // potentially dead.
1681 | return {MaybeDeadAccess}; |
1682 | } |
1683 | |
1684 | // Delete dead memory defs |
1685 | void deleteDeadInstruction(Instruction *SI) { |
1686 | MemorySSAUpdater Updater(&MSSA); |
1687 | SmallVector<Instruction *, 32> NowDeadInsts; |
1688 | NowDeadInsts.push_back(SI); |
1689 | --NumFastOther; |
1690 | |
1691 | while (!NowDeadInsts.empty()) { |
1692 | Instruction *DeadInst = NowDeadInsts.pop_back_val(); |
1693 | ++NumFastOther; |
1694 | |
1695 | // Try to preserve debug information attached to the dead instruction. |
1696 | salvageDebugInfo(*DeadInst); |
1697 | salvageKnowledge(DeadInst); |
1698 | |
1699 | // Remove the Instruction from MSSA. |
1700 | if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) { |
1701 | if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) { |
1702 | SkipStores.insert(MD); |
1703 | } |
1704 | |
1705 | Updater.removeMemoryAccess(MA); |
1706 | } |
1707 | |
1708 | auto I = IOLs.find(DeadInst->getParent()); |
1709 | if (I != IOLs.end()) |
1710 | I->second.erase(DeadInst); |
1711 | // Remove its operands |
1712 | for (Use &O : DeadInst->operands()) |
1713 | if (Instruction *OpI = dyn_cast<Instruction>(O)) { |
1714 | O = nullptr; |
1715 | if (isInstructionTriviallyDead(OpI, &TLI)) |
1716 | NowDeadInsts.push_back(OpI); |
1717 | } |
1718 | |
1719 | EI.removeInstruction(DeadInst); |
1720 | DeadInst->eraseFromParent(); |
1721 | } |
1722 | } |
1723 | |
1724 | // Check for any extra throws between \p KillingI and \p DeadI that block
1725 | // DSE. This only checks extra maythrows (those that aren't MemoryDefs).
1726 | // MemoryDefs that may throw are handled during the walk from one def to the
1727 | // next.
1728 | bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI, |
1729 | const Value *KillingUndObj) { |
1730 | // First see if we can ignore it by using the fact that the underlying
1731 | // object is an alloca/alloca-like object that is not visible to the caller
1732 | // during execution of the function.
1733 | if (KillingUndObj && isInvisibleToCallerBeforeRet(KillingUndObj)) |
1734 | return false; |
1735 | |
1736 | if (KillingI->getParent() == DeadI->getParent()) |
1737 | return ThrowingBlocks.count(KillingI->getParent()); |
1738 | return !ThrowingBlocks.empty(); |
1739 | } |
1740 | |
1741 | // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following |
1742 | // instructions act as barriers: |
1743 | // * A memory instruction that may throw and \p KillingI accesses a non-stack |
1744 | // object. |
1745 | // * Atomic stores stronger than monotonic.
1746 | bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) { |
1747 | // If DeadI may throw it acts as a barrier, unless we are accessing an
1748 | // alloca/alloca-like object that does not escape.
1749 | if (DeadI->mayThrow() && !isInvisibleToCallerBeforeRet(KillingUndObj)) |
1750 | return true; |
1751 | |
1752 | // If DeadI is an atomic load/store stronger than monotonic, do not try to |
1753 | // eliminate/reorder it. |
1754 | if (DeadI->isAtomic()) { |
1755 | if (auto *LI = dyn_cast<LoadInst>(DeadI)) |
1756 | return isStrongerThanMonotonic(LI->getOrdering()); |
1757 | if (auto *SI = dyn_cast<StoreInst>(DeadI)) |
1758 | return isStrongerThanMonotonic(SI->getOrdering()); |
1759 | if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI)) |
1760 | return isStrongerThanMonotonic(ARMW->getOrdering()); |
1761 | if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI)) |
1762 | return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) || |
1763 | isStrongerThanMonotonic(CmpXchg->getFailureOrdering()); |
1764 | llvm_unreachable("other instructions should be skipped in MemorySSA");
1765 | } |
1766 | return false; |
1767 | } |
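// Editorial note: a hypothetical barrier. When walking upwards from
//   store i32 1, i32* @g                    ; killing store
// and encountering
//   store atomic i32 0, i32* %q release     ; stronger than monotonic
// isDSEBarrier returns true and the walk stops, so no store to @g further
// up can be eliminated across the atomic store.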
1768 | |
1769 | /// Eliminate writes to objects that are not visible in the caller and are not |
1770 | /// accessed before returning from the function. |
1771 | bool eliminateDeadWritesAtEndOfFunction() { |
1772 | bool MadeChange = false; |
1773 | LLVM_DEBUG(
1774 | dbgs()
1775 | << "Trying to eliminate MemoryDefs at the end of the function\n");
1776 | for (int I = MemDefs.size() - 1; I >= 0; I--) { |
1777 | MemoryDef *Def = MemDefs[I]; |
1778 | if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst())) |
1779 | continue; |
1780 | |
1781 | Instruction *DefI = Def->getMemoryInst(); |
1782 | auto DefLoc = getLocForWriteEx(DefI); |
1783 | if (!DefLoc) |
1784 | continue; |
1785 | |
1786 | // NOTE: Currently eliminating writes at the end of a function is limited |
1787 | // to MemoryDefs with a single underlying object, to save compile-time. In |
1788 | // practice it appears the case with multiple underlying objects is very |
1789 | // uncommon. If it turns out to be important, we can use |
1790 | // getUnderlyingObjects here instead. |
1791 | const Value *UO = getUnderlyingObject(DefLoc->Ptr); |
1792 | if (!isInvisibleToCallerAfterRet(UO)) |
1793 | continue; |
1794 | |
1795 | if (isWriteAtEndOfFunction(Def)) { |
1797 | LLVM_DEBUG(dbgs() << "  ... MemoryDef is not accessed until the end "
1798 | "of the function\n");
1799 | deleteDeadInstruction(DefI); |
1800 | ++NumFastStores; |
1801 | MadeChange = true; |
1802 | } |
1803 | } |
1804 | return MadeChange; |
1805 | } |
1806 | |
1807 | /// \returns true if \p Def is a no-op store, either because it |
1808 | /// directly stores back a loaded value or stores zero to a calloced object. |
1809 | bool storeIsNoop(MemoryDef *Def, const Value *DefUO) { |
1810 | StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst()); |
1811 | MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst()); |
1812 | Constant *StoredConstant = nullptr; |
1813 | if (Store) |
1814 | StoredConstant = dyn_cast<Constant>(Store->getOperand(0)); |
1815 | if (MemSet) |
1816 | StoredConstant = dyn_cast<Constant>(MemSet->getValue()); |
1817 | |
1818 | if (StoredConstant && StoredConstant->isNullValue()) { |
1819 | auto *DefUOInst = dyn_cast<Instruction>(DefUO); |
1820 | if (DefUOInst) { |
1821 | if (isCallocLikeFn(DefUOInst, &TLI)) { |
1822 | auto *UnderlyingDef = |
1823 | cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst)); |
1824 | // If UnderlyingDef is the clobbering access of Def, no instructions |
1825 | // between them can modify the memory location. |
1826 | auto *ClobberDef = |
1827 | MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def); |
1828 | return UnderlyingDef == ClobberDef; |
1829 | } |
1830 | |
1831 | if (MemSet) { |
1832 | if (F.hasFnAttribute(Attribute::SanitizeMemory) || |
1833 | F.hasFnAttribute(Attribute::SanitizeAddress) || |
1834 | F.hasFnAttribute(Attribute::SanitizeHWAddress) || |
1835 | F.getName() == "calloc") |
1836 | return false; |
1837 | auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUOInst)); |
1838 | if (!Malloc) |
1839 | return false; |
1840 | auto *InnerCallee = Malloc->getCalledFunction(); |
1841 | if (!InnerCallee) |
1842 | return false; |
1843 | LibFunc Func; |
1844 | if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) || |
1845 | Func != LibFunc_malloc) |
1846 | return false; |
1847 | |
1848 | auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) { |
1849 | // Check for a br(icmp(ptr, null), truebb, falsebb) pattern at the end
1850 | // of the malloc block.
1851 | auto *MallocBB = Malloc->getParent(), |
1852 | *MemsetBB = Memset->getParent(); |
1853 | if (MallocBB == MemsetBB) |
1854 | return true; |
1855 | auto *Ptr = Memset->getArgOperand(0); |
1856 | auto *TI = MallocBB->getTerminator(); |
1857 | ICmpInst::Predicate Pred; |
1858 | BasicBlock *TrueBB, *FalseBB; |
1859 | if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB, |
1860 | FalseBB))) |
1861 | return false; |
1862 | if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB) |
1863 | return false; |
1864 | return true; |
1865 | }; |
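// Editorial note: the source-level shape the lambda above recognizes, as a
// hypothetical example:
//   p = malloc(n);
//   if (p != NULL)       // i.e. br(icmp eq p, null) with the memset in the
//     memset(p, 0, n);   // false successor, or both in the same block
// When the memset length equals the malloc size and nothing modifies the
// memory in between, the code below rewrites the pair to p = calloc(1, n),
// after which the memset is a removable no-op store.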
1866 | |
1867 | if (Malloc->getOperand(0) == MemSet->getLength()) { |
1868 | if (shouldCreateCalloc(Malloc, MemSet) && |
1869 | DT.dominates(Malloc, MemSet) && |
1870 | memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT)) { |
1871 | IRBuilder<> IRB(Malloc); |
1872 | const auto &DL = Malloc->getModule()->getDataLayout(); |
1873 | if (auto *Calloc = |
1874 | emitCalloc(ConstantInt::get(IRB.getIntPtrTy(DL), 1), |
1875 | Malloc->getArgOperand(0), IRB, TLI)) { |
1876 | MemorySSAUpdater Updater(&MSSA); |
1877 | auto *LastDef = cast<MemoryDef>( |
1878 | Updater.getMemorySSA()->getMemoryAccess(Malloc)); |
1879 | auto *NewAccess = Updater.createMemoryAccessAfter( |
1880 | cast<Instruction>(Calloc), LastDef, LastDef); |
1881 | auto *NewAccessMD = cast<MemoryDef>(NewAccess); |
1882 | Updater.insertDef(NewAccessMD, /*RenameUses=*/true); |
1883 | Updater.removeMemoryAccess(Malloc); |
1884 | Malloc->replaceAllUsesWith(Calloc); |
1885 | Malloc->eraseFromParent(); |
1886 | return true; |
1887 | } |
1888 | return false; |
1889 | } |
1890 | } |
1891 | } |
1892 | } |
1893 | } |
1894 | |
1895 | if (!Store) |
1896 | return false; |
1897 | |
1898 | if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) { |
1899 | if (LoadI->getPointerOperand() == Store->getOperand(1)) { |
1900 | // Get the defining access for the load. |
1901 | auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess(); |
1902 | // Fast path: the defining accesses are the same. |
1903 | if (LoadAccess == Def->getDefiningAccess()) |
1904 | return true; |
1905 | |
1906 | // Look through phi accesses. Recursively scan all phi accesses by |
1907 | // adding them to a worklist. Bail when we run into a memory def that |
1908 | // does not match LoadAccess. |
1909 | SetVector<MemoryAccess *> ToCheck; |
1910 | MemoryAccess *Current = |
1911 | MSSA.getWalker()->getClobberingMemoryAccess(Def); |
1912 | // We don't want to bail when we run into the store memory def. But, |
1913 | // the phi access may point to it. So, pretend like we've already |
1914 | // checked it. |
1915 | ToCheck.insert(Def); |
1916 | ToCheck.insert(Current); |
1917 | // Start at current (1) to simulate already having checked Def. |
1918 | for (unsigned I = 1; I < ToCheck.size(); ++I) { |
1919 | Current = ToCheck[I]; |
1920 | if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) { |
1921 | // Check all the operands. |
1922 | for (auto &Use : PhiAccess->incoming_values()) |
1923 | ToCheck.insert(cast<MemoryAccess>(&Use)); |
1924 | continue; |
1925 | } |
1926 | |
1927 | // If we found a memory def, bail. This happens when we have an |
1928 | // unrelated write in between an otherwise noop store. |
1929 | assert(isa<MemoryDef>(Current) &&
1930 | "Only MemoryDefs should reach here.");
1931 | // TODO: Skip no alias MemoryDefs that have no aliasing reads. |
1932 | // We are searching for the definition of the store's destination. |
1933 | // So, if that is the same definition as the load, then this is a |
1934 | // noop. Otherwise, fail. |
1935 | if (LoadAccess != Current) |
1936 | return false; |
1937 | } |
1938 | return true; |
1939 | } |
1940 | } |
1941 | |
1942 | return false; |
1943 | } |
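// Editorial note: the two no-op shapes handled above, as hypothetical IR:
//   %v = load i32, i32* %p
//   store i32 %v, i32* %p        ; stores back the value just loaded
// and
//   %p = call i8* @calloc(i64 1, i64 64)
//   ...                          ; nothing clobbers %p in between
//   memset(%p, 0, 64)            ; re-zeroes calloc'ed memory
// In both cases the store changes nothing, so it can be deleted.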
1944 | }; |
1945 | |
1946 | static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, |
1947 | DominatorTree &DT, PostDominatorTree &PDT, |
1948 | const TargetLibraryInfo &TLI, |
1949 | const LoopInfo &LI) { |
1950 | bool MadeChange = false; |
1951 | |
1952 | DSEState State(F, AA, MSSA, DT, PDT, TLI, LI); |
1953 | // For each store: |
1954 | for (unsigned I = 0; I < State.MemDefs.size(); I++) { |
1955 | MemoryDef *KillingDef = State.MemDefs[I]; |
1956 | if (State.SkipStores.count(KillingDef)) |
1957 | continue; |
1958 | Instruction *KillingI = KillingDef->getMemoryInst(); |
1959 | |
1960 | Optional<MemoryLocation> MaybeKillingLoc; |
1961 | if (State.isMemTerminatorInst(KillingI)) |
1962 | MaybeKillingLoc = State.getLocForTerminator(KillingI).map( |
1963 | [](const std::pair<MemoryLocation, bool> &P) { return P.first; }); |
1964 | else |
1965 | MaybeKillingLoc = State.getLocForWriteEx(KillingI); |
1966 | |
1967 | if (!MaybeKillingLoc) { |
1968 | LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
1969 | << *KillingI << "\n");
1970 | continue; |
1971 | } |
1972 | MemoryLocation KillingLoc = *MaybeKillingLoc; |
1973 | assert(KillingLoc.Ptr && "KillingLoc should not be null");
1974 | const Value *KillingUndObj = getUnderlyingObject(KillingLoc.Ptr); |
1975 | |
1976 | MemoryAccess *Current = KillingDef; |
Value stored to 'Current' during its initialization is never read | |
1977 | LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
1978 | << *KillingDef << " (" << *KillingI << ")\n");
1979 | |
1980 | unsigned ScanLimit = MemorySSAScanLimit; |
1981 | unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit; |
1982 | unsigned PartialLimit = MemorySSAPartialStoreLimit; |
1983 | // Worklist of MemoryAccesses that may be killed by KillingDef. |
1984 | SetVector<MemoryAccess *> ToCheck; |
1985 | ToCheck.insert(KillingDef->getDefiningAccess()); |
1986 | |
1987 | bool Shortend = false; |
1988 | bool IsMemTerm = State.isMemTerminatorInst(KillingI); |
1989 | // Check if MemoryAccesses in the worklist are killed by KillingDef. |
1990 | for (unsigned I = 0; I < ToCheck.size(); I++) { |
1991 | Current = ToCheck[I]; |
1992 | if (State.SkipStores.count(Current)) |
1993 | continue; |
1994 | |
1995 | Optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef( |
1996 | KillingDef, Current, KillingLoc, KillingUndObj, ScanLimit, |
1997 | WalkerStepLimit, IsMemTerm, PartialLimit); |
1998 | |
1999 | if (!MaybeDeadAccess) { |
2000 | LLVM_DEBUG(dbgs() << "   finished walk\n");
2001 | continue; |
2002 | } |
2003 | |
2004 | MemoryAccess *DeadAccess = *MaybeDeadAccess; |
2005 | LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess);
2006 | if (isa<MemoryPhi>(DeadAccess)) { |
2007 | LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
2008 | for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) { |
2009 | MemoryAccess *IncomingAccess = cast<MemoryAccess>(V); |
2010 | BasicBlock *IncomingBlock = IncomingAccess->getBlock(); |
2011 | BasicBlock *PhiBlock = DeadAccess->getBlock(); |
2012 | |
2013 | // We only consider incoming MemoryAccesses that come before the |
2014 | // MemoryPhi. Otherwise we could discover candidates that do not |
2015 | // strictly dominate our starting def. |
2016 | if (State.PostOrderNumbers[IncomingBlock] > |
2017 | State.PostOrderNumbers[PhiBlock]) |
2018 | ToCheck.insert(IncomingAccess); |
2019 | } |
2020 | continue; |
2021 | } |
2022 | auto *DeadDefAccess = cast<MemoryDef>(DeadAccess); |
2023 | Instruction *DeadI = DeadDefAccess->getMemoryInst(); |
2024 | LLVM_DEBUG(dbgs() << " (" << *DeadI << ")\n");
2025 | ToCheck.insert(DeadDefAccess->getDefiningAccess()); |
2026 | NumGetDomMemoryDefPassed++; |
2027 | |
2028 | if (!DebugCounter::shouldExecute(MemorySSACounter)) |
2029 | continue; |
2030 | |
2031 | MemoryLocation DeadLoc = *State.getLocForWriteEx(DeadI); |
2032 | |
2033 | if (IsMemTerm) { |
2034 | const Value *DeadUndObj = getUnderlyingObject(DeadLoc.Ptr); |
2035 | if (KillingUndObj != DeadUndObj) |
2036 | continue; |
2037 | LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DeadI
2038 | << "\n  KILLER: " << *KillingI << '\n');
2039 | State.deleteDeadInstruction(DeadI); |
2040 | ++NumFastStores; |
2041 | MadeChange = true; |
2042 | } else { |
2043 | // Check if KillingI overwrites DeadI.
2044 | int64_t KillingOffset = 0; |
2045 | int64_t DeadOffset = 0; |
2046 | OverwriteResult OR = State.isOverwrite( |
2047 | KillingI, DeadI, KillingLoc, DeadLoc, KillingOffset, DeadOffset); |
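     | // For a maybe-partial overwrite, record the overlap intervals per basic
     | // block; removePartiallyOverlappedStores consumes them further below.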
2048 | if (OR == OW_MaybePartial) { |
2049 | auto Iter = State.IOLs.insert( |
2050 | std::make_pair<BasicBlock *, InstOverlapIntervalsTy>( |
2051 | DeadI->getParent(), InstOverlapIntervalsTy())); |
2052 | auto &IOL = Iter.first->second; |
2053 | OR = isPartialOverwrite(KillingLoc, DeadLoc, KillingOffset, |
2054 | DeadOffset, DeadI, IOL); |
2055 | } |
2056 | |
2057 | if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) { |
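     | // Here the killing store writes a subrange of the bytes the dead store
     | // writes. Instead of deleting anything outright, fold the killing store's
     | // bits into the dead store's constant operand and drop the killing store.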
2058 | auto *DeadSI = dyn_cast<StoreInst>(DeadI); |
2059 | auto *KillingSI = dyn_cast<StoreInst>(KillingI); |
2060 | // We are reusing tryToMergePartialOverlappingStores, which requires
2061 | // DeadSI to dominate KillingSI.
2062 | // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
2063 | if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) { |
2064 | if (Constant *Merged = tryToMergePartialOverlappingStores( |
2065 | KillingSI, DeadSI, KillingOffset, DeadOffset, State.DL, |
2066 | State.BatchAA, &DT)) { |
2067 | |
2068 | // Update stored value of earlier store to merged constant. |
2069 | DeadSI->setOperand(0, Merged); |
2070 | ++NumModifiedStores; |
2071 | MadeChange = true; |
2072 | |
2073 | Shortend = true; |
2074 | // Remove the killing store and any outstanding overlap
2075 | // intervals for the updated store.
2076 | State.deleteDeadInstruction(KillingSI); |
2077 | auto I = State.IOLs.find(DeadSI->getParent()); |
2078 | if (I != State.IOLs.end()) |
2079 | I->second.erase(DeadSI); |
2080 | break; |
2081 | } |
2082 | } |
2083 | } |
2084 | |
2085 | if (OR == OW_Complete) { |
2086 | LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI
2087 |                   << "\n KILLER: " << *KillingI << '\n');
2088 | State.deleteDeadInstruction(DeadI); |
2089 | ++NumFastStores; |
2090 | MadeChange = true; |
2091 | } |
2092 | } |
2093 | } |
2094 | |
2095 | // Check if the store is a no-op. |
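     | // (e.g. it stores back a value that was just loaded from the same location).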
2096 | if (!Shortend && isRemovable(KillingI) && |
2097 | State.storeIsNoop(KillingDef, KillingUndObj)) { |
2098 | LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *KillingI
2099 |                   << '\n');
2100 | State.deleteDeadInstruction(KillingI); |
2101 | NumRedundantStores++; |
2102 | MadeChange = true; |
2103 | continue; |
2104 | } |
2105 | } |
2106 | |
2107 | if (EnablePartialOverwriteTracking) |
2108 | for (auto &KV : State.IOLs) |
2109 | MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI); |
2110 | |
2111 | MadeChange |= State.eliminateDeadWritesAtEndOfFunction(); |
2112 | return MadeChange; |
2113 | } |
2114 | } // end anonymous namespace |
2115 | |
2116 | //===----------------------------------------------------------------------===// |
2117 | // DSE Pass |
2118 | //===----------------------------------------------------------------------===// |
2119 | PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) { |
2120 | AliasAnalysis &AA = AM.getResult<AAManager>(F); |
2121 | const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F); |
2122 | DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F); |
2123 | MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); |
2124 | PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F); |
2125 | LoopInfo &LI = AM.getResult<LoopAnalysis>(F); |
2126 | |
2127 | bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI); |
2128 | |
2129 | #ifdef LLVM_ENABLE_STATS
2130 | if (AreStatisticsEnabled()) |
2131 | for (auto &I : instructions(F)) |
2132 | NumRemainingStores += isa<StoreInst>(&I); |
2133 | #endif |
2134 | |
2135 | if (!Changed) |
2136 | return PreservedAnalyses::all(); |
2137 | |
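     | // DSE never changes the CFG, and MemorySSA is updated incrementally as
     | // instructions are removed, so these analyses all remain valid.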
2138 | PreservedAnalyses PA; |
2139 | PA.preserveSet<CFGAnalyses>(); |
2140 | PA.preserve<MemorySSAAnalysis>(); |
2141 | PA.preserve<LoopAnalysis>(); |
2142 | return PA; |
2143 | } |
2144 | |
2145 | namespace { |
2146 | |
2147 | /// A legacy pass for the legacy pass manager that wraps \c DSEPass. |
2148 | class DSELegacyPass : public FunctionPass { |
2149 | public: |
2150 | static char ID; // Pass identification, replacement for typeid |
2151 | |
2152 | DSELegacyPass() : FunctionPass(ID) { |
2153 | initializeDSELegacyPassPass(*PassRegistry::getPassRegistry()); |
2154 | } |
2155 | |
2156 | bool runOnFunction(Function &F) override { |
2157 | if (skipFunction(F)) |
2158 | return false; |
2159 | |
2160 | AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); |
2161 | DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); |
2162 | const TargetLibraryInfo &TLI = |
2163 | getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); |
2164 | MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); |
2165 | PostDominatorTree &PDT = |
2166 | getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree(); |
2167 | LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); |
2168 | |
2169 | bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI); |
2170 | |
2171 | #ifdef LLVM_ENABLE_STATS
2172 | if (AreStatisticsEnabled()) |
2173 | for (auto &I : instructions(F)) |
2174 | NumRemainingStores += isa<StoreInst>(&I); |
2175 | #endif |
2176 | |
2177 | return Changed; |
2178 | } |
2179 | |
2180 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
2181 | AU.setPreservesCFG(); |
2182 | AU.addRequired<AAResultsWrapperPass>(); |
2183 | AU.addRequired<TargetLibraryInfoWrapperPass>(); |
2184 | AU.addPreserved<GlobalsAAWrapperPass>(); |
2185 | AU.addRequired<DominatorTreeWrapperPass>(); |
2186 | AU.addPreserved<DominatorTreeWrapperPass>(); |
2187 | AU.addRequired<PostDominatorTreeWrapperPass>(); |
2188 | AU.addRequired<MemorySSAWrapperPass>(); |
2189 | AU.addPreserved<PostDominatorTreeWrapperPass>(); |
2190 | AU.addPreserved<MemorySSAWrapperPass>(); |
2191 | AU.addRequired<LoopInfoWrapperPass>(); |
2192 | AU.addPreserved<LoopInfoWrapperPass>(); |
2193 | } |
2194 | }; |
2195 | |
2196 | } // end anonymous namespace |
2197 | |
2198 | char DSELegacyPass::ID = 0; |
2199 | |
2200 | INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2201 |                       false)
2202 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2203 | INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2204 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2205 | INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2206 | INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2207 | INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2208 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2209 | INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2210 | INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2211 |                     false)
2212 | |
2213 | FunctionPass *llvm::createDeadStoreEliminationPass() { |
2214 | return new DSELegacyPass(); |
2215 | } |