Bug Summary

File: llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
Warning: line 745, column 24
Called C++ object pointer is null
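
The path reported below enters getLocForWrite (source line 211) with an instruction it does not handle, so the function returns a default-constructed MemoryLocation whose Ptr member is null; removePartiallyOverlappedStores then dereferences that pointer at line 745 via Loc.Ptr->stripPointerCasts(). The standalone C++ sketch below models that shape under stated assumptions: Loc, getLocFor, and useLoc are illustrative stand-ins, not LLVM types or APIs.

    #include <cstdio>

    // Illustrative stand-in for a location type whose pointer may be null.
    struct Loc {
      const int *Ptr;
      Loc() : Ptr(nullptr) {}                // default-constructed -> null, like MemoryLocation()
      explicit Loc(const int *P) : Ptr(P) {}
    };

    // Models getLocForWrite: unhandled cases fall back to a default-constructed Loc.
    static Loc getLocFor(bool Handled, const int *P) {
      if (Handled)
        return Loc(P);
      return Loc(); // "Unhandled intrinsic" path: Ptr stays null
    }

    // Models line 745: dereferencing Loc.Ptr without checking it for null first.
    static int useLoc(const Loc &L) {
      return *L.Ptr; // null dereference when the unhandled path was taken
    }

    int main() {
      int X = 42;
      std::printf("%d\n", useLoc(getLocFor(true, &X))); // prints 42
      // useLoc(getLocFor(false, &X)) would dereference a null pointer,
      // which is the defect the analyzer reports at line 745 below.
      return 0;
    }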

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name DeadStoreElimination.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Transforms/Scalar -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-08-28-193554-24367-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp

1//===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The code below implements dead store elimination using MemorySSA. It uses
10// the following general approach: given a MemoryDef, walk upwards to find
11// clobbering MemoryDefs that may be killed by the starting def. Then check
12// that there are no uses that may read the location of the original MemoryDef
13// in between both MemoryDefs. A bit more concretely:
14//
15// For all MemoryDefs StartDef:
16// 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking
17// upwards.
18// 2. Check that there are no reads between EarlierAccess and the StartDef by
19// checking all uses starting at EarlierAccess and walking until we see
20// StartDef.
21// 3. For each found CurrentDef, check that:
22// 1. There are no barrier instructions between CurrentDef and StartDef (like
23// throws or stores with ordering constraints).
24// 2. StartDef is executed whenever CurrentDef is executed.
25// 3. StartDef completely overwrites CurrentDef.
26// 4. Erase CurrentDef from the function and MemorySSA.
27//
28//===----------------------------------------------------------------------===//
29
30#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31#include "llvm/ADT/APInt.h"
32#include "llvm/ADT/DenseMap.h"
33#include "llvm/ADT/MapVector.h"
34#include "llvm/ADT/PostOrderIterator.h"
35#include "llvm/ADT/SetVector.h"
36#include "llvm/ADT/SmallPtrSet.h"
37#include "llvm/ADT/SmallVector.h"
38#include "llvm/ADT/Statistic.h"
39#include "llvm/ADT/StringRef.h"
40#include "llvm/Analysis/AliasAnalysis.h"
41#include "llvm/Analysis/CaptureTracking.h"
42#include "llvm/Analysis/GlobalsModRef.h"
43#include "llvm/Analysis/LoopInfo.h"
44#include "llvm/Analysis/MemoryBuiltins.h"
45#include "llvm/Analysis/MemoryLocation.h"
46#include "llvm/Analysis/MemorySSA.h"
47#include "llvm/Analysis/MemorySSAUpdater.h"
48#include "llvm/Analysis/MustExecute.h"
49#include "llvm/Analysis/PostDominators.h"
50#include "llvm/Analysis/TargetLibraryInfo.h"
51#include "llvm/Analysis/ValueTracking.h"
52#include "llvm/IR/Argument.h"
53#include "llvm/IR/BasicBlock.h"
54#include "llvm/IR/Constant.h"
55#include "llvm/IR/Constants.h"
56#include "llvm/IR/DataLayout.h"
57#include "llvm/IR/Dominators.h"
58#include "llvm/IR/Function.h"
59#include "llvm/IR/IRBuilder.h"
60#include "llvm/IR/InstIterator.h"
61#include "llvm/IR/InstrTypes.h"
62#include "llvm/IR/Instruction.h"
63#include "llvm/IR/Instructions.h"
64#include "llvm/IR/IntrinsicInst.h"
65#include "llvm/IR/Intrinsics.h"
66#include "llvm/IR/LLVMContext.h"
67#include "llvm/IR/Module.h"
68#include "llvm/IR/PassManager.h"
69#include "llvm/IR/PatternMatch.h"
70#include "llvm/IR/Value.h"
71#include "llvm/InitializePasses.h"
72#include "llvm/Pass.h"
73#include "llvm/Support/Casting.h"
74#include "llvm/Support/CommandLine.h"
75#include "llvm/Support/Debug.h"
76#include "llvm/Support/DebugCounter.h"
77#include "llvm/Support/ErrorHandling.h"
78#include "llvm/Support/MathExtras.h"
79#include "llvm/Support/raw_ostream.h"
80#include "llvm/Transforms/Scalar.h"
81#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
82#include "llvm/Transforms/Utils/BuildLibCalls.h"
83#include "llvm/Transforms/Utils/Local.h"
84#include <algorithm>
85#include <cassert>
86#include <cstddef>
87#include <cstdint>
88#include <iterator>
89#include <map>
90#include <utility>
91
92using namespace llvm;
93using namespace PatternMatch;
94
95#define DEBUG_TYPE "dse"
96
97STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
98STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
99STATISTIC(NumFastStores, "Number of stores deleted");
100STATISTIC(NumFastOther, "Number of other instrs removed");
101STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
102STATISTIC(NumModifiedStores, "Number of stores modified");
103STATISTIC(NumCFGChecks, "Number of stores modified");
104STATISTIC(NumCFGTries, "Number of stores modified");
105STATISTIC(NumCFGSuccess, "Number of stores modified");
106STATISTIC(NumGetDomMemoryDefPassed,
107          "Number of times a valid candidate is returned from getDomMemoryDef");
108STATISTIC(NumDomMemDefChecks,
109          "Number iterations check for reads in getDomMemoryDef");
110
111DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
112              "Controls which MemoryDefs are eliminated.");
113
114static cl::opt<bool>
115EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
116 cl::init(true), cl::Hidden,
117 cl::desc("Enable partial-overwrite tracking in DSE"));
118
119static cl::opt<bool>
120EnablePartialStoreMerging("enable-dse-partial-store-merging",
121 cl::init(true), cl::Hidden,
122 cl::desc("Enable partial store merging in DSE"));
123
124static cl::opt<unsigned>
125 MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
126 cl::desc("The number of memory instructions to scan for "
127 "dead store elimination (default = 150)"));
128static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
129 "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
130 cl::desc("The maximum number of steps while walking upwards to find "
131 "MemoryDefs that may be killed (default = 90)"));
132
133static cl::opt<unsigned> MemorySSAPartialStoreLimit(
134 "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
135    cl::desc("The maximum number of candidates that only partially overwrite the "
136 "killing MemoryDef to consider"
137 " (default = 5)"));
138
139static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
140 "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
141    cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
142 "other stores per basic block (default = 5000)"));
143
144static cl::opt<unsigned> MemorySSASameBBStepCost(
145 "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
146 cl::desc(
147 "The cost of a step in the same basic block as the killing MemoryDef"
148 "(default = 1)"));
149
150static cl::opt<unsigned>
151 MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
152 cl::Hidden,
153 cl::desc("The cost of a step in a different basic "
154 "block than the killing MemoryDef"
155 "(default = 5)"));
156
157static cl::opt<unsigned> MemorySSAPathCheckLimit(
158 "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
159 cl::desc("The maximum number of blocks to check when trying to prove that "
160 "all paths to an exit go through a killing block (default = 50)"));
161
162//===----------------------------------------------------------------------===//
163// Helper functions
164//===----------------------------------------------------------------------===//
165using OverlapIntervalsTy = std::map<int64_t, int64_t>;
166using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
167
168/// Does this instruction write some memory? This only returns true for things
169/// that we can analyze with other helpers below.
170static bool hasAnalyzableMemoryWrite(Instruction *I,
171 const TargetLibraryInfo &TLI) {
172 if (isa<StoreInst>(I))
173 return true;
174 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
175 switch (II->getIntrinsicID()) {
176 default:
177 return false;
178 case Intrinsic::memset:
179 case Intrinsic::memmove:
180 case Intrinsic::memcpy:
181 case Intrinsic::memcpy_inline:
182 case Intrinsic::memcpy_element_unordered_atomic:
183 case Intrinsic::memmove_element_unordered_atomic:
184 case Intrinsic::memset_element_unordered_atomic:
185 case Intrinsic::init_trampoline:
186 case Intrinsic::lifetime_end:
187 case Intrinsic::masked_store:
188 return true;
189 }
190 }
191 if (auto *CB = dyn_cast<CallBase>(I)) {
192 LibFunc LF;
193 if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
194 switch (LF) {
195 case LibFunc_strcpy:
196 case LibFunc_strncpy:
197 case LibFunc_strcat:
198 case LibFunc_strncat:
199 return true;
200 default:
201 return false;
202 }
203 }
204 }
205 return false;
206}
207
208/// Return a Location stored to by the specified instruction. If isRemovable
209/// returns true, this function and getLocForRead completely describe the memory
210/// operations for this instruction.
211static MemoryLocation getLocForWrite(Instruction *Inst,
212 const TargetLibraryInfo &TLI) {
213  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
  7: Assuming 'Inst' is not a 'StoreInst'
  7.1: 'SI' is null
  8: Taking false branch
214 return MemoryLocation::get(SI);
215
216 // memcpy/memmove/memset.
217  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst))
  9: Assuming 'Inst' is not a 'AnyMemIntrinsic'
  9.1: 'MI' is null
  10: Taking false branch
218 return MemoryLocation::getForDest(MI);
219
220  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
  11: Assuming 'Inst' is a 'IntrinsicInst'
  11.1: 'II' is non-null
  12: Taking true branch
221    switch (II->getIntrinsicID()) {
  13: Control jumps to the 'default' case at line 222
222    default:
223      return MemoryLocation(); // Unhandled intrinsic.
  14: Calling default constructor for 'MemoryLocation'
  16: Returning from default constructor for 'MemoryLocation'
224 case Intrinsic::init_trampoline:
225 return MemoryLocation::getAfter(II->getArgOperand(0));
226 case Intrinsic::masked_store:
227 return MemoryLocation::getForArgument(II, 1, TLI);
228 case Intrinsic::lifetime_end: {
229 uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
230 return MemoryLocation(II->getArgOperand(1), Len);
231 }
232 }
233 }
234 if (auto *CB = dyn_cast<CallBase>(Inst))
235 // All the supported TLI functions so far happen to have dest as their
236 // first argument.
237 return MemoryLocation::getAfter(CB->getArgOperand(0));
238 return MemoryLocation();
239}
240
241/// If the value of this instruction and the memory it writes to is unused, may
242/// we delete this instruction?
243static bool isRemovable(Instruction *I) {
244 // Don't remove volatile/atomic stores.
245 if (StoreInst *SI = dyn_cast<StoreInst>(I))
246 return SI->isUnordered();
247
248 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
249 switch (II->getIntrinsicID()) {
250    default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
251 case Intrinsic::lifetime_end:
252 // Never remove dead lifetime_end's, e.g. because it is followed by a
253 // free.
254 return false;
255 case Intrinsic::init_trampoline:
256 // Always safe to remove init_trampoline.
257 return true;
258 case Intrinsic::memset:
259 case Intrinsic::memmove:
260 case Intrinsic::memcpy:
261 case Intrinsic::memcpy_inline:
262 // Don't remove volatile memory intrinsics.
263 return !cast<MemIntrinsic>(II)->isVolatile();
264 case Intrinsic::memcpy_element_unordered_atomic:
265 case Intrinsic::memmove_element_unordered_atomic:
266 case Intrinsic::memset_element_unordered_atomic:
267 case Intrinsic::masked_store:
268 return true;
269 }
270 }
271
272 // note: only get here for calls with analyzable writes - i.e. libcalls
273 if (auto *CB = dyn_cast<CallBase>(I))
274 return CB->use_empty();
275
276 return false;
277}
278
279/// Returns true if the end of this instruction can be safely shortened in
280/// length.
281static bool isShortenableAtTheEnd(Instruction *I) {
282 // Don't shorten stores for now
283 if (isa<StoreInst>(I))
284 return false;
285
286 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
287 switch (II->getIntrinsicID()) {
288 default: return false;
289 case Intrinsic::memset:
290 case Intrinsic::memcpy:
291 case Intrinsic::memcpy_element_unordered_atomic:
292 case Intrinsic::memset_element_unordered_atomic:
293 // Do shorten memory intrinsics.
294 // FIXME: Add memmove if it's also safe to transform.
295 return true;
296 }
297 }
298
299 // Don't shorten libcalls calls for now.
300
301 return false;
302}
303
304/// Returns true if the beginning of this instruction can be safely shortened
305/// in length.
306static bool isShortenableAtTheBeginning(Instruction *I) {
307 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
308 // easily done by offsetting the source address.
309 return isa<AnyMemSetInst>(I);
310}
311
312static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
313 const TargetLibraryInfo &TLI,
314 const Function *F) {
315 uint64_t Size;
316 ObjectSizeOpts Opts;
317 Opts.NullIsUnknownSize = NullPointerIsDefined(F);
318
319 if (getObjectSize(V, Size, DL, &TLI, Opts))
320 return Size;
321 return MemoryLocation::UnknownSize;
322}
323
324namespace {
325
326enum OverwriteResult {
327 OW_Begin,
328 OW_Complete,
329 OW_End,
330 OW_PartialEarlierWithFullLater,
331 OW_MaybePartial,
332 OW_Unknown
333};
334
335} // end anonymous namespace
336
337/// Check if two instructions are masked stores that completely
338/// overwrite one another. More specifically, \p Later has to
339/// overwrite \p Earlier.
340static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later,
341 const Instruction *Earlier,
342 BatchAAResults &AA) {
343 const auto *IIL = dyn_cast<IntrinsicInst>(Later);
344 const auto *IIE = dyn_cast<IntrinsicInst>(Earlier);
345 if (IIL == nullptr || IIE == nullptr)
346 return OW_Unknown;
347 if (IIL->getIntrinsicID() != Intrinsic::masked_store ||
348 IIE->getIntrinsicID() != Intrinsic::masked_store)
349 return OW_Unknown;
350 // Pointers.
351 Value *LP = IIL->getArgOperand(1)->stripPointerCasts();
352 Value *EP = IIE->getArgOperand(1)->stripPointerCasts();
353 if (LP != EP && !AA.isMustAlias(LP, EP))
354 return OW_Unknown;
355 // Masks.
356 // TODO: check that Later's mask is a superset of the Earlier's mask.
357 if (IIL->getArgOperand(3) != IIE->getArgOperand(3))
358 return OW_Unknown;
359 return OW_Complete;
360}
361
362/// Return 'OW_Complete' if a store to the 'Later' location completely
363/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
364/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
365/// beginning of the 'Earlier' location is overwritten by 'Later'.
366/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
367/// overwritten by a latter (smaller) store which doesn't write outside the big
368/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
369/// NOTE: This function must only be called if both \p Later and \p Earlier
370/// write to the same underlying object with valid \p EarlierOff and \p
371/// LaterOff.
372static OverwriteResult isPartialOverwrite(const MemoryLocation &Later,
373 const MemoryLocation &Earlier,
374 int64_t EarlierOff, int64_t LaterOff,
375 Instruction *DepWrite,
376 InstOverlapIntervalsTy &IOL) {
377 const uint64_t LaterSize = Later.Size.getValue();
378 const uint64_t EarlierSize = Earlier.Size.getValue();
379 // We may now overlap, although the overlap is not complete. There might also
380 // be other incomplete overlaps, and together, they might cover the complete
381 // earlier write.
382 // Note: The correctness of this logic depends on the fact that this function
383 // is not even called providing DepWrite when there are any intervening reads.
384 if (EnablePartialOverwriteTracking &&
385 LaterOff < int64_t(EarlierOff + EarlierSize) &&
386 int64_t(LaterOff + LaterSize) >= EarlierOff) {
387
388 // Insert our part of the overlap into the map.
389 auto &IM = IOL[DepWrite];
390    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
391                      << ", " << int64_t(EarlierOff + EarlierSize)
392                      << ") Later [" << LaterOff << ", "
393                      << int64_t(LaterOff + LaterSize) << ")\n");
394
395 // Make sure that we only insert non-overlapping intervals and combine
396 // adjacent intervals. The intervals are stored in the map with the ending
397 // offset as the key (in the half-open sense) and the starting offset as
398 // the value.
399 int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
400
401 // Find any intervals ending at, or after, LaterIntStart which start
402 // before LaterIntEnd.
403 auto ILI = IM.lower_bound(LaterIntStart);
404 if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
405 // This existing interval is overlapped with the current store somewhere
406 // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
407 // intervals and adjusting our start and end.
408 LaterIntStart = std::min(LaterIntStart, ILI->second);
409 LaterIntEnd = std::max(LaterIntEnd, ILI->first);
410 ILI = IM.erase(ILI);
411
412 // Continue erasing and adjusting our end in case other previous
413 // intervals are also overlapped with the current store.
414 //
415      //     |--- earlier 1 ---|  |--- earlier 2 ---|
416 // |------- later---------|
417 //
418 while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
419        assert(ILI->second > LaterIntStart && "Unexpected interval");
420 LaterIntEnd = std::max(LaterIntEnd, ILI->first);
421 ILI = IM.erase(ILI);
422 }
423 }
424
425 IM[LaterIntEnd] = LaterIntStart;
426
427 ILI = IM.begin();
428 if (ILI->second <= EarlierOff &&
429 ILI->first >= int64_t(EarlierOff + EarlierSize)) {
430      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
431                        << EarlierOff << ", "
432                        << int64_t(EarlierOff + EarlierSize)
433                        << ") Composite Later [" << ILI->second << ", "
434                        << ILI->first << ")\n");
435 ++NumCompletePartials;
436 return OW_Complete;
437 }
438 }
439
440 // Check for an earlier store which writes to all the memory locations that
441 // the later store writes to.
442 if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
443 int64_t(EarlierOff + EarlierSize) > LaterOff &&
444 uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
445    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
446                      << EarlierOff << ", "
447                      << int64_t(EarlierOff + EarlierSize)
448                      << ") by a later store [" << LaterOff << ", "
449                      << int64_t(LaterOff + LaterSize) << ")\n");
450 // TODO: Maybe come up with a better name?
451 return OW_PartialEarlierWithFullLater;
452 }
453
454 // Another interesting case is if the later store overwrites the end of the
455 // earlier store.
456 //
457 // |--earlier--|
458 // |-- later --|
459 //
460 // In this case we may want to trim the size of earlier to avoid generating
461 // writes to addresses which will definitely be overwritten later
462 if (!EnablePartialOverwriteTracking &&
463 (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
464 int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
465 return OW_End;
466
467 // Finally, we also need to check if the later store overwrites the beginning
468 // of the earlier store.
469 //
470 // |--earlier--|
471 // |-- later --|
472 //
473 // In this case we may want to move the destination address and trim the size
474 // of earlier to avoid generating writes to addresses which will definitely
475 // be overwritten later.
476 if (!EnablePartialOverwriteTracking &&
477 (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
478    assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
479           "Expect to be handled as OW_Complete");
480 return OW_Begin;
481 }
482 // Otherwise, they don't completely overlap.
483 return OW_Unknown;
484}
485
486/// Returns true if the memory which is accessed by the second instruction is not
487/// modified between the first and the second instruction.
488/// Precondition: Second instruction must be dominated by the first
489/// instruction.
490static bool
491memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
492 BatchAAResults &AA, const DataLayout &DL,
493 DominatorTree *DT) {
494 // Do a backwards scan through the CFG from SecondI to FirstI. Look for
495 // instructions which can modify the memory location accessed by SecondI.
496 //
497 // While doing the walk keep track of the address to check. It might be
498 // different in different basic blocks due to PHI translation.
499 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
500 SmallVector<BlockAddressPair, 16> WorkList;
501 // Keep track of the address we visited each block with. Bail out if we
502 // visit a block with different addresses.
503 DenseMap<BasicBlock *, Value *> Visited;
504
505 BasicBlock::iterator FirstBBI(FirstI);
506 ++FirstBBI;
507 BasicBlock::iterator SecondBBI(SecondI);
508 BasicBlock *FirstBB = FirstI->getParent();
509 BasicBlock *SecondBB = SecondI->getParent();
510 MemoryLocation MemLoc;
511 if (auto *MemSet = dyn_cast<MemSetInst>(SecondI))
512 MemLoc = MemoryLocation::getForDest(MemSet);
513 else
514 MemLoc = MemoryLocation::get(SecondI);
515
516 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
517
518 // Start checking the SecondBB.
519 WorkList.push_back(
520 std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
521 bool isFirstBlock = true;
522
523 // Check all blocks going backward until we reach the FirstBB.
524 while (!WorkList.empty()) {
525 BlockAddressPair Current = WorkList.pop_back_val();
526 BasicBlock *B = Current.first;
527 PHITransAddr &Addr = Current.second;
528 Value *Ptr = Addr.getAddr();
529
530 // Ignore instructions before FirstI if this is the FirstBB.
531 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
532
533 BasicBlock::iterator EI;
534 if (isFirstBlock) {
535 // Ignore instructions after SecondI if this is the first visit of SecondBB.
536      assert(B == SecondBB && "first block is not the store block");
537 EI = SecondBBI;
538 isFirstBlock = false;
539 } else {
540 // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
541 // In this case we also have to look at instructions after SecondI.
542 EI = B->end();
543 }
544 for (; BI != EI; ++BI) {
545 Instruction *I = &*BI;
546 if (I->mayWriteToMemory() && I != SecondI)
547 if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
548 return false;
549 }
550 if (B != FirstBB) {
551 assert(B != &FirstBB->getParent()->getEntryBlock() &&(static_cast <bool> (B != &FirstBB->getParent()->
getEntryBlock() && "Should not hit the entry block because SI must be dominated by LI"
) ? void (0) : __assert_fail ("B != &FirstBB->getParent()->getEntryBlock() && \"Should not hit the entry block because SI must be dominated by LI\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp"
, 552, __extension__ __PRETTY_FUNCTION__))
552 "Should not hit the entry block because SI must be dominated by LI")(static_cast <bool> (B != &FirstBB->getParent()->
getEntryBlock() && "Should not hit the entry block because SI must be dominated by LI"
) ? void (0) : __assert_fail ("B != &FirstBB->getParent()->getEntryBlock() && \"Should not hit the entry block because SI must be dominated by LI\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp"
, 552, __extension__ __PRETTY_FUNCTION__))
;
553 for (BasicBlock *Pred : predecessors(B)) {
554 PHITransAddr PredAddr = Addr;
555 if (PredAddr.NeedsPHITranslationFromBlock(B)) {
556 if (!PredAddr.IsPotentiallyPHITranslatable())
557 return false;
558 if (PredAddr.PHITranslateValue(B, Pred, DT, false))
559 return false;
560 }
561 Value *TranslatedPtr = PredAddr.getAddr();
562 auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
563 if (!Inserted.second) {
564 // We already visited this block before. If it was with a different
565 // address - bail out!
566 if (TranslatedPtr != Inserted.first->second)
567 return false;
568 // ... otherwise just skip it.
569 continue;
570 }
571 WorkList.push_back(std::make_pair(Pred, PredAddr));
572 }
573 }
574 }
575 return true;
576}
577
578static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart,
579 uint64_t &EarlierSize, int64_t LaterStart,
580 uint64_t LaterSize, bool IsOverwriteEnd) {
581 auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
582 Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne();
583
584  // We assume that memset/memcpy operates in chunks of the "largest" native
585  // type size and aligned on the same value. That means the optimal start and
586  // size of memset/memcpy should be a multiple of the preferred alignment of
587  // that type, so there is no sense in trying to reduce the store size any
588  // further, since any "extra" stores come for free anyway.
589  // On the other hand, the maximum alignment we can achieve is limited by the
590  // alignment of the initial store.
591
592 // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
593 // "largest" native type.
594 // Note: What is the proper way to get that value?
595 // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
596 // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
597
598 int64_t ToRemoveStart = 0;
599 uint64_t ToRemoveSize = 0;
600 // Compute start and size of the region to remove. Make sure 'PrefAlign' is
601 // maintained on the remaining store.
602 if (IsOverwriteEnd) {
603    // Calculate the required adjustment for 'LaterStart' in order to keep the
604    // remaining store size aligned on 'PrefAlign'.
605 uint64_t Off =
606 offsetToAlignment(uint64_t(LaterStart - EarlierStart), PrefAlign);
607 ToRemoveStart = LaterStart + Off;
608 if (EarlierSize <= uint64_t(ToRemoveStart - EarlierStart))
609 return false;
610 ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart);
611 } else {
612 ToRemoveStart = EarlierStart;
613    assert(LaterSize >= uint64_t(EarlierStart - LaterStart) &&
614           "Not overlapping accesses?");
615 ToRemoveSize = LaterSize - uint64_t(EarlierStart - LaterStart);
616    // Calculate the required adjustment for 'ToRemoveSize' in order to keep the
617    // start of the remaining store aligned on 'PrefAlign'.
618 uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
619 if (Off != 0) {
620 if (ToRemoveSize <= (PrefAlign.value() - Off))
621 return false;
622 ToRemoveSize -= PrefAlign.value() - Off;
623 }
624    assert(isAligned(PrefAlign, ToRemoveSize) &&
625           "Should preserve selected alignment");
626 }
627
628  assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
629  assert(EarlierSize > ToRemoveSize && "Can't remove more than original size");
630
631 uint64_t NewSize = EarlierSize - ToRemoveSize;
632 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
633 // When shortening an atomic memory intrinsic, the newly shortened
634 // length must remain an integer multiple of the element size.
635 const uint32_t ElementSize = AMI->getElementSizeInBytes();
636 if (0 != NewSize % ElementSize)
637 return false;
638 }
639
640  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
641                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
642                    << *EarlierWrite << "\n  KILLER [" << ToRemoveStart << ", "
643                    << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
644
645 Value *EarlierWriteLength = EarlierIntrinsic->getLength();
646 Value *TrimmedLength =
647 ConstantInt::get(EarlierWriteLength->getType(), NewSize);
648 EarlierIntrinsic->setLength(TrimmedLength);
649 EarlierIntrinsic->setDestAlignment(PrefAlign);
650
651 if (!IsOverwriteEnd) {
652 Value *OrigDest = EarlierIntrinsic->getRawDest();
653 Type *Int8PtrTy =
654 Type::getInt8PtrTy(EarlierIntrinsic->getContext(),
655 OrigDest->getType()->getPointerAddressSpace());
656 Value *Dest = OrigDest;
657 if (OrigDest->getType() != Int8PtrTy)
658 Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", EarlierWrite);
659 Value *Indices[1] = {
660 ConstantInt::get(EarlierWriteLength->getType(), ToRemoveSize)};
661 Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds(
662 Type::getInt8Ty(EarlierIntrinsic->getContext()),
663 Dest, Indices, "", EarlierWrite);
664 NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
665 if (NewDestGEP->getType() != OrigDest->getType())
666 NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(),
667 "", EarlierWrite);
668 EarlierIntrinsic->setDest(NewDestGEP);
669 }
670
671 // Finally update start and size of earlier access.
672 if (!IsOverwriteEnd)
673 EarlierStart += ToRemoveSize;
674 EarlierSize = NewSize;
675
676 return true;
677}
678
679static bool tryToShortenEnd(Instruction *EarlierWrite,
680 OverlapIntervalsTy &IntervalMap,
681 int64_t &EarlierStart, uint64_t &EarlierSize) {
682 if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
683 return false;
684
685 OverlapIntervalsTy::iterator OII = --IntervalMap.end();
686 int64_t LaterStart = OII->second;
687 uint64_t LaterSize = OII->first - LaterStart;
688
689  assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
690
691 if (LaterStart > EarlierStart &&
692 // Note: "LaterStart - EarlierStart" is known to be positive due to
693 // preceding check.
694 (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
695 // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
696 // be non negative due to preceding checks.
697 LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
698 if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
699 LaterSize, true)) {
700 IntervalMap.erase(OII);
701 return true;
702 }
703 }
704 return false;
705}
706
707static bool tryToShortenBegin(Instruction *EarlierWrite,
708 OverlapIntervalsTy &IntervalMap,
709 int64_t &EarlierStart, uint64_t &EarlierSize) {
710 if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
711 return false;
712
713 OverlapIntervalsTy::iterator OII = IntervalMap.begin();
714 int64_t LaterStart = OII->second;
715 uint64_t LaterSize = OII->first - LaterStart;
716
717  assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
718
719 if (LaterStart <= EarlierStart &&
720 // Note: "EarlierStart - LaterStart" is known to be non negative due to
721 // preceding check.
722 LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
723 // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
724 // positive due to preceding checks.
725    assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
726           "Should have been handled as OW_Complete");
727 if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
728 LaterSize, false)) {
729 IntervalMap.erase(OII);
730 return true;
731 }
732 }
733 return false;
734}
735
736static bool removePartiallyOverlappedStores(const DataLayout &DL,
737 InstOverlapIntervalsTy &IOL,
738 const TargetLibraryInfo &TLI) {
739 bool Changed = false;
740 for (auto OI : IOL) {
741 Instruction *EarlierWrite = OI.first;
742 MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
  6: Calling 'getLocForWrite'
  17: Returning from 'getLocForWrite'
743    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
  18: '?' condition is true
744
745    const Value *Ptr = Loc.Ptr->stripPointerCasts();
  19: Called C++ object pointer is null
746 int64_t EarlierStart = 0;
747 uint64_t EarlierSize = Loc.Size.getValue();
748 GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
749 OverlapIntervalsTy &IntervalMap = OI.second;
750 Changed |=
751 tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
752 if (IntervalMap.empty())
753 continue;
754 Changed |=
755 tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
756 }
757 return Changed;
758}
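
Since getLocForWrite can legitimately return an empty MemoryLocation (the "Unhandled intrinsic" path at line 223), the dereference at line 745 needs a non-null pointer to be safe. A minimal sketch of such a guard is shown below; it is one possible hardening of removePartiallyOverlappedStores under that assumption, not necessarily the fix adopted upstream.

    // Sketch only: skip entries for which no memory location could be computed,
    // so Loc.Ptr is never dereferenced while null.
    MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
    if (!Loc.Ptr)
      continue;
    const Value *Ptr = Loc.Ptr->stripPointerCasts();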
759
760static Constant *tryToMergePartialOverlappingStores(
761 StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset,
762 int64_t DepWriteOffset, const DataLayout &DL, BatchAAResults &AA,
763 DominatorTree *DT) {
764
765 if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
766 DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
767 Later && isa<ConstantInt>(Later->getValueOperand()) &&
768 DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
769 memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
770 // If the store we find is:
771 // a) partially overwritten by the store to 'Loc'
772 // b) the later store is fully contained in the earlier one and
773 // c) they both have a constant value
774 // d) none of the two stores need padding
775 // Merge the two stores, replacing the earlier store's value with a
776 // merge of both values.
777 // TODO: Deal with other constant types (vectors, etc), and probably
778 // some mem intrinsics (if needed)
779
780 APInt EarlierValue =
781 cast<ConstantInt>(Earlier->getValueOperand())->getValue();
782 APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
783 unsigned LaterBits = LaterValue.getBitWidth();
784    assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
785 LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
786
787 // Offset of the smaller store inside the larger store
788 unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
789 unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
790 BitOffsetDiff - LaterBits
791 : BitOffsetDiff;
792 APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
793 LShiftAmount + LaterBits);
794 // Clear the bits we'll be replacing, then OR with the smaller
795 // store, shifted appropriately.
796 APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
797    LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *Earlier
798                      << "\n  Later: " << *Later
799                      << "\n  Merged Value: " << Merged << '\n');
800 return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
801 }
802 return nullptr;
803}
804
805namespace {
806// Returns true if \p I is an intrinsic that does not read or write memory.
807bool isNoopIntrinsic(Instruction *I) {
808 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
809 switch (II->getIntrinsicID()) {
810 case Intrinsic::lifetime_start:
811 case Intrinsic::lifetime_end:
812 case Intrinsic::invariant_end:
813 case Intrinsic::launder_invariant_group:
814 case Intrinsic::assume:
815 return true;
816 case Intrinsic::dbg_addr:
817 case Intrinsic::dbg_declare:
818 case Intrinsic::dbg_label:
819 case Intrinsic::dbg_value:
820      llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
821 default:
822 return false;
823 }
824 }
825 return false;
826}
827
828// Check if we can ignore \p D for DSE.
829bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller,
830 const TargetLibraryInfo &TLI) {
831 Instruction *DI = D->getMemoryInst();
832 // Calls that only access inaccessible memory cannot read or write any memory
833 // locations we consider for elimination.
834 if (auto *CB = dyn_cast<CallBase>(DI))
835 if (CB->onlyAccessesInaccessibleMemory()) {
836 if (isAllocLikeFn(DI, &TLI))
837 return false;
838 return true;
839 }
840 // We can eliminate stores to locations not visible to the caller across
841 // throwing instructions.
842 if (DI->mayThrow() && !DefVisibleToCaller)
843 return true;
844
845 // We can remove the dead stores, irrespective of the fence and its ordering
846  // (release/acquire/seq_cst). Fences only constrain the ordering of
847  // already visible stores; they do not make a store visible to other
848 // threads. So, skipping over a fence does not change a store from being
849 // dead.
850 if (isa<FenceInst>(DI))
851 return true;
852
853 // Skip intrinsics that do not really read or modify memory.
854 if (isNoopIntrinsic(DI))
855 return true;
856
857 return false;
858}
859
860struct DSEState {
861 Function &F;
862 AliasAnalysis &AA;
863
864 /// The single BatchAA instance that is used to cache AA queries. It will
865 /// not be invalidated over the whole run. This is safe, because:
866 /// 1. Only memory writes are removed, so the alias cache for memory
867 /// locations remains valid.
868 /// 2. No new instructions are added (only instructions removed), so cached
869 /// information for a deleted value cannot be accessed by a re-used new
870 /// value pointer.
871 BatchAAResults BatchAA;
872
873 MemorySSA &MSSA;
874 DominatorTree &DT;
875 PostDominatorTree &PDT;
876 const TargetLibraryInfo &TLI;
877 const DataLayout &DL;
878 const LoopInfo &LI;
879
880 // Whether the function contains any irreducible control flow, useful for
881 // being accurately able to detect loops.
882 bool ContainsIrreducibleLoops;
883
884 // All MemoryDefs that potentially could kill other MemDefs.
885 SmallVector<MemoryDef *, 64> MemDefs;
886 // Any that should be skipped as they are already deleted
887 SmallPtrSet<MemoryAccess *, 4> SkipStores;
888 // Keep track of all of the objects that are invisible to the caller before
889 // the function returns.
890 // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
891 DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
892 // Keep track of all of the objects that are invisible to the caller after
893 // the function returns.
894 DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
895 // Keep track of blocks with throwing instructions not modeled in MemorySSA.
896 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
897 // Post-order numbers for each basic block. Used to figure out if memory
898 // accesses are executed before another access.
899 DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
900
901 /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
902 /// basic block.
903 DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
904
905 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
906 PostDominatorTree &PDT, const TargetLibraryInfo &TLI,
907 const LoopInfo &LI)
908 : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI),
909 DL(F.getParent()->getDataLayout()), LI(LI) {}
910
911 static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
912 DominatorTree &DT, PostDominatorTree &PDT,
913 const TargetLibraryInfo &TLI, const LoopInfo &LI) {
914 DSEState State(F, AA, MSSA, DT, PDT, TLI, LI);
915 // Collect blocks with throwing instructions not modeled in MemorySSA and
916 // alloc-like objects.
917 unsigned PO = 0;
918 for (BasicBlock *BB : post_order(&F)) {
919 State.PostOrderNumbers[BB] = PO++;
920 for (Instruction &I : *BB) {
921 MemoryAccess *MA = MSSA.getMemoryAccess(&I);
922 if (I.mayThrow() && !MA)
923 State.ThrowingBlocks.insert(I.getParent());
924
925 auto *MD = dyn_cast_or_null<MemoryDef>(MA);
926 if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
927 (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
928 State.MemDefs.push_back(MD);
929 }
930 }
931
932 // Treat byval or inalloca arguments the same as Allocas, stores to them are
933 // dead at the end of the function.
934 for (Argument &AI : F.args())
935 if (AI.hasPassPointeeByValueCopyAttr()) {
936 // For byval, the caller doesn't know the address of the allocation.
937 if (AI.hasByValAttr())
938 State.InvisibleToCallerBeforeRet.insert({&AI, true});
939 State.InvisibleToCallerAfterRet.insert({&AI, true});
940 }
941
942 // Collect whether there is any irreducible control flow in the function.
943 State.ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);
944
945 return State;
946 }
947
948 /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI
949 /// instruction) completely overwrites a store to the 'Earlier' location.
950 /// (by \p EarlierI instruction).
951 /// Return OW_MaybePartial if \p Later does not completely overwrite
952 /// \p Earlier, but they both write to the same underlying object. In that
953 /// case, use isPartialOverwrite to check if \p Later partially overwrites
954 /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined.
955 OverwriteResult
956 isOverwrite(const Instruction *LaterI, const Instruction *EarlierI,
957 const MemoryLocation &Later, const MemoryLocation &Earlier,
958 int64_t &EarlierOff, int64_t &LaterOff) {
959 // AliasAnalysis does not always account for loops. Limit overwrite checks
960 // to dependencies for which we can guarantee they are independent of any
961 // loops they are in.
962 if (!isGuaranteedLoopIndependent(EarlierI, LaterI, Earlier))
963 return OW_Unknown;
964
965 // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
966 // get imprecise values here, though (except for unknown sizes).
967 if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) {
968 // In case no constant size is known, try to use IR values for the number
969 // of bytes written and check if they match.
970 const auto *LaterMemI = dyn_cast<MemIntrinsic>(LaterI);
971 const auto *EarlierMemI = dyn_cast<MemIntrinsic>(EarlierI);
972 if (LaterMemI && EarlierMemI) {
973 const Value *LaterV = LaterMemI->getLength();
974 const Value *EarlierV = EarlierMemI->getLength();
975 if (LaterV == EarlierV && BatchAA.isMustAlias(Earlier, Later))
976 return OW_Complete;
977 }
978
979 // Masked stores have imprecise locations, but we can reason about them
980 // to some extent.
981 return isMaskedStoreOverwrite(LaterI, EarlierI, BatchAA);
982 }
983
984 const uint64_t LaterSize = Later.Size.getValue();
985 const uint64_t EarlierSize = Earlier.Size.getValue();
986
987 // Query the alias information
988 AliasResult AAR = BatchAA.alias(Later, Earlier);
989
990 // If the start pointers are the same, we just have to compare sizes to see if
991 // the later store was larger than the earlier store.
992 if (AAR == AliasResult::MustAlias) {
993 // Make sure that the Later size is >= the Earlier size.
994 if (LaterSize >= EarlierSize)
995 return OW_Complete;
996 }
997
998 // If we hit a partial alias we may have a full overwrite
999 if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
1000 int32_t Off = AAR.getOffset();
1001 if (Off >= 0 && (uint64_t)Off + EarlierSize <= LaterSize)
1002 return OW_Complete;
1003 }
1004
1005 // Check to see if the later store is to the entire object (either a global,
1006 // an alloca, or a byval/inalloca argument). If so, then it clearly
1007 // overwrites any other store to the same object.
1008 const Value *P1 = Earlier.Ptr->stripPointerCasts();
1009 const Value *P2 = Later.Ptr->stripPointerCasts();
1010 const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2);
1011
1012 // If we can't resolve the same pointers to the same object, then we can't
1013 // analyze them at all.
1014 if (UO1 != UO2)
1015 return OW_Unknown;
1016
1017 // If the "Later" store is to a recognizable object, get its size.
1018 uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, &F);
1019 if (ObjectSize != MemoryLocation::UnknownSize)
1020 if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
1021 return OW_Complete;
1022
1023 // Okay, we have stores to two completely different pointers. Try to
1024 // decompose the pointer into a "base + constant_offset" form. If the base
1025 // pointers are equal, then we can reason about the two stores.
1026 EarlierOff = 0;
1027 LaterOff = 0;
1028 const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
1029 const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
1030
1031 // If the base pointers still differ, we have two completely different stores.
1032 if (BP1 != BP2)
1033 return OW_Unknown;
1034
1035 // The later access completely overlaps the earlier store if and only if
1036 // both start and end of the earlier one is "inside" the later one:
1037 // |<->|--earlier--|<->|
1038 // |-------later-------|
1039 // Accesses may overlap if and only if start of one of them is "inside"
1040 // another one:
1041 // |<->|--earlier--|<----->|
1042 // |-------later-------|
1043 // OR
1044 // |----- earlier -----|
1045 // |<->|---later---|<----->|
1046 //
1047 // We have to be careful here as *Off is signed while *.Size is unsigned.
1048
1049 // Check if the earlier access starts "not before" the later one.
1050 if (EarlierOff >= LaterOff) {
1051 // If the earlier access ends "not after" the later access then the earlier
1052 // one is completely overwritten by the later one.
1053 if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
1054 return OW_Complete;
1055 // If start of the earlier access is "before" end of the later access then
1056 // accesses overlap.
1057 else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize)
1058 return OW_MaybePartial;
1059 }
1060 // If start of the later access is "before" end of the earlier access then
1061 // accesses overlap.
1062 else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) {
1063 return OW_MaybePartial;
1064 }
1065
1066 // Can reach here only if accesses are known not to overlap. There is no
1067 // dedicated code to indicate no overlap so signal "unknown".
1068 return OW_Unknown;
1069 }
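
For intuition, the signed-offset / unsigned-size check at the end of isOverwrite can be exercised in isolation. The sketch below is a hypothetical standalone helper (not code from this file) that mirrors that arithmetic on made-up offsets and sizes.

#include <cassert>
#include <cstdint>

enum OverlapKind { Complete, MaybePartial, Unknown };

// Mirrors isOverwrite's tail: the earlier access is completely overwritten
// iff it both starts and ends inside the later one; the accesses may overlap
// if the start of one of them lies inside the other.
static OverlapKind classifyOverlap(int64_t EarlierOff, uint64_t EarlierSize,
                                   int64_t LaterOff, uint64_t LaterSize) {
  if (EarlierOff >= LaterOff) {
    if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
      return Complete;
    if (uint64_t(EarlierOff - LaterOff) < LaterSize)
      return MaybePartial;
  } else if (uint64_t(LaterOff - EarlierOff) < EarlierSize) {
    return MaybePartial;
  }
  return Unknown;
}

int main() {
  // |<->|--earlier--|<->| lies fully inside |-------later-------|.
  assert(classifyOverlap(4, 4, 0, 16) == Complete);
  // Earlier starts inside later but runs past its end: partial overlap.
  assert(classifyOverlap(4, 32, 0, 16) == MaybePartial);
  // Disjoint ranges: like the real code, report Unknown, not "no overlap".
  assert(classifyOverlap(32, 4, 0, 16) == Unknown);
  return 0;
}

As in the function above, the sketch has no dedicated result for provably disjoint accesses and falls back to Unknown.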
1070
1071 bool isInvisibleToCallerAfterRet(const Value *V) {
1072 if (isa<AllocaInst>(V))
1073 return true;
1074 auto I = InvisibleToCallerAfterRet.insert({V, false});
1075 if (I.second) {
1076 if (!isInvisibleToCallerBeforeRet(V)) {
1077 I.first->second = false;
1078 } else {
1079 auto *Inst = dyn_cast<Instruction>(V);
1080 if (Inst && isAllocLikeFn(Inst, &TLI))
1081 I.first->second = !PointerMayBeCaptured(V, true, false);
1082 }
1083 }
1084 return I.first->second;
1085 }
1086
1087 bool isInvisibleToCallerBeforeRet(const Value *V) {
1088 if (isa<AllocaInst>(V))
1089 return true;
1090 auto I = InvisibleToCallerBeforeRet.insert({V, false});
1091 if (I.second) {
1092 auto *Inst = dyn_cast<Instruction>(V);
1093 if (Inst && isAllocLikeFn(Inst, &TLI))
1094 // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1095 // with the killing MemoryDef. But we refrain from doing so for now to
1096 // limit compile-time and this does not cause any changes to the number
1097 // of stores removed on a large test set in practice.
1098 I.first->second = !PointerMayBeCaptured(V, false, true);
1099 }
1100 return I.first->second;
1101 }
1102
1103 Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1104 if (!I->mayWriteToMemory())
1105 return None;
1106
1107 if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1108 return {MemoryLocation::getForDest(MTI)};
1109
1110 if (auto *CB = dyn_cast<CallBase>(I)) {
1111 // If the functions may write to memory we do not know about, bail out.
1112 if (!CB->onlyAccessesArgMemory() &&
1113 !CB->onlyAccessesInaccessibleMemOrArgMem())
1114 return None;
1115
1116 LibFunc LF;
1117 if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1118 switch (LF) {
1119 case LibFunc_strcpy:
1120 case LibFunc_strncpy:
1121 case LibFunc_strcat:
1122 case LibFunc_strncat:
1123 return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1124 default:
1125 break;
1126 }
1127 }
1128 switch (CB->getIntrinsicID()) {
1129 case Intrinsic::init_trampoline:
1130 return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1131 case Intrinsic::masked_store:
1132 return {MemoryLocation::getForArgument(CB, 1, TLI)};
1133 default:
1134 break;
1135 }
1136 return None;
1137 }
1138
1139 return MemoryLocation::getOrNone(I);
1140 }
1141
1142 /// Returns true if \p UseInst completely overwrites \p DefLoc
1143 /// (stored by \p DefInst).
1144 bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1145 Instruction *UseInst) {
1146 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1147 // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1148 // MemoryDef.
1149 if (!UseInst->mayWriteToMemory())
1150 return false;
1151
1152 if (auto *CB = dyn_cast<CallBase>(UseInst))
1153 if (CB->onlyAccessesInaccessibleMemory())
1154 return false;
1155
1156 int64_t InstWriteOffset, DepWriteOffset;
1157 if (auto CC = getLocForWriteEx(UseInst))
1158 return isOverwrite(UseInst, DefInst, *CC, DefLoc, DepWriteOffset,
1159 InstWriteOffset) == OW_Complete;
1160 return false;
1161 }
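
For intuition, a minimal source-level pattern this complete-overwrite check targets, assuming both writes have the same precise size and nothing reads the buffer in between (the function name is made up for illustration):

#include <cstring>

void initThenOverwrite(char *dst, const char *src) {
  std::memset(dst, 0, 64);   // candidate dead store: no read before the copy
  std::memcpy(dst, src, 64); // completely overwrites the same 64 bytes
}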
1162
1163 /// Returns true if \p Def is not read before returning from the function.
1164 bool isWriteAtEndOfFunction(MemoryDef *Def) {
1165 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " ("
1166 << *Def->getMemoryInst()
1167 << ") is at the end the function \n");
1168
1169 auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1170 if (!MaybeLoc) {
1171 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n");
1172 return false;
1173 }
1174
1175 SmallVector<MemoryAccess *, 4> WorkList;
1176 SmallPtrSet<MemoryAccess *, 8> Visited;
1177 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1178 if (!Visited.insert(Acc).second)
1179 return;
1180 for (Use &U : Acc->uses())
1181 WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1182 };
1183 PushMemUses(Def);
1184 for (unsigned I = 0; I < WorkList.size(); I++) {
1185 if (WorkList.size() >= MemorySSAScanLimit) {
1186 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n");
1187 return false;
1188 }
1189
1190 MemoryAccess *UseAccess = WorkList[I];
1191 // Simply adding the users of MemoryPhi to the worklist is not enough,
1192 // because we might miss read clobbers in different iterations of a loop,
1193 // for example.
1194 // TODO: Add support for phi translation to handle the loop case.
1195 if (isa<MemoryPhi>(UseAccess))
1196 return false;
1197
1198 // TODO: Checking for aliasing is expensive. Consider reducing the amount
1199 // of times this is called and/or caching it.
1200 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1201 if (isReadClobber(*MaybeLoc, UseInst)) {
1202 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n");
1203 return false;
1204 }
1205
1206 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1207 PushMemUses(UseDef);
1208 }
1209 return true;
1210 }
1211
1212 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1213 /// pair with the MemoryLocation terminated by \p I and a boolean flag
1214 /// indicating whether \p I is a free-like call.
1215 Optional<std::pair<MemoryLocation, bool>>
1216 getLocForTerminator(Instruction *I) const {
1217 uint64_t Len;
1218 Value *Ptr;
1219 if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1220 m_Value(Ptr))))
1221 return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1222
1223 if (auto *CB = dyn_cast<CallBase>(I)) {
1224 if (isFreeCall(I, &TLI))
1225 return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
1226 true)};
1227 }
1228
1229 return None;
1230 }
1231
1232 /// Returns true if \p I is a memory terminator instruction like
1233 /// llvm.lifetime.end or free.
1234 bool isMemTerminatorInst(Instruction *I) const {
1235 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1236 return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1237 isFreeCall(I, &TLI);
1238 }
1239
1240 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1241 /// instruction \p AccessI.
1242 bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1243 Instruction *MaybeTerm) {
1244 Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1245 getLocForTerminator(MaybeTerm);
1246
1247 if (!MaybeTermLoc)
1248 return false;
1249
1250 // If the terminator is a free-like call, all accesses to the underlying
1251 // object can be considered terminated.
1252 if (getUnderlyingObject(Loc.Ptr) !=
1253 getUnderlyingObject(MaybeTermLoc->first.Ptr))
1254 return false;
1255
1256 auto TermLoc = MaybeTermLoc->first;
1257 if (MaybeTermLoc->second) {
1258 const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1259 return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1260 }
1261 int64_t InstWriteOffset, DepWriteOffset;
1262 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DepWriteOffset,
1263 InstWriteOffset) == OW_Complete;
1264 }
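
For intuition, a hedged source-level example of the free-like terminator case handled above, assuming the block is not read between the store and the free (the function name is made up for illustration):

#include <cstddef>
#include <cstdlib>

void storeThenFree(std::size_t n) {
  int *p = static_cast<int *>(std::malloc(n * sizeof(int)));
  if (!p)
    return;
  p[0] = 42;    // dead: the object is freed below with no intervening read
  std::free(p); // terminates all accesses to the underlying object
}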
1265
1266 // Returns true if \p Use may read from \p DefLoc.
1267 bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1268 if (isNoopIntrinsic(UseInst))
1269 return false;
1270
1271 // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1272 // treated as read clobber.
1273 if (auto SI = dyn_cast<StoreInst>(UseInst))
1274 return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1275
1276 if (!UseInst->mayReadFromMemory())
1277 return false;
1278
1279 if (auto *CB = dyn_cast<CallBase>(UseInst))
1280 if (CB->onlyAccessesInaccessibleMemory())
1281 return false;
1282
1283 // NOTE: For calls, the number of stores removed could be slightly improved
1284 // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that proved
1285 // too expensive compared to the benefits in practice. For now, avoid more
1286 // expensive analysis to limit compile-time.
1287 return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1288 }
1289
1290 /// Returns true if a dependency between \p Current and \p KillingDef is
1291 /// guaranteed to be loop invariant for the loops that they are in. Either
1292 /// because they are known to be in the same block, in the same loop level or
1293 /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation
1294 /// during execution of the containing function.
1295 bool isGuaranteedLoopIndependent(const Instruction *Current,
1296 const Instruction *KillingDef,
1297 const MemoryLocation &CurrentLoc) {
1298 // If the dependency is within the same block or loop level (being careful
1299 // of irreducible loops), we know that AA will return a valid result for the
1300 // memory dependency. (Both at the function level, outside of any loop,
1301 // would also be valid but we currently disable that to limit compile time).
1302 if (Current->getParent() == KillingDef->getParent())
1303 return true;
1304 const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
1305 if (!ContainsIrreducibleLoops && CurrentLI &&
1306 CurrentLI == LI.getLoopFor(KillingDef->getParent()))
1307 return true;
1308 // Otherwise check the memory location is invariant to any loops.
1309 return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
1310 }
1311
1312 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1313 /// loop. In particular, this guarantees that it only references a single
1314 /// MemoryLocation during execution of the containing function.
1315 bool isGuaranteedLoopInvariant(const Value *Ptr) {
1316 auto IsGuaranteedLoopInvariantBase = [this](const Value *Ptr) {
1317 Ptr = Ptr->stripPointerCasts();
1318 if (auto *I = dyn_cast<Instruction>(Ptr)) {
1319 if (isa<AllocaInst>(Ptr))
1320 return true;
1321
1322 if (isAllocLikeFn(I, &TLI))
1323 return true;
1324
1325 return false;
1326 }
1327 return true;
1328 };
1329
1330 Ptr = Ptr->stripPointerCasts();
1331 if (auto *I = dyn_cast<Instruction>(Ptr)) {
1332 if (I->getParent()->isEntryBlock())
1333 return true;
1334 }
1335 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
1336 return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
1337 GEP->hasAllConstantIndices();
1338 }
1339 return IsGuaranteedLoopInvariantBase(Ptr);
1340 }
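
For intuition, a hedged source-level example of why loop variance matters for the checks above (hypothetical function): the pointer a + i names a different location on every iteration, so it must not be treated as a single MemoryLocation when reasoning about later stores.

#include <cstddef>

void fill(int *a, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i)
    a[i] = 1; // a + i is not loop invariant: a different element each time
  a[0] = 2;   // kills only the i == 0 store; the other loop stores stay live
}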
1341
1342 // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with
1343 // no read access between them or on any other path to a function exit block
1344 // if \p DefLoc is not accessible after the function returns. If there is no
1345 // such MemoryDef, return None. The returned value may not (completely)
1346 // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing
1347 // MemoryUse (read).
1348 Optional<MemoryAccess *>
1349 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1350 const MemoryLocation &DefLoc, const Value *DefUO,
1351 unsigned &ScanLimit, unsigned &WalkerStepLimit,
1352 bool IsMemTerm, unsigned &PartialLimit) {
1353 if (ScanLimit == 0 || WalkerStepLimit == 0) {
1354 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1355 return None;
1356 }
1357
1358 MemoryAccess *Current = StartAccess;
1359 Instruction *KillingI = KillingDef->getMemoryInst();
1360 LLVM_DEBUG(dbgs() << " trying to get dominating access\n");
1361
1362 // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1363 Optional<MemoryLocation> CurrentLoc;
1364 for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
1365 LLVM_DEBUG({
1366 dbgs() << " visiting " << *Current;
1367 if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1368 dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1369 << ")";
1370 dbgs() << "\n";
1371 });
1372
1373 // Reached TOP.
1374 if (MSSA.isLiveOnEntryDef(Current)) {
1375 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n");
1376 return None;
1377 }
1378
1379 // Cost of a step. Accesses in the same block are more likely to be valid
1380 // candidates for elimination, hence consider them cheaper.
1381 unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
1382 ? MemorySSASameBBStepCost
1383 : MemorySSAOtherBBStepCost;
1384 if (WalkerStepLimit <= StepCost) {
1385 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n");
1386 return None;
1387 }
1388 WalkerStepLimit -= StepCost;
1389
1390 // Return for MemoryPhis. They cannot be eliminated directly and the
1391 // caller is responsible for traversing them.
1392 if (isa<MemoryPhi>(Current)) {
1393 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n");
1394 return Current;
1395 }
1396
1397 // Below, check if CurrentDef is a valid candidate to be eliminated by
1398 // KillingDef. If it is not, check the next candidate.
1399 MemoryDef *CurrentDef = cast<MemoryDef>(Current);
1400 Instruction *CurrentI = CurrentDef->getMemoryInst();
1401
1402 if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO), TLI))
1403 continue;
1404
1405 // Before we try to remove anything, check for any extra throwing
1406 // instructions that block us from DSEing
1407 if (mayThrowBetween(KillingI, CurrentI, DefUO)) {
1408 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n");
1409 return None;
1410 }
1411
1412 // Check for anything that looks like it will be a barrier to further
1413 // removal
1414 if (isDSEBarrier(DefUO, CurrentI)) {
1415 LLVM_DEBUG(dbgs() << " ... skip, barrier\n");
1416 return None;
1417 }
1418
1419 // If Current is known to be on path that reads DefLoc or is a read
1420 // clobber, bail out, as the path is not profitable. We skip this check
1421 // for intrinsic calls, because the code knows how to handle memcpy
1422 // intrinsics.
1423 if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(DefLoc, CurrentI))
1424 return None;
1425
1426 // Quick check if there are direct uses that are read-clobbers.
1427 if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) {
1428 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1429 return !MSSA.dominates(StartAccess, UseOrDef) &&
1430 isReadClobber(DefLoc, UseOrDef->getMemoryInst());
1431 return false;
1432 })) {
1433 LLVM_DEBUG(dbgs() << " ... found a read clobber\n");
1434 return None;
1435 }
1436
1437 // If Current cannot be analyzed or is not removable, check the next
1438 // candidate.
1439 if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI))
1440 continue;
1441
1442 // If Current does not have an analyzable write location, skip it
1443 CurrentLoc = getLocForWriteEx(CurrentI);
1444 if (!CurrentLoc)
1445 continue;
1446
1447 // AliasAnalysis does not account for loops. Limit elimination to
1448 // candidates for which we can guarantee they always store to the same
1449 // memory location and are not located in different loops.
1450 if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
1451 LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n");
1452 WalkerStepLimit -= 1;
1453 continue;
1454 }
1455
1456 if (IsMemTerm) {
1457 // If the killing def is a memory terminator (e.g. lifetime.end), check
1458 // the next candidate if Current does not write the same
1459 // underlying object as the terminator.
1460 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI))
1461 continue;
1462 } else {
1463 int64_t InstWriteOffset, DepWriteOffset;
1464 auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc,
1465 DepWriteOffset, InstWriteOffset);
1466 // If Current does not write to the same object as KillingDef, check
1467 // the next candidate.
1468 if (OR == OW_Unknown)
1469 continue;
1470 else if (OR == OW_MaybePartial) {
1471 // If KillingDef only partially overwrites Current, check the next
1472 // candidate if the partial step limit is exceeded. This aggressively
1473 // limits the number of candidates for partial store elimination,
1474 // which are less likely to be removable in the end.
1475 if (PartialLimit <= 1) {
1476 WalkerStepLimit -= 1;
1477 continue;
1478 }
1479 PartialLimit -= 1;
1480 }
1481 }
1482 break;
1483 };
1484
1485 // Accesses to objects accessible after the function returns can only be
1486 // eliminated if the access is killed along all paths to the exit. Collect
1487 // the blocks with killing (=completely overwriting MemoryDefs) and check if
1488 // they cover all paths from EarlierAccess to any function exit.
1489 SmallPtrSet<Instruction *, 16> KillingDefs;
1490 KillingDefs.insert(KillingDef->getMemoryInst());
1491 MemoryAccess *EarlierAccess = Current;
1492 Instruction *EarlierMemInst =
1493 cast<MemoryDef>(EarlierAccess)->getMemoryInst();
1494 LLVM_DEBUG(dbgs() << " Checking for reads of " << *EarlierAccess << " ("
1495 << *EarlierMemInst << ")\n");
1496
1497 SmallSetVector<MemoryAccess *, 32> WorkList;
1498 auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1499 for (Use &U : Acc->uses())
1500 WorkList.insert(cast<MemoryAccess>(U.getUser()));
1501 };
1502 PushMemUses(EarlierAccess);
1503
1504 // Check if EarlierDef may be read.
1505 for (unsigned I = 0; I < WorkList.size(); I++) {
1506 MemoryAccess *UseAccess = WorkList[I];
1507
1508 LLVM_DEBUG(dbgs() << " " << *UseAccess);
1509 // Bail out if the number of accesses to check exceeds the scan limit.
1510 if (ScanLimit < (WorkList.size() - I)) {
1511 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1512 return None;
1513 }
1514 --ScanLimit;
1515 NumDomMemDefChecks++;
1516
1517 if (isa<MemoryPhi>(UseAccess)) {
1518 if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1519 return DT.properlyDominates(KI->getParent(),
1520 UseAccess->getBlock());
1521 })) {
1522 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1523 continue;
1524 }
1525 LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n");
1526 PushMemUses(UseAccess);
1527 continue;
1528 }
1529
1530 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1531 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1532
1533 if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
1534 return DT.dominates(KI, UseInst);
1535 })) {
1536 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
1537 continue;
1538 }
1539
1540 // A memory terminator kills all preceding MemoryDefs and all succeeding
1541 // MemoryAccesses. We do not have to check its users.
1542 if (isMemTerminator(*CurrentLoc, EarlierMemInst, UseInst)) {
1543 LLVM_DEBUG(
1544 dbgs()
1545 << " ... skipping, memterminator invalidates following accesses\n");
1546 continue;
1547 }
1548
1549 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1550 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n");
1551 PushMemUses(UseAccess);
1552 continue;
1553 }
1554
1555 if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) {
1556 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n");
1557 return None;
1558 }
1559
1560 // Uses which may read the original MemoryDef mean we cannot eliminate the
1561 // original MD. Stop walk.
1562 if (isReadClobber(*CurrentLoc, UseInst)) {
1563 LLVM_DEBUG(dbgs() << " ... found read clobber\n");
1564 return None;
1565 }
1566
1567 // If this worklist walks back to the original memory access (and the
1568 // pointer is not guaranteed loop invariant) then we cannot assume that a
1569 // store kills itself.
1570 if (EarlierAccess == UseAccess &&
1571 !isGuaranteedLoopInvariant(CurrentLoc->Ptr)) {
1572 LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n");
1573 return None;
1574 }
1575 // Otherwise, for the KillingDef and EarlierAccess we only have to check
1576 // if it reads the memory location.
1577 // TODO: It would probably be better to check for self-reads before
1578 // calling the function.
1579 if (KillingDef == UseAccess || EarlierAccess == UseAccess) {
1580 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n");
1581 continue;
1582 }
1583
1584 // Check all uses for MemoryDefs, except for defs completely overwriting
1585 // the original location. Otherwise we have to check uses of *all*
1586 // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
1587 // miss cases like the following
1588 // 1 = Def(LoE) ; <----- EarlierDef stores [0,1]
1589 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3]
1590 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3].
1591 // (The Use points to the *first* Def it may alias)
1592 // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias,
1593 // stores [0,1]
1594 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1595 if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) {
1596 BasicBlock *MaybeKillingBlock = UseInst->getParent();
1597 if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1598 PostOrderNumbers.find(EarlierAccess->getBlock())->second) {
1599 if (!isInvisibleToCallerAfterRet(DefUO)) {
1600 LLVM_DEBUG(dbgs()
1601 << " ... found killing def " << *UseInst << "\n");
1602 KillingDefs.insert(UseInst);
1603 }
1604 } else {
1605 LLVM_DEBUG(dbgs()
1606 << " ... found preceeding def " << *UseInst << "\n");
1607 return None;
1608 }
1609 } else
1610 PushMemUses(UseDef);
1611 }
1612 }
1613
1614 // For accesses to locations visible after the function returns, make sure
1615 // that the location is killed (=overwritten) along all paths from
1616 // EarlierAccess to the exit.
1617 if (!isInvisibleToCallerAfterRet(DefUO)) {
1618 SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1619 for (Instruction *KD : KillingDefs)
1620 KillingBlocks.insert(KD->getParent());
1621 assert(!KillingBlocks.empty() &&
1622 "Expected at least a single killing block");
1623
1624 // Find the common post-dominator of all killing blocks.
1625 BasicBlock *CommonPred = *KillingBlocks.begin();
1626 for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
1627 I != E; I++) {
1628 if (!CommonPred)
1629 break;
1630 CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
1631 }
1632
1633 // If CommonPred is in the set of killing blocks, just check if it
1634 // post-dominates EarlierAccess.
1635 if (KillingBlocks.count(CommonPred)) {
1636 if (PDT.dominates(CommonPred, EarlierAccess->getBlock()))
1637 return {EarlierAccess};
1638 return None;
1639 }
1640
1641 // If the common post-dominator does not post-dominate EarlierAccess,
1642 // there is a path from EarlierAccess to an exit not going through a
1643 // killing block.
1644 if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) {
1645 SetVector<BasicBlock *> WorkList;
1646
1647 // If CommonPred is null, there are multiple exits from the function.
1648 // They all have to be added to the worklist.
1649 if (CommonPred)
1650 WorkList.insert(CommonPred);
1651 else
1652 for (BasicBlock *R : PDT.roots())
1653 WorkList.insert(R);
1654
1655 NumCFGTries++;
1656 // Check if all paths starting from an exit node go through one of the
1657 // killing blocks before reaching EarlierAccess.
1658 for (unsigned I = 0; I < WorkList.size(); I++) {
1659 NumCFGChecks++;
1660 BasicBlock *Current = WorkList[I];
1661 if (KillingBlocks.count(Current))
1662 continue;
1663 if (Current == EarlierAccess->getBlock())
1664 return None;
1665
1666 // EarlierAccess is reachable from the entry, so we don't have to
1667 // explore unreachable blocks further.
1668 if (!DT.isReachableFromEntry(Current))
1669 continue;
1670
1671 for (BasicBlock *Pred : predecessors(Current))
1672 WorkList.insert(Pred);
1673
1674 if (WorkList.size() >= MemorySSAPathCheckLimit)
1675 return None;
1676 }
1677 NumCFGSuccess++;
1678 return {EarlierAccess};
1679 }
1680 return None;
1681 }
1682
1683 // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is
1684 // potentially dead.
1685 return {EarlierAccess};
1686 }
1687
1688 // Delete dead memory defs
1689 void deleteDeadInstruction(Instruction *SI) {
1690 MemorySSAUpdater Updater(&MSSA);
1691 SmallVector<Instruction *, 32> NowDeadInsts;
1692 NowDeadInsts.push_back(SI);
1693 --NumFastOther;
1694
1695 while (!NowDeadInsts.empty()) {
1696 Instruction *DeadInst = NowDeadInsts.pop_back_val();
1697 ++NumFastOther;
1698
1699 // Try to preserve debug information attached to the dead instruction.
1700 salvageDebugInfo(*DeadInst);
1701 salvageKnowledge(DeadInst);
1702
1703 // Remove the Instruction from MSSA.
1704 if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1705 if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1706 SkipStores.insert(MD);
1707 }
1708 Updater.removeMemoryAccess(MA);
1709 }
1710
1711 auto I = IOLs.find(DeadInst->getParent());
1712 if (I != IOLs.end())
1713 I->second.erase(DeadInst);
1714 // Remove its operands
1715 for (Use &O : DeadInst->operands())
1716 if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1717 O = nullptr;
1718 if (isInstructionTriviallyDead(OpI, &TLI))
1719 NowDeadInsts.push_back(OpI);
1720 }
1721
1722 DeadInst->eraseFromParent();
1723 }
1724 }
1725
1726 // Check for any extra throws between SI and NI that block DSE. This only
1727 // checks extra maythrows (those that aren't MemoryDef's). MemoryDef that may
1728 // throw are handled during the walk from one def to the next.
1729 bool mayThrowBetween(Instruction *SI, Instruction *NI,
1730 const Value *SILocUnd) {
1731 // First see if we can ignore it by using the fact that SI is an
1732 // alloca/alloca like object that is not visible to the caller during
1733 // execution of the function.
1734 if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd))
1735 return false;
1736
1737 if (SI->getParent() == NI->getParent())
1738 return ThrowingBlocks.count(SI->getParent());
1739 return !ThrowingBlocks.empty();
1740 }
1741
1742 // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
1743 // act as barriers:
1744 // * A memory instruction that may throw and \p SI accesses a non-stack
1745 // object.
1746 // * Atomic stores stronger than monotonic.
1747 bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) {
1748 // If NI may throw it acts as a barrier, unless it accesses an alloca or
1749 // alloca-like object that does not escape.
1750 if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd))
1751 return true;
1752
1753 // If NI is an atomic load/store stronger than monotonic, do not try to
1754 // eliminate/reorder it.
1755 if (NI->isAtomic()) {
1756 if (auto *LI = dyn_cast<LoadInst>(NI))
1757 return isStrongerThanMonotonic(LI->getOrdering());
1758 if (auto *SI = dyn_cast<StoreInst>(NI))
1759 return isStrongerThanMonotonic(SI->getOrdering());
1760 if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI))
1761 return isStrongerThanMonotonic(ARMW->getOrdering());
1762 if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI))
1763 return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
1764 isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
1765 llvm_unreachable("other instructions should be skipped in MemorySSA");
1766 }
1767 return false;
1768 }
1769
1770 /// Eliminate writes to objects that are not visible in the caller and are not
1771 /// accessed before returning from the function.
1772 bool eliminateDeadWritesAtEndOfFunction() {
1773 bool MadeChange = false;
1774 LLVM_DEBUG(
1775 dbgs()
1776 << "Trying to eliminate MemoryDefs at the end of the function\n");
1777 for (int I = MemDefs.size() - 1; I >= 0; I--) {
1778 MemoryDef *Def = MemDefs[I];
1779 if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst()))
1780 continue;
1781
1782 Instruction *DefI = Def->getMemoryInst();
1783 auto DefLoc = getLocForWriteEx(DefI);
1784 if (!DefLoc)
1785 continue;
1786
1787 // NOTE: Currently eliminating writes at the end of a function is limited
1788 // to MemoryDefs with a single underlying object, to save compile-time. In
1789 // practice it appears the case with multiple underlying objects is very
1790 // uncommon. If it turns out to be important, we can use
1791 // getUnderlyingObjects here instead.
1792 const Value *UO = getUnderlyingObject(DefLoc->Ptr);
1793 if (!UO || !isInvisibleToCallerAfterRet(UO))
1794 continue;
1795
1796 if (isWriteAtEndOfFunction(Def)) {
1797 // See through pointer-to-pointer bitcasts
1798 LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
1799 "of the function\n");
1800 deleteDeadInstruction(DefI);
1801 ++NumFastStores;
1802 MadeChange = true;
1803 }
1804 }
1805 return MadeChange;
1806 }
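
For intuition, a hedged source-level sketch of the kind of write this routine targets (hypothetical function): the local array does not escape, so it is invisible to the caller after the return, and the final store is never read again.

#include <cstring>

int sumFirstTwo(const int *src) {
  int tmp[4];
  std::memcpy(tmp, src, sizeof(tmp));
  int result = tmp[0] + tmp[1];
  tmp[2] = 0; // candidate dead write: tmp is never read before returning
  return result;
}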
1807
1808 /// \returns true if \p Def is a no-op store, either because it
1809 /// directly stores back a loaded value or stores zero to a calloced object.
1810 bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
1811 StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
1812 MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst());
1813 Constant *StoredConstant = nullptr;
1814 if (Store)
1815 StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
1816 if (MemSet)
1817 StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1818
1819 if (StoredConstant && StoredConstant->isNullValue()) {
1820 auto *DefUOInst = dyn_cast<Instruction>(DefUO);
1821 if (DefUOInst) {
1822 if (isCallocLikeFn(DefUOInst, &TLI)) {
1823 auto *UnderlyingDef =
1824 cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
1825 // If UnderlyingDef is the clobbering access of Def, no instructions
1826 // between them can modify the memory location.
1827 auto *ClobberDef =
1828 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
1829 return UnderlyingDef == ClobberDef;
1830 }
1831
1832 if (MemSet) {
1833 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
1834 F.hasFnAttribute(Attribute::SanitizeAddress) ||
1835 F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
1836 F.getName() == "calloc")
1837 return false;
1838 auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUOInst));
1839 if (!Malloc)
1840 return false;
1841 auto *InnerCallee = Malloc->getCalledFunction();
1842 if (!InnerCallee)
1843 return false;
1844 LibFunc Func;
1845 if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
1846 Func != LibFunc_malloc)
1847 return false;
1848 if (Malloc->getOperand(0) == MemSet->getLength()) {
1849 if (DT.dominates(Malloc, MemSet) && PDT.dominates(MemSet, Malloc) &&
1850 memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT)) {
1851 IRBuilder<> IRB(Malloc);
1852 const auto &DL = Malloc->getModule()->getDataLayout();
1853 if (auto *Calloc =
1854 emitCalloc(ConstantInt::get(IRB.getIntPtrTy(DL), 1),
1855 Malloc->getArgOperand(0), IRB, TLI)) {
1856 MemorySSAUpdater Updater(&MSSA);
1857 auto *LastDef = cast<MemoryDef>(
1858 Updater.getMemorySSA()->getMemoryAccess(Malloc));
1859 auto *NewAccess = Updater.createMemoryAccessAfter(
1860 cast<Instruction>(Calloc), LastDef, LastDef);
1861 auto *NewAccessMD = cast<MemoryDef>(NewAccess);
1862 Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
1863 Updater.removeMemoryAccess(Malloc);
1864 Malloc->replaceAllUsesWith(Calloc);
1865 Malloc->eraseFromParent();
1866 return true;
1867 }
1868 return false;
1869 }
1870 }
1871 }
1872 }
1873 }
1874
1875 if (!Store)
1876 return false;
1877
1878 if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
1879 if (LoadI->getPointerOperand() == Store->getOperand(1)) {
1880 // Get the defining access for the load.
1881 auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
1882 // Fast path: the defining accesses are the same.
1883 if (LoadAccess == Def->getDefiningAccess())
1884 return true;
1885
1886 // Look through phi accesses. Recursively scan all phi accesses by
1887 // adding them to a worklist. Bail when we run into a memory def that
1888 // does not match LoadAccess.
1889 SetVector<MemoryAccess *> ToCheck;
1890 MemoryAccess *Current =
1891 MSSA.getWalker()->getClobberingMemoryAccess(Def);
1892 // We don't want to bail when we run into the store memory def. But,
1893 // the phi access may point to it. So, pretend like we've already
1894 // checked it.
1895 ToCheck.insert(Def);
1896 ToCheck.insert(Current);
1897 // Start at current (1) to simulate already having checked Def.
1898 for (unsigned I = 1; I < ToCheck.size(); ++I) {
1899 Current = ToCheck[I];
1900 if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
1901 // Check all the operands.
1902 for (auto &Use : PhiAccess->incoming_values())
1903 ToCheck.insert(cast<MemoryAccess>(&Use));
1904 continue;
1905 }
1906
1907 // If we found a memory def, bail. This happens when we have an
1908 // unrelated write in between an otherwise noop store.
1909 assert(isa<MemoryDef>(Current) &&
1910 "Only MemoryDefs should reach here.");
1911 // TODO: Skip no alias MemoryDefs that have no aliasing reads.
1912 // We are searching for the definition of the store's destination.
1913 // So, if that is the same definition as the load, then this is a
1914 // noop. Otherwise, fail.
1915 if (LoadAccess != Current)
1916 return false;
1917 }
1918 return true;
1919 }
1920 }
1921
1922 return false;
1923 }
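
For intuition, hedged source-level examples of the two no-op patterns recognized above (function names made up for illustration): storing back a value that was just loaded from the same location, and zero-filling memory that a calloc-like allocation already guarantees to be zero.

#include <cstddef>
#include <cstdlib>
#include <cstring>

void storeBackLoadedValue(int *p) {
  int v = *p;
  *p = v; // no-op: the location already holds this exact value
}

int *zeroFilledAllocation(std::size_t n) {
  int *p = static_cast<int *>(std::calloc(n, sizeof(int)));
  if (p)
    std::memset(p, 0, n * sizeof(int)); // no-op: calloc already zeroed it
  return p;
}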
1924};
1925
1926static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1927 DominatorTree &DT, PostDominatorTree &PDT,
1928 const TargetLibraryInfo &TLI,
1929 const LoopInfo &LI) {
1930 bool MadeChange = false;
1931
1932 DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI, LI);
1933 // For each store:
1934 for (unsigned I = 0; I < State.MemDefs.size(); I++) {
1
Assuming the condition is false
2
Loop condition is false. Execution continues on line 2087
1935 MemoryDef *KillingDef = State.MemDefs[I];
1936 if (State.SkipStores.count(KillingDef))
1937 continue;
1938 Instruction *SI = KillingDef->getMemoryInst();
1939
1940 Optional<MemoryLocation> MaybeSILoc;
1941 if (State.isMemTerminatorInst(SI))
1942 MaybeSILoc = State.getLocForTerminator(SI).map(
1943 [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
1944 else
1945 MaybeSILoc = State.getLocForWriteEx(SI);
1946
1947 if (!MaybeSILoc) {
1948 LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
1949 << *SI << "\n");
1950 continue;
1951 }
1952 MemoryLocation SILoc = *MaybeSILoc;
1953 assert(SILoc.Ptr && "SILoc should not be null");
1954 const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);
1955
1956 MemoryAccess *Current = KillingDef;
1957 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
1958 << *Current << " (" << *SI << ")\n");
1959
1960 unsigned ScanLimit = MemorySSAScanLimit;
1961 unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
1962 unsigned PartialLimit = MemorySSAPartialStoreLimit;
1963 // Worklist of MemoryAccesses that may be killed by KillingDef.
1964 SetVector<MemoryAccess *> ToCheck;
1965
1966 if (SILocUnd)
1967 ToCheck.insert(KillingDef->getDefiningAccess());
1968
1969 bool Shortend = false;
1970 bool IsMemTerm = State.isMemTerminatorInst(SI);
1971 // Check if MemoryAccesses in the worklist are killed by KillingDef.
1972 for (unsigned I = 0; I < ToCheck.size(); I++) {
1973 Current = ToCheck[I];
1974 if (State.SkipStores.count(Current))
1975 continue;
1976
1977 Optional<MemoryAccess *> Next = State.getDomMemoryDef(
1978 KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit,
1979 IsMemTerm, PartialLimit);
1980
1981 if (!Next) {
1982 LLVM_DEBUG(dbgs() << " finished walk\n");
1983 continue;
1984 }
1985
1986 MemoryAccess *EarlierAccess = *Next;
1987 LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess);
1988 if (isa<MemoryPhi>(EarlierAccess)) {
1989 LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n");
1990 for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) {
1991 MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
1992 BasicBlock *IncomingBlock = IncomingAccess->getBlock();
1993 BasicBlock *PhiBlock = EarlierAccess->getBlock();
1994
1995 // We only consider incoming MemoryAccesses that come before the
1996 // MemoryPhi. Otherwise we could discover candidates that do not
1997 // strictly dominate our starting def.
1998 if (State.PostOrderNumbers[IncomingBlock] >
1999 State.PostOrderNumbers[PhiBlock])
2000 ToCheck.insert(IncomingAccess);
2001 }
2002 continue;
2003 }
2004 auto *NextDef = cast<MemoryDef>(EarlierAccess);
2005 Instruction *NI = NextDef->getMemoryInst();
2006 LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
2007 ToCheck.insert(NextDef->getDefiningAccess());
2008 NumGetDomMemoryDefPassed++;
2009
2010 if (!DebugCounter::shouldExecute(MemorySSACounter))
2011 continue;
2012
2013 MemoryLocation NILoc = *State.getLocForWriteEx(NI);
2014
2015 if (IsMemTerm) {
2016 const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
2017 if (SILocUnd != NIUnd)
2018 continue;
2019 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
2020 << "\n KILLER: " << *SI << '\n');
2021 State.deleteDeadInstruction(NI);
2022 ++NumFastStores;
2023 MadeChange = true;
2024 } else {
2025 // Check if NI overwrites SI.
2026 int64_t InstWriteOffset, DepWriteOffset;
2027 OverwriteResult OR = State.isOverwrite(SI, NI, SILoc, NILoc,
2028 DepWriteOffset, InstWriteOffset);
2029 if (OR == OW_MaybePartial) {
2030 auto Iter = State.IOLs.insert(
2031 std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
2032 NI->getParent(), InstOverlapIntervalsTy()));
2033 auto &IOL = Iter.first->second;
2034 OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset,
2035 NI, IOL);
2036 }
2037
2038 if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
2039 auto *Earlier = dyn_cast<StoreInst>(NI);
2040 auto *Later = dyn_cast<StoreInst>(SI);
2041 // We are re-using tryToMergePartialOverlappingStores, which requires
2042 // Earlier to dominate Later.
2043 // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
2044 if (Earlier && Later && DT.dominates(Earlier, Later)) {
2045 if (Constant *Merged = tryToMergePartialOverlappingStores(
2046 Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL,
2047 State.BatchAA, &DT)) {
2048
2049 // Update stored value of earlier store to merged constant.
2050 Earlier->setOperand(0, Merged);
2051 ++NumModifiedStores;
2052 MadeChange = true;
2053
2054 Shortend = true;
2055 // Remove later store and remove any outstanding overlap intervals
2056 // for the updated store.
2057 State.deleteDeadInstruction(Later);
2058 auto I = State.IOLs.find(Earlier->getParent());
2059 if (I != State.IOLs.end())
2060 I->second.erase(Earlier);
2061 break;
2062 }
2063 }
2064 }
2065
2066 if (OR == OW_Complete) {
2067 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
2068 << "\n KILLER: " << *SI << '\n');
2069 State.deleteDeadInstruction(NI);
2070 ++NumFastStores;
2071 MadeChange = true;
2072 }
2073 }
2074 }
2075
2076 // Check if the store is a no-op.
2077 if (!Shortend && isRemovable(SI) &&
2078 State.storeIsNoop(KillingDef, SILocUnd)) {
2079 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *SI << '\n');
2080 State.deleteDeadInstruction(SI);
2081 NumRedundantStores++;
2082 MadeChange = true;
2083 continue;
2084 }
2085 }
2086
2087 if (EnablePartialOverwriteTracking)
3
Assuming the condition is true
4
Taking true branch
2088 for (auto &KV : State.IOLs)
2089 MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI);
5
Calling 'removePartiallyOverlappedStores'
2090
2091 MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2092 return MadeChange;
2093}
2094} // end anonymous namespace
2095
2096//===----------------------------------------------------------------------===//
2097// DSE Pass
2098//===----------------------------------------------------------------------===//
2099PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2100 AliasAnalysis &AA = AM.getResult<AAManager>(F);
2101 const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2102 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2103 MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2104 PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2105 LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
2106
2107 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);
2108
2109#ifdef LLVM_ENABLE_STATS
2110 if (AreStatisticsEnabled())
2111 for (auto &I : instructions(F))
2112 NumRemainingStores += isa<StoreInst>(&I);
2113#endif
2114
2115 if (!Changed)
2116 return PreservedAnalyses::all();
2117
2118 PreservedAnalyses PA;
2119 PA.preserveSet<CFGAnalyses>();
2120 PA.preserve<MemorySSAAnalysis>();
2121 PA.preserve<LoopAnalysis>();
2122 return PA;
2123}
2124
2125namespace {
2126
2127/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
2128class DSELegacyPass : public FunctionPass {
2129public:
2130 static char ID; // Pass identification, replacement for typeid
2131
2132 DSELegacyPass() : FunctionPass(ID) {
2133 initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
2134 }
2135
2136 bool runOnFunction(Function &F) override {
2137 if (skipFunction(F))
2138 return false;
2139
2140 AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2141 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2142 const TargetLibraryInfo &TLI =
2143 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2144 MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2145 PostDominatorTree &PDT =
2146 getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2147 LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2148
2149 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);
2150
2151#ifdef LLVM_ENABLE_STATS
2152 if (AreStatisticsEnabled())
2153 for (auto &I : instructions(F))
2154 NumRemainingStores += isa<StoreInst>(&I);
2155#endif
2156
2157 return Changed;
2158 }
2159
2160 void getAnalysisUsage(AnalysisUsage &AU) const override {
2161 AU.setPreservesCFG();
2162 AU.addRequired<AAResultsWrapperPass>();
2163 AU.addRequired<TargetLibraryInfoWrapperPass>();
2164 AU.addPreserved<GlobalsAAWrapperPass>();
2165 AU.addRequired<DominatorTreeWrapperPass>();
2166 AU.addPreserved<DominatorTreeWrapperPass>();
2167 AU.addRequired<PostDominatorTreeWrapperPass>();
2168 AU.addRequired<MemorySSAWrapperPass>();
2169 AU.addPreserved<PostDominatorTreeWrapperPass>();
2170 AU.addPreserved<MemorySSAWrapperPass>();
2171 AU.addRequired<LoopInfoWrapperPass>();
2172 AU.addPreserved<LoopInfoWrapperPass>();
2173 }
2174};
2175
2176} // end anonymous namespace
2177
2178char DSELegacyPass::ID = 0;
2179
2180INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2181 false)
2182INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2183INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2184INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2185INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2186INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2187INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2188INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2189INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2190INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2191 false)
2192
2193FunctionPass *llvm::createDeadStoreEliminationPass() {
2194 return new DSELegacyPass();
2195}
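A corresponding sketch (again not part of the report, driver name assumed) of running the legacy wrapper through the legacy pass manager:

  #include "llvm/IR/LegacyPassManager.h"
  #include "llvm/Transforms/Scalar.h"

  // Hypothetical driver for DSELegacyPass; M is an already-built Module.
  void runLegacyDSE(llvm::Module &M) {
    llvm::legacy::PassManager PM;
    PM.add(llvm::createDeadStoreEliminationPass()); // the factory defined above
    PM.run(M); // the required analyses are scheduled from getAnalysisUsage above
  }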

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/Analysis/MemoryLocation.h

1//===- MemoryLocation.h - Memory location descriptions ----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file provides utility analysis objects describing memory locations.
10/// These are used both by the Alias Analysis infrastructure and more
11/// specialized memory analysis layers.
12///
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
16#define LLVM_ANALYSIS_MEMORYLOCATION_H
17
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/IR/Metadata.h"
21#include "llvm/Support/TypeSize.h"
22
23namespace llvm {
24
25class CallBase;
26class Instruction;
27class LoadInst;
28class StoreInst;
29class MemTransferInst;
30class MemIntrinsic;
31class AtomicCmpXchgInst;
32class AtomicMemTransferInst;
33class AtomicMemIntrinsic;
34class AtomicRMWInst;
35class AnyMemTransferInst;
36class AnyMemIntrinsic;
37class TargetLibraryInfo;
38class VAArgInst;
39
40// Represents the size of a MemoryLocation. Logically, it's an
41// Optional<uint63_t> that also carries a bit to represent whether the integer
42// it contains, N, is 'precise'. Precise, in this context, means that we know
43// that the area of storage referenced by the given MemoryLocation must be
44// precisely N bytes. An imprecise value is formed as the union of two or more
45// precise values, and can conservatively represent all of the values unioned
46// into it. Importantly, imprecise values are an *upper-bound* on the size of a
47// MemoryLocation.
48//
49// Concretely, a precise MemoryLocation is (%p, 4) in
50// store i32 0, i32* %p
51//
52// Since we know that %p must be at least 4 bytes large at this point.
53// Otherwise, we have UB. An example of an imprecise MemoryLocation is (%p, 4)
54// at the memcpy in
55//
56// %n = select i1 %foo, i64 1, i64 4
57// call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* %baz, i64 %n, i32 1,
58// i1 false)
59//
60// ...Since we'll copy *up to* 4 bytes into %p, but we can't guarantee that
61// we'll ever actually do so.
62//
63// If asked to represent a pathologically large value, this will degrade to
64// None.
65class LocationSize {
66 enum : uint64_t {
67 BeforeOrAfterPointer = ~uint64_t(0),
68 AfterPointer = BeforeOrAfterPointer - 1,
69 MapEmpty = BeforeOrAfterPointer - 2,
70 MapTombstone = BeforeOrAfterPointer - 3,
71 ImpreciseBit = uint64_t(1) << 63,
72
73 // The maximum value we can represent without falling back to 'unknown'.
74 MaxValue = (MapTombstone - 1) & ~ImpreciseBit,
75 };
76
77 uint64_t Value;
78
79 // Hack to support implicit construction. This should disappear when the
80 // public LocationSize ctor goes away.
81 enum DirectConstruction { Direct };
82
83 constexpr LocationSize(uint64_t Raw, DirectConstruction): Value(Raw) {}
84
85 static_assert(AfterPointer & ImpreciseBit,
86 "AfterPointer is imprecise by definition.");
87 static_assert(BeforeOrAfterPointer & ImpreciseBit,
88 "BeforeOrAfterPointer is imprecise by definition.");
89
90public:
91 // FIXME: Migrate all users to construct via either `precise` or `upperBound`,
92 // to make it more obvious at the callsite the kind of size that they're
93 // providing.
94 //
95 // Since the overwhelming majority of users of this provide precise values,
96 // this assumes the provided value is precise.
97 constexpr LocationSize(uint64_t Raw)
98 : Value(Raw > MaxValue ? AfterPointer : Raw) {}
99
100 static LocationSize precise(uint64_t Value) { return LocationSize(Value); }
101 static LocationSize precise(TypeSize Value) {
102 if (Value.isScalable())
103 return afterPointer();
104 return precise(Value.getFixedSize());
105 }
106
107 static LocationSize upperBound(uint64_t Value) {
108 // You can't go lower than 0, so give a precise result.
109 if (LLVM_UNLIKELY(Value == 0))
110 return precise(0);
111 if (LLVM_UNLIKELY(Value > MaxValue))
112 return afterPointer();
113 return LocationSize(Value | ImpreciseBit, Direct);
114 }
115 static LocationSize upperBound(TypeSize Value) {
116 if (Value.isScalable())
117 return afterPointer();
118 return upperBound(Value.getFixedSize());
119 }
120
121 /// Any location after the base pointer (but still within the underlying
122 /// object).
123 constexpr static LocationSize afterPointer() {
124 return LocationSize(AfterPointer, Direct);
125 }
126
127 /// Any location before or after the base pointer (but still within the
128 /// underlying object).
129 constexpr static LocationSize beforeOrAfterPointer() {
130 return LocationSize(BeforeOrAfterPointer, Direct);
131 }
132
133 // Sentinel values, generally used for maps.
134 constexpr static LocationSize mapTombstone() {
135 return LocationSize(MapTombstone, Direct);
136 }
137 constexpr static LocationSize mapEmpty() {
138 return LocationSize(MapEmpty, Direct);
139 }
140
141 // Returns a LocationSize that can correctly represent either `*this` or
142 // `Other`.
143 LocationSize unionWith(LocationSize Other) const {
144 if (Other == *this)
145 return *this;
146
147 if (Value == BeforeOrAfterPointer || Other.Value == BeforeOrAfterPointer)
148 return beforeOrAfterPointer();
149 if (Value == AfterPointer || Other.Value == AfterPointer)
150 return afterPointer();
151
152 return upperBound(std::max(getValue(), Other.getValue()));
153 }
154
155 bool hasValue() const {
156 return Value != AfterPointer && Value != BeforeOrAfterPointer;
157 }
158 uint64_t getValue() const {
159 assert(hasValue() && "Getting value from an unknown LocationSize!");
160 return Value & ~ImpreciseBit;
161 }
162
163 // Returns whether or not this value is precise. Note that if a value is
164 // precise, it's guaranteed to not be unknown.
165 bool isPrecise() const {
166 return (Value & ImpreciseBit) == 0;
167 }
168
169 // Convenience method to check if this LocationSize's value is 0.
170 bool isZero() const { return hasValue() && getValue() == 0; }
171
172 /// Whether accesses before the base pointer are possible.
173 bool mayBeBeforePointer() const { return Value == BeforeOrAfterPointer; }
174
175 bool operator==(const LocationSize &Other) const {
176 return Value == Other.Value;
177 }
178
179 bool operator!=(const LocationSize &Other) const {
180 return !(*this == Other);
181 }
182
183 // Ordering operators are not provided, since it's unclear if there's only one
184 // reasonable way to compare:
185 // - values that don't exist against values that do, and
186 // - precise values to imprecise values
187
188 void print(raw_ostream &OS) const;
189
190 // Returns an opaque value that represents this LocationSize. Cannot be
191 // reliably converted back into a LocationSize.
192 uint64_t toRaw() const { return Value; }
193};
194
195inline raw_ostream &operator<<(raw_ostream &OS, LocationSize Size) {
196 Size.print(OS);
197 return OS;
198}
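To make the precise/imprecise semantics described above concrete, here is an illustrative snippet (assumed, not from the report) showing how sizes combine under unionWith:

  #include "llvm/Analysis/MemoryLocation.h"
  #include <cassert>
  using llvm::LocationSize;

  // Illustrative only: precise and upper-bound sizes under unionWith.
  void locationSizeExample() {
    LocationSize A = LocationSize::precise(4);     // exactly 4 bytes
    LocationSize B = LocationSize::upperBound(8);  // at most 8 bytes
    LocationSize U = A.unionWith(B);               // must cover both -> at most 8 bytes
    assert(!U.isPrecise() && U.hasValue() && U.getValue() == 8);
    // Unioning with afterPointer() discards the bound entirely.
    assert(A.unionWith(LocationSize::afterPointer()) == LocationSize::afterPointer());
  }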
199
200/// Representation for a specific memory location.
201///
202/// This abstraction can be used to represent a specific location in memory.
203/// The goal of the location is to represent enough information to describe
204/// abstract aliasing, modification, and reference behaviors of whatever
205/// value(s) are stored in memory at the particular location.
206///
207/// The primary user of this interface is LLVM's Alias Analysis, but other
208/// memory analyses such as MemoryDependence can use it as well.
209class MemoryLocation {
210public:
211 /// UnknownSize - This is a special value which can be used with the
212 /// size arguments in alias queries to indicate that the caller does not
213 /// know the sizes of the potential memory references.
214 enum : uint64_t { UnknownSize = ~UINT64_C(0) };
215
216 /// The address of the start of the location.
217 const Value *Ptr;
218
219 /// The maximum size of the location, in address-units, or
220 /// UnknownSize if the size is not known.
221 ///
222 /// Note that an unknown size does not mean the pointer aliases the entire
223 /// virtual address space, because there are restrictions on stepping out of
224 /// one object and into another. See
225 /// http://llvm.org/docs/LangRef.html#pointeraliasing
226 LocationSize Size;
227
228 /// The metadata nodes which describes the aliasing of the location (each
229 /// member is null if that kind of information is unavailable).
230 AAMDNodes AATags;
231
232 void print(raw_ostream &OS) const { OS << *Ptr << " " << Size << "\n"; }
233
234 /// Return a location with information about the memory reference by the given
235 /// instruction.
236 static MemoryLocation get(const LoadInst *LI);
237 static MemoryLocation get(const StoreInst *SI);
238 static MemoryLocation get(const VAArgInst *VI);
239 static MemoryLocation get(const AtomicCmpXchgInst *CXI);
240 static MemoryLocation get(const AtomicRMWInst *RMWI);
241 static MemoryLocation get(const Instruction *Inst) {
242 return *MemoryLocation::getOrNone(Inst);
243 }
244 static Optional<MemoryLocation> getOrNone(const Instruction *Inst);
245
246 /// Return a location representing the source of a memory transfer.
247 static MemoryLocation getForSource(const MemTransferInst *MTI);
248 static MemoryLocation getForSource(const AtomicMemTransferInst *MTI);
249 static MemoryLocation getForSource(const AnyMemTransferInst *MTI);
250
251 /// Return a location representing the destination of a memory set or
252 /// transfer.
253 static MemoryLocation getForDest(const MemIntrinsic *MI);
254 static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
255 static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
256
257 /// Return a location representing a particular argument of a call.
258 static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
259 const TargetLibraryInfo *TLI);
260 static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
261 const TargetLibraryInfo &TLI) {
262 return getForArgument(Call, ArgIdx, &TLI);
263 }
264
265 /// Return a location that may access any location after Ptr, while remaining
266 /// within the underlying object.
267 static MemoryLocation getAfter(const Value *Ptr,
268 const AAMDNodes &AATags = AAMDNodes()) {
269 return MemoryLocation(Ptr, LocationSize::afterPointer(), AATags);
270 }
271
272 /// Return a location that may access any location before or after Ptr, while
273 /// remaining within the underlying object.
274 static MemoryLocation
275 getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags = AAMDNodes()) {
276 return MemoryLocation(Ptr, LocationSize::beforeOrAfterPointer(), AATags);
277 }
278
279 // Return the exact size if it is known at compile time,
280 // otherwise return MemoryLocation::UnknownSize.
281 static uint64_t getSizeOrUnknown(const TypeSize &T) {
282 return T.isScalable() ? UnknownSize : T.getFixedSize();
283 }
284
285 MemoryLocation()
286 : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()), AATags() {}
15. Null pointer value stored to 'Loc.Ptr'
287
288 explicit MemoryLocation(const Value *Ptr, LocationSize Size,
289 const AAMDNodes &AATags = AAMDNodes())
290 : Ptr(Ptr), Size(Size), AATags(AATags) {}
291
292 MemoryLocation getWithNewPtr(const Value *NewPtr) const {
293 MemoryLocation Copy(*this);
294 Copy.Ptr = NewPtr;
295 return Copy;
296 }
297
298 MemoryLocation getWithNewSize(LocationSize NewSize) const {
299 MemoryLocation Copy(*this);
300 Copy.Size = NewSize;
301 return Copy;
302 }
303
304 MemoryLocation getWithoutAATags() const {
305 MemoryLocation Copy(*this);
306 Copy.AATags = AAMDNodes();
307 return Copy;
308 }
309
310 bool operator==(const MemoryLocation &Other) const {
311 return Ptr == Other.Ptr && Size == Other.Size && AATags == Other.AATags;
312 }
313};
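The factory functions above are typically used as in the following sketch (helper names are assumed, not from the report):

  #include "llvm/Analysis/MemoryLocation.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Illustrative helpers showing the MemoryLocation factories above.
  MemoryLocation locForStore(const StoreInst *SI) {
    return MemoryLocation::get(SI);        // precise: pointer operand + stored value size
  }

  MemoryLocation locForUnknownTail(const Value *Ptr) {
    return MemoryLocation::getAfter(Ptr);  // anything at or after Ptr within the same object
  }

  MemoryLocation widen(const MemoryLocation &Loc) {
    return Loc.getWithNewSize(LocationSize::afterPointer()); // keep Ptr/AATags, drop the bound
  }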
314
315// Specialize DenseMapInfo.
316template <> struct DenseMapInfo<LocationSize> {
317 static inline LocationSize getEmptyKey() {
318 return LocationSize::mapEmpty();
319 }
320 static inline LocationSize getTombstoneKey() {
321 return LocationSize::mapTombstone();
322 }
323 static unsigned getHashValue(const LocationSize &Val) {
324 return DenseMapInfo<uint64_t>::getHashValue(Val.toRaw());
325 }
326 static bool isEqual(const LocationSize &LHS, const LocationSize &RHS) {
327 return LHS == RHS;
328 }
329};
330
331template <> struct DenseMapInfo<MemoryLocation> {
332 static inline MemoryLocation getEmptyKey() {
333 return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(),
334 DenseMapInfo<LocationSize>::getEmptyKey());
335 }
336 static inline MemoryLocation getTombstoneKey() {
337 return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(),
338 DenseMapInfo<LocationSize>::getTombstoneKey());
339 }
340 static unsigned getHashValue(const MemoryLocation &Val) {
341 return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
342 DenseMapInfo<LocationSize>::getHashValue(Val.Size) ^
343 DenseMapInfo<AAMDNodes>::getHashValue(Val.AATags);
344 }
345 static bool isEqual(const MemoryLocation &LHS, const MemoryLocation &RHS) {
346 return LHS == RHS;
347 }
348};
349}
350
351#endif
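The DenseMapInfo specializations above allow LocationSize and MemoryLocation to be used directly as DenseMap keys. A brief illustrative sketch (names assumed):

  #include "llvm/ADT/DenseMap.h"
  #include "llvm/Analysis/MemoryLocation.h"
  using namespace llvm;

  // Illustrative only: a map keyed by MemoryLocation, hashed over Ptr, Size and AATags
  // via the specializations above.
  using OverwriteCountMap = DenseMap<MemoryLocation, unsigned>;

  void noteOverwrite(OverwriteCountMap &Map, const MemoryLocation &Loc) {
    ++Map[Loc];
  }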