Bug Summary

File: llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
Warning: line 1271, column 11
Called C++ object pointer is null
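
As a minimal illustration of this class of warning (a sketch with invented types, not code from DeadStoreElimination.cpp): LLVM's dyn_cast<> returns a null pointer when the operand is not of the requested type, much like dynamic_cast<> on pointers in standard C++, so calling a member function through the unchecked result is exactly what the analyzer reports here.

#include <iostream>

struct Node { virtual ~Node() = default; };
struct CallNode : Node { void dump() const { std::cout << "call\n"; } };

// Shape of the defect: if 'N' is not actually a CallNode, 'CN' is null and
// the call through it is the "Called C++ object pointer is null" issue.
void buggy(Node *N) {
  CallNode *CN = dynamic_cast<CallNode *>(N);
  CN->dump();
}

// The fix the analyzer expects: test the cast result before using it.
void fixed(Node *N) {
  if (CallNode *CN = dynamic_cast<CallNode *>(N))
    CN->dump();
}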

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name DeadStoreElimination.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/build-llvm/lib/Transforms/Scalar -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-20-210906-17489-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp

1//===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The code below implements dead store elimination using MemorySSA. It uses
10// the following general approach: given a MemoryDef, walk upwards to find
11// clobbering MemoryDefs that may be killed by the starting def. Then check
12// that there are no uses that may read the location of the original MemoryDef
13// in between both MemoryDefs. A bit more concretely:
14//
15// For all MemoryDefs StartDef:
16// 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking
17// upwards.
18// 2. Check that there are no reads between EarlierAccess and the StartDef by
19// checking all uses starting at EarlierAccess and walking until we see
20// StartDef.
21// 3. For each found CurrentDef, check that:
22// 1. There are no barrier instructions between CurrentDef and StartDef (like
23// throws or stores with ordering constraints).
24// 2. StartDef is executed whenever CurrentDef is executed.
25// 3. StartDef completely overwrites CurrentDef.
26// 4. Erase CurrentDef from the function and MemorySSA.
27//
28//===----------------------------------------------------------------------===//
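
A rough, self-contained toy model of the approach sketched in the comment above (invented names, a flat trace of memory operations instead of MemorySSA; only a schematic of steps 1-4, not the actual pass):

#include <cstdint>
#include <vector>

struct Op {
  enum Kind { Store, Load } kind;
  int64_t Start;   // byte offset of the accessed location
  uint64_t Size;   // number of bytes accessed
  bool Dead = false;
};

// Mark an earlier store dead when a later store completely overwrites it and
// no read of that location occurs in between.
static void eliminateDeadStores(std::vector<Op> &Trace) {
  for (size_t Later = 0; Later < Trace.size(); ++Later) {
    if (Trace[Later].kind != Op::Store)
      continue;
    int64_t LStart = Trace[Later].Start;
    int64_t LEnd = LStart + int64_t(Trace[Later].Size);
    // Walk upwards (backwards in the trace) over candidate earlier defs.
    for (size_t I = Later; I-- > 0;) {
      Op &E = Trace[I];
      int64_t EStart = E.Start, EEnd = EStart + int64_t(E.Size);
      bool Overlaps = EStart < LEnd && LStart < EEnd;
      if (E.kind == Op::Load && Overlaps)
        break; // an intervening read: stop killing along this walk
      if (E.kind == Op::Store && !E.Dead && LStart <= EStart && EEnd <= LEnd)
        E.Dead = true; // completely overwritten with no read in between
    }
  }
}

int main() {
  std::vector<Op> Trace = {{Op::Store, 0, 4}, {Op::Load, 8, 4}, {Op::Store, 0, 8}};
  eliminateDeadStores(Trace);
  // Trace[0] is dead: the 8-byte store at offset 0 overwrites it and the
  // intervening load reads a disjoint location.
  return Trace[0].Dead ? 0 : 1;
}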
29
30#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31#include "llvm/ADT/APInt.h"
32#include "llvm/ADT/DenseMap.h"
33#include "llvm/ADT/MapVector.h"
34#include "llvm/ADT/PostOrderIterator.h"
35#include "llvm/ADT/SetVector.h"
36#include "llvm/ADT/SmallPtrSet.h"
37#include "llvm/ADT/SmallVector.h"
38#include "llvm/ADT/Statistic.h"
39#include "llvm/ADT/StringRef.h"
40#include "llvm/Analysis/AliasAnalysis.h"
41#include "llvm/Analysis/CaptureTracking.h"
42#include "llvm/Analysis/GlobalsModRef.h"
43#include "llvm/Analysis/MemoryBuiltins.h"
44#include "llvm/Analysis/MemoryLocation.h"
45#include "llvm/Analysis/MemorySSA.h"
46#include "llvm/Analysis/MemorySSAUpdater.h"
47#include "llvm/Analysis/PostDominators.h"
48#include "llvm/Analysis/TargetLibraryInfo.h"
49#include "llvm/Analysis/ValueTracking.h"
50#include "llvm/IR/Argument.h"
51#include "llvm/IR/BasicBlock.h"
52#include "llvm/IR/Constant.h"
53#include "llvm/IR/Constants.h"
54#include "llvm/IR/DataLayout.h"
55#include "llvm/IR/Dominators.h"
56#include "llvm/IR/Function.h"
57#include "llvm/IR/InstIterator.h"
58#include "llvm/IR/InstrTypes.h"
59#include "llvm/IR/Instruction.h"
60#include "llvm/IR/Instructions.h"
61#include "llvm/IR/IntrinsicInst.h"
62#include "llvm/IR/Intrinsics.h"
63#include "llvm/IR/LLVMContext.h"
64#include "llvm/IR/Module.h"
65#include "llvm/IR/PassManager.h"
66#include "llvm/IR/PatternMatch.h"
67#include "llvm/IR/Value.h"
68#include "llvm/InitializePasses.h"
69#include "llvm/Pass.h"
70#include "llvm/Support/Casting.h"
71#include "llvm/Support/CommandLine.h"
72#include "llvm/Support/Debug.h"
73#include "llvm/Support/DebugCounter.h"
74#include "llvm/Support/ErrorHandling.h"
75#include "llvm/Support/MathExtras.h"
76#include "llvm/Support/raw_ostream.h"
77#include "llvm/Transforms/Scalar.h"
78#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
79#include "llvm/Transforms/Utils/Local.h"
80#include <algorithm>
81#include <cassert>
82#include <cstddef>
83#include <cstdint>
84#include <iterator>
85#include <map>
86#include <utility>
87
88using namespace llvm;
89using namespace PatternMatch;
90
91#define DEBUG_TYPE "dse"
92
93STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
94STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
95STATISTIC(NumFastStores, "Number of stores deleted");
96STATISTIC(NumFastOther, "Number of other instrs removed");
97STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
98STATISTIC(NumModifiedStores, "Number of stores modified");
99STATISTIC(NumCFGChecks, "Number of stores modified");
100STATISTIC(NumCFGTries, "Number of stores modified");
101STATISTIC(NumCFGSuccess, "Number of stores modified");
102STATISTIC(NumGetDomMemoryDefPassed,
103 "Number of times a valid candidate is returned from getDomMemoryDef");
104STATISTIC(NumDomMemDefChecks,
105 "Number iterations check for reads in getDomMemoryDef");
106
107DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
108 "Controls which MemoryDefs are eliminated.");
109
110static cl::opt<bool>
111EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
112 cl::init(true), cl::Hidden,
113 cl::desc("Enable partial-overwrite tracking in DSE"));
114
115static cl::opt<bool>
116EnablePartialStoreMerging("enable-dse-partial-store-merging",
117 cl::init(true), cl::Hidden,
118 cl::desc("Enable partial store merging in DSE"));
119
120static cl::opt<unsigned>
121 MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
122 cl::desc("The number of memory instructions to scan for "
123 "dead store elimination (default = 150)"));
124static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
125 "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
126 cl::desc("The maximum number of steps while walking upwards to find "
127 "MemoryDefs that may be killed (default = 90)"));
128
129static cl::opt<unsigned> MemorySSAPartialStoreLimit(
130 "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
131 cl::desc("The maximum number candidates that only partially overwrite the "
132 "killing MemoryDef to consider"
133 " (default = 5)"));
134
135static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
136 "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
137 cl::desc("The number of MemoryDefs we consider as candidates to eliminated "
138 "other stores per basic block (default = 5000)"));
139
140static cl::opt<unsigned> MemorySSASameBBStepCost(
141 "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
142 cl::desc(
143 "The cost of a step in the same basic block as the killing MemoryDef"
144 " (default = 1)"));
145
146static cl::opt<unsigned>
147 MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
148 cl::Hidden,
149 cl::desc("The cost of a step in a different basic "
150 "block than the killing MemoryDef"
151 " (default = 5)"));
152
153static cl::opt<unsigned> MemorySSAPathCheckLimit(
154 "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
155 cl::desc("The maximum number of blocks to check when trying to prove that "
156 "all paths to an exit go through a killing block (default = 50)"));
157
158//===----------------------------------------------------------------------===//
159// Helper functions
160//===----------------------------------------------------------------------===//
161using OverlapIntervalsTy = std::map<int64_t, int64_t>;
162using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
163
164/// Does this instruction write some memory? This only returns true for things
165/// that we can analyze with other helpers below.
166static bool hasAnalyzableMemoryWrite(Instruction *I,
167 const TargetLibraryInfo &TLI) {
168 if (isa<StoreInst>(I))
 77: 'I' is not a 'StoreInst'
 78: Taking false branch
169 return true;
170 if (IntrinsicInst *II
 79.1: 'II' is null
 = dyn_cast<IntrinsicInst>(I)) {
 79: 'I' is not a 'IntrinsicInst'
 80: Taking false branch
171 switch (II->getIntrinsicID()) {
172 default:
173 return false;
174 case Intrinsic::memset:
175 case Intrinsic::memmove:
176 case Intrinsic::memcpy:
177 case Intrinsic::memcpy_inline:
178 case Intrinsic::memcpy_element_unordered_atomic:
179 case Intrinsic::memmove_element_unordered_atomic:
180 case Intrinsic::memset_element_unordered_atomic:
181 case Intrinsic::init_trampoline:
182 case Intrinsic::lifetime_end:
183 case Intrinsic::masked_store:
184 return true;
185 }
186 }
187 if (auto *CB
 81.1: 'CB' is non-null
 = dyn_cast<CallBase>(I)) {
 81: Assuming 'I' is a 'CallBase'
 82: Taking true branch
188 LibFunc LF;
189 if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
 83: Calling 'TargetLibraryInfo::getLibFunc'
 88: Returning from 'TargetLibraryInfo::getLibFunc'
 89: Assuming the condition is true
 90: Calling 'TargetLibraryInfo::has'
 99: Returning from 'TargetLibraryInfo::has'
 100: Assuming the condition is true
 101: Taking true branch
190 switch (LF) {
 102: Control jumps to 'case LibFunc_strcpy:' at line 191
191 case LibFunc_strcpy:
192 case LibFunc_strncpy:
193 case LibFunc_strcat:
194 case LibFunc_strncat:
195 return true;
 103: Returning the value 1, which participates in a condition later
196 default:
197 return false;
198 }
199 }
200 }
201 return false;
202}
203
204/// Return a Location stored to by the specified instruction. If isRemovable
205/// returns true, this function and getLocForRead completely describe the memory
206/// operations for this instruction.
207static MemoryLocation getLocForWrite(Instruction *Inst,
208 const TargetLibraryInfo &TLI) {
209 if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
210 return MemoryLocation::get(SI);
211
212 // memcpy/memmove/memset.
213 if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst))
214 return MemoryLocation::getForDest(MI);
215
216 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
217 switch (II->getIntrinsicID()) {
218 default:
219 return MemoryLocation(); // Unhandled intrinsic.
220 case Intrinsic::init_trampoline:
221 return MemoryLocation::getAfter(II->getArgOperand(0));
222 case Intrinsic::masked_store:
223 return MemoryLocation::getForArgument(II, 1, TLI);
224 case Intrinsic::lifetime_end: {
225 uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
226 return MemoryLocation(II->getArgOperand(1), Len);
227 }
228 }
229 }
230 if (auto *CB = dyn_cast<CallBase>(Inst))
231 // All the supported TLI functions so far happen to have dest as their
232 // first argument.
233 return MemoryLocation::getAfter(CB->getArgOperand(0));
234 return MemoryLocation();
235}
236
237/// If the value of this instruction and the memory it writes to is unused, may
238/// we delete this instruction?
239static bool isRemovable(Instruction *I) {
240 // Don't remove volatile/atomic stores.
241 if (StoreInst *SI = dyn_cast<StoreInst>(I))
242 return SI->isUnordered();
243
244 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
245 switch (II->getIntrinsicID()) {
246 default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
247 case Intrinsic::lifetime_end:
248 // Never remove dead lifetime_end's, e.g. because it is followed by a
249 // free.
250 return false;
251 case Intrinsic::init_trampoline:
252 // Always safe to remove init_trampoline.
253 return true;
254 case Intrinsic::memset:
255 case Intrinsic::memmove:
256 case Intrinsic::memcpy:
257 case Intrinsic::memcpy_inline:
258 // Don't remove volatile memory intrinsics.
259 return !cast<MemIntrinsic>(II)->isVolatile();
260 case Intrinsic::memcpy_element_unordered_atomic:
261 case Intrinsic::memmove_element_unordered_atomic:
262 case Intrinsic::memset_element_unordered_atomic:
263 case Intrinsic::masked_store:
264 return true;
265 }
266 }
267
268 // note: only get here for calls with analyzable writes - i.e. libcalls
269 if (auto *CB = dyn_cast<CallBase>(I))
270 return CB->use_empty();
271
272 return false;
273}
274
275/// Returns true if the end of this instruction can be safely shortened in
276/// length.
277static bool isShortenableAtTheEnd(Instruction *I) {
278 // Don't shorten stores for now
279 if (isa<StoreInst>(I))
280 return false;
281
282 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
283 switch (II->getIntrinsicID()) {
284 default: return false;
285 case Intrinsic::memset:
286 case Intrinsic::memcpy:
287 case Intrinsic::memcpy_element_unordered_atomic:
288 case Intrinsic::memset_element_unordered_atomic:
289 // Do shorten memory intrinsics.
290 // FIXME: Add memmove if it's also safe to transform.
291 return true;
292 }
293 }
294
295 // Don't shorten libcalls calls for now.
296
297 return false;
298}
299
300/// Returns true if the beginning of this instruction can be safely shortened
301/// in length.
302static bool isShortenableAtTheBeginning(Instruction *I) {
303 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
304 // easily done by offsetting the source address.
305 return isa<AnyMemSetInst>(I);
306}
307
308static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
309 const TargetLibraryInfo &TLI,
310 const Function *F) {
311 uint64_t Size;
312 ObjectSizeOpts Opts;
313 Opts.NullIsUnknownSize = NullPointerIsDefined(F);
314
315 if (getObjectSize(V, Size, DL, &TLI, Opts))
316 return Size;
317 return MemoryLocation::UnknownSize;
318}
319
320namespace {
321
322enum OverwriteResult {
323 OW_Begin,
324 OW_Complete,
325 OW_End,
326 OW_PartialEarlierWithFullLater,
327 OW_MaybePartial,
328 OW_Unknown
329};
330
331} // end anonymous namespace
332
333/// Check if two instructions are masked stores that completely
334/// overwrite one another. More specifically, \p Later has to
335/// overwrite \p Earlier.
336static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later,
337 const Instruction *Earlier,
338 BatchAAResults &AA) {
339 const auto *IIL = dyn_cast<IntrinsicInst>(Later);
340 const auto *IIE = dyn_cast<IntrinsicInst>(Earlier);
341 if (IIL == nullptr || IIE == nullptr)
342 return OW_Unknown;
343 if (IIL->getIntrinsicID() != Intrinsic::masked_store ||
344 IIE->getIntrinsicID() != Intrinsic::masked_store)
345 return OW_Unknown;
346 // Pointers.
347 Value *LP = IIL->getArgOperand(1)->stripPointerCasts();
348 Value *EP = IIE->getArgOperand(1)->stripPointerCasts();
349 if (LP != EP && !AA.isMustAlias(LP, EP))
350 return OW_Unknown;
351 // Masks.
352 // TODO: check that Later's mask is a superset of the Earlier's mask.
353 if (IIL->getArgOperand(3) != IIE->getArgOperand(3))
354 return OW_Unknown;
355 return OW_Complete;
356}
357
358/// Return 'OW_Complete' if a store to the 'Later' location completely
359/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
360/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
361/// beginning of the 'Earlier' location is overwritten by 'Later'.
362/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
363/// overwritten by a later (smaller) store which doesn't write outside the big
364/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
365/// NOTE: This function must only be called if both \p Later and \p Earlier
366/// write to the same underlying object with valid \p EarlierOff and \p
367/// LaterOff.
368static OverwriteResult isPartialOverwrite(const MemoryLocation &Later,
369 const MemoryLocation &Earlier,
370 int64_t EarlierOff, int64_t LaterOff,
371 Instruction *DepWrite,
372 InstOverlapIntervalsTy &IOL) {
373 const uint64_t LaterSize = Later.Size.getValue();
374 const uint64_t EarlierSize = Earlier.Size.getValue();
375 // We may now overlap, although the overlap is not complete. There might also
376 // be other incomplete overlaps, and together, they might cover the complete
377 // earlier write.
378 // Note: The correctness of this logic depends on the fact that this function
379 // is not even called providing DepWrite when there are any intervening reads.
380 if (EnablePartialOverwriteTracking &&
381 LaterOff < int64_t(EarlierOff + EarlierSize) &&
382 int64_t(LaterOff + LaterSize) >= EarlierOff) {
383
384 // Insert our part of the overlap into the map.
385 auto &IM = IOL[DepWrite];
386 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
387 << ", " << int64_t(EarlierOff + EarlierSize)
388 << ") Later [" << LaterOff << ", "
389 << int64_t(LaterOff + LaterSize) << ")\n");
390
391 // Make sure that we only insert non-overlapping intervals and combine
392 // adjacent intervals. The intervals are stored in the map with the ending
393 // offset as the key (in the half-open sense) and the starting offset as
394 // the value.
395 int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
396
397 // Find any intervals ending at, or after, LaterIntStart which start
398 // before LaterIntEnd.
399 auto ILI = IM.lower_bound(LaterIntStart);
400 if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
401 // This existing interval is overlapped with the current store somewhere
402 // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
403 // intervals and adjusting our start and end.
404 LaterIntStart = std::min(LaterIntStart, ILI->second);
405 LaterIntEnd = std::max(LaterIntEnd, ILI->first);
406 ILI = IM.erase(ILI);
407
408 // Continue erasing and adjusting our end in case other previous
409 // intervals are also overlapped with the current store.
410 //
411 // |--- earlier 1 ---| |--- earlier 2 ---|
412 // |------- later---------|
413 //
414 while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
415 assert(ILI->second > LaterIntStart && "Unexpected interval");
416 LaterIntEnd = std::max(LaterIntEnd, ILI->first);
417 ILI = IM.erase(ILI);
418 }
419 }
420
421 IM[LaterIntEnd] = LaterIntStart;
422
423 ILI = IM.begin();
424 if (ILI->second <= EarlierOff &&
425 ILI->first >= int64_t(EarlierOff + EarlierSize)) {
426 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
427 << EarlierOff << ", "
428 << int64_t(EarlierOff + EarlierSize)
429 << ") Composite Later [" << ILI->second << ", "
430 << ILI->first << ")\n");
431 ++NumCompletePartials;
432 return OW_Complete;
433 }
434 }
435
436 // Check for an earlier store which writes to all the memory locations that
437 // the later store writes to.
438 if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
439 int64_t(EarlierOff + EarlierSize) > LaterOff &&
440 uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
441 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
442 << EarlierOff << ", "
443 << int64_t(EarlierOff + EarlierSize)
444 << ") by a later store [" << LaterOff << ", "
445 << int64_t(LaterOff + LaterSize) << ")\n");
446 // TODO: Maybe come up with a better name?
447 return OW_PartialEarlierWithFullLater;
448 }
449
450 // Another interesting case is if the later store overwrites the end of the
451 // earlier store.
452 //
453 // |--earlier--|
454 // |-- later --|
455 //
456 // In this case we may want to trim the size of earlier to avoid generating
457 // writes to addresses which will definitely be overwritten later
458 if (!EnablePartialOverwriteTracking &&
459 (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
460 int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
461 return OW_End;
462
463 // Finally, we also need to check if the later store overwrites the beginning
464 // of the earlier store.
465 //
466 // |--earlier--|
467 // |-- later --|
468 //
469 // In this case we may want to move the destination address and trim the size
470 // of earlier to avoid generating writes to addresses which will definitely
471 // be overwritten later.
472 if (!EnablePartialOverwriteTracking &&
473 (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
474 assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
475 "Expect to be handled as OW_Complete");
476 return OW_Begin;
477 }
478 // Otherwise, they don't completely overlap.
479 return OW_Unknown;
480}
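
A small standalone illustration of the interval-map convention used by isPartialOverwrite above (assuming the same std::map<int64_t, int64_t> layout: half-open end offset as the key, start offset as the value). Two partial later writes [0, 4) and [4, 8) merge into [0, 8) and cover an 8-byte earlier store, the OW_Complete-from-partials case:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>

int main() {
  std::map<int64_t, int64_t> IM; // end offset -> start offset
  IM[4] = 0;                     // first later write [0, 4)

  // Second later write [4, 8): lower_bound finds the adjacent interval
  // [0, 4); it is erased and the current interval extended, mirroring the
  // erase/extend loop in isPartialOverwrite.
  int64_t Start = 4, End = 8;
  auto It = IM.lower_bound(Start);
  if (It != IM.end() && It->second <= End) {
    Start = std::min(Start, It->second); // 0
    End = std::max(End, It->first);      // 8
    IM.erase(It);
  }
  IM[End] = Start; // IM now holds a single interval {8: 0}, i.e. [0, 8)

  // The composite interval covers an earlier store at [0, 8) completely.
  assert(IM.begin()->second <= 0 && IM.begin()->first >= 8);
  return 0;
}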
481
482/// Returns true if the memory which is accessed by the second instruction is not
483/// modified between the first and the second instruction.
484/// Precondition: Second instruction must be dominated by the first
485/// instruction.
486static bool
487memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
488 BatchAAResults &AA, const DataLayout &DL,
489 DominatorTree *DT) {
490 // Do a backwards scan through the CFG from SecondI to FirstI. Look for
491 // instructions which can modify the memory location accessed by SecondI.
492 //
493 // While doing the walk keep track of the address to check. It might be
494 // different in different basic blocks due to PHI translation.
495 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
496 SmallVector<BlockAddressPair, 16> WorkList;
497 // Keep track of the address we visited each block with. Bail out if we
498 // visit a block with different addresses.
499 DenseMap<BasicBlock *, Value *> Visited;
500
501 BasicBlock::iterator FirstBBI(FirstI);
502 ++FirstBBI;
503 BasicBlock::iterator SecondBBI(SecondI);
504 BasicBlock *FirstBB = FirstI->getParent();
505 BasicBlock *SecondBB = SecondI->getParent();
506 MemoryLocation MemLoc = MemoryLocation::get(SecondI);
507 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
508
509 // Start checking the SecondBB.
510 WorkList.push_back(
511 std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
512 bool isFirstBlock = true;
513
514 // Check all blocks going backward until we reach the FirstBB.
515 while (!WorkList.empty()) {
516 BlockAddressPair Current = WorkList.pop_back_val();
517 BasicBlock *B = Current.first;
518 PHITransAddr &Addr = Current.second;
519 Value *Ptr = Addr.getAddr();
520
521 // Ignore instructions before FirstI if this is the FirstBB.
522 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
523
524 BasicBlock::iterator EI;
525 if (isFirstBlock) {
526 // Ignore instructions after SecondI if this is the first visit of SecondBB.
527 assert(B == SecondBB && "first block is not the store block");
528 EI = SecondBBI;
529 isFirstBlock = false;
530 } else {
531 // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
532 // In this case we also have to look at instructions after SecondI.
533 EI = B->end();
534 }
535 for (; BI != EI; ++BI) {
536 Instruction *I = &*BI;
537 if (I->mayWriteToMemory() && I != SecondI)
538 if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
539 return false;
540 }
541 if (B != FirstBB) {
542 assert(B != &FirstBB->getParent()->getEntryBlock() &&
543 "Should not hit the entry block because SI must be dominated by LI");
544 for (BasicBlock *Pred : predecessors(B)) {
545 PHITransAddr PredAddr = Addr;
546 if (PredAddr.NeedsPHITranslationFromBlock(B)) {
547 if (!PredAddr.IsPotentiallyPHITranslatable())
548 return false;
549 if (PredAddr.PHITranslateValue(B, Pred, DT, false))
550 return false;
551 }
552 Value *TranslatedPtr = PredAddr.getAddr();
553 auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
554 if (!Inserted.second) {
555 // We already visited this block before. If it was with a different
556 // address - bail out!
557 if (TranslatedPtr != Inserted.first->second)
558 return false;
559 // ... otherwise just skip it.
560 continue;
561 }
562 WorkList.push_back(std::make_pair(Pred, PredAddr));
563 }
564 }
565 }
566 return true;
567}
568
569static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart,
570 uint64_t &EarlierSize, int64_t LaterStart,
571 uint64_t LaterSize, bool IsOverwriteEnd) {
572 auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
573 Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne();
574
575 // We assume that memset/memcpy operates in chunks of the "largest" native
576 // type size and aligned on the same value. That means the optimal start and
577 // size of a memset/memcpy should be a multiple of the preferred alignment of
578 // that type, so there is no sense in trying to reduce the store size any
579 // further, since any "extra" stores come for free anyway.
580 // On the other hand, the maximum alignment we can achieve is limited by the
581 // alignment of the initial store.
582
583 // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
584 // "largest" native type.
585 // Note: What is the proper way to get that value?
586 // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
587 // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
588
589 int64_t ToRemoveStart = 0;
590 uint64_t ToRemoveSize = 0;
591 // Compute start and size of the region to remove. Make sure 'PrefAlign' is
592 // maintained on the remaining store.
593 if (IsOverwriteEnd) {
594 // Calculate the required adjustment for 'LaterStart' in order to keep the
595 // remaining store size aligned on 'PrefAlign'.
596 uint64_t Off =
597 offsetToAlignment(uint64_t(LaterStart - EarlierStart), PrefAlign);
598 ToRemoveStart = LaterStart + Off;
599 if (EarlierSize <= uint64_t(ToRemoveStart - EarlierStart))
600 return false;
601 ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart);
602 } else {
603 ToRemoveStart = EarlierStart;
604 assert(LaterSize >= uint64_t(EarlierStart - LaterStart) &&
605 "Not overlapping accesses?");
606 ToRemoveSize = LaterSize - uint64_t(EarlierStart - LaterStart);
607 // Calculate the required adjustment for 'ToRemoveSize' in order to keep the
608 // start of the remaining store aligned on 'PrefAlign'.
609 uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
610 if (Off != 0) {
611 if (ToRemoveSize <= (PrefAlign.value() - Off))
612 return false;
613 ToRemoveSize -= PrefAlign.value() - Off;
614 }
615 assert(isAligned(PrefAlign, ToRemoveSize) &&
616 "Should preserve selected alignment");
617 }
618
619 assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
620 assert(EarlierSize > ToRemoveSize && "Can't remove more than original size");
621
622 uint64_t NewSize = EarlierSize - ToRemoveSize;
623 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
624 // When shortening an atomic memory intrinsic, the newly shortened
625 // length must remain an integer multiple of the element size.
626 const uint32_t ElementSize = AMI->getElementSizeInBytes();
627 if (0 != NewSize % ElementSize)
628 return false;
629 }
630
631 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
632 << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
633 << *EarlierWrite << "\n KILLER [" << ToRemoveStart << ", "
634 << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
635
636 Value *EarlierWriteLength = EarlierIntrinsic->getLength();
637 Value *TrimmedLength =
638 ConstantInt::get(EarlierWriteLength->getType(), NewSize);
639 EarlierIntrinsic->setLength(TrimmedLength);
640 EarlierIntrinsic->setDestAlignment(PrefAlign);
641
642 if (!IsOverwriteEnd) {
643 Value *Indices[1] = {
644 ConstantInt::get(EarlierWriteLength->getType(), ToRemoveSize)};
645 GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
646 EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
647 EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
648 NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
649 EarlierIntrinsic->setDest(NewDestGEP);
650 }
651
652 // Finally update start and size of earlier access.
653 if (!IsOverwriteEnd)
654 EarlierStart += ToRemoveSize;
655 EarlierSize = NewSize;
656
657 return true;
658}
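
A small numeric check (plain integers, not code from this file) of the IsOverwriteEnd adjustment in tryToShorten above; the helper mirrors what llvm::offsetToAlignment computes, the number of bytes needed to round a value up to the next multiple of the alignment:

#include <cassert>
#include <cstdint>

// Same meaning as llvm::offsetToAlignment(Value, Align): bytes to add to
// Value to reach the next value that is a multiple of Align.
static uint64_t offsetToAlign(uint64_t Value, uint64_t Align) {
  return (Align - (Value % Align)) % Align;
}

int main() {
  int64_t EarlierStart = 0, LaterStart = 10;
  uint64_t EarlierSize = 32, PrefAlign = 8;

  // Overwrite of the end: trim the earlier 32-byte write so that the part
  // that is kept stays a multiple of PrefAlign bytes long.
  uint64_t Off = offsetToAlign(uint64_t(LaterStart - EarlierStart), PrefAlign);
  int64_t ToRemoveStart = LaterStart + Off;                        // 10 + 6 = 16
  uint64_t ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart);
  assert(ToRemoveStart == 16 && ToRemoveSize == 16); // keep [0, 16), drop [16, 32)
  return 0;
}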
659
660static bool tryToShortenEnd(Instruction *EarlierWrite,
661 OverlapIntervalsTy &IntervalMap,
662 int64_t &EarlierStart, uint64_t &EarlierSize) {
663 if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
664 return false;
665
666 OverlapIntervalsTy::iterator OII = --IntervalMap.end();
667 int64_t LaterStart = OII->second;
668 uint64_t LaterSize = OII->first - LaterStart;
669
670 assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
671
672 if (LaterStart > EarlierStart &&
673 // Note: "LaterStart - EarlierStart" is known to be positive due to
674 // preceding check.
675 (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
676 // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
677 // be non negative due to preceding checks.
678 LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
679 if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
680 LaterSize, true)) {
681 IntervalMap.erase(OII);
682 return true;
683 }
684 }
685 return false;
686}
687
688static bool tryToShortenBegin(Instruction *EarlierWrite,
689 OverlapIntervalsTy &IntervalMap,
690 int64_t &EarlierStart, uint64_t &EarlierSize) {
691 if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
692 return false;
693
694 OverlapIntervalsTy::iterator OII = IntervalMap.begin();
695 int64_t LaterStart = OII->second;
696 uint64_t LaterSize = OII->first - LaterStart;
697
698 assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
699
700 if (LaterStart <= EarlierStart &&
701 // Note: "EarlierStart - LaterStart" is known to be non negative due to
702 // preceding check.
703 LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
704 // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
705 // positive due to preceding checks.
706 assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
707 "Should have been handled as OW_Complete");
708 if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
709 LaterSize, false)) {
710 IntervalMap.erase(OII);
711 return true;
712 }
713 }
714 return false;
715}
716
717static bool removePartiallyOverlappedStores(const DataLayout &DL,
718 InstOverlapIntervalsTy &IOL,
719 const TargetLibraryInfo &TLI) {
720 bool Changed = false;
721 for (auto OI : IOL) {
722 Instruction *EarlierWrite = OI.first;
723 MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
724 assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
725
726 const Value *Ptr = Loc.Ptr->stripPointerCasts();
727 int64_t EarlierStart = 0;
728 uint64_t EarlierSize = Loc.Size.getValue();
729 GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
730 OverlapIntervalsTy &IntervalMap = OI.second;
731 Changed |=
732 tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
733 if (IntervalMap.empty())
734 continue;
735 Changed |=
736 tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
737 }
738 return Changed;
739}
740
741static Constant *tryToMergePartialOverlappingStores(
742 StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset,
743 int64_t DepWriteOffset, const DataLayout &DL, BatchAAResults &AA,
744 DominatorTree *DT) {
745
746 if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
747 DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
748 Later && isa<ConstantInt>(Later->getValueOperand()) &&
749 DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
750 memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
751 // If the store we find is:
752 // a) partially overwritten by the store to 'Loc'
753 // b) the later store is fully contained in the earlier one and
754 // c) they both have a constant value
755 // d) none of the two stores need padding
756 // Merge the two stores, replacing the earlier store's value with a
757 // merge of both values.
758 // TODO: Deal with other constant types (vectors, etc), and probably
759 // some mem intrinsics (if needed)
760
761 APInt EarlierValue =
762 cast<ConstantInt>(Earlier->getValueOperand())->getValue();
763 APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
764 unsigned LaterBits = LaterValue.getBitWidth();
765 assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
766 LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
767
768 // Offset of the smaller store inside the larger store
769 unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
770 unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
771 BitOffsetDiff - LaterBits
772 : BitOffsetDiff;
773 APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
774 LShiftAmount + LaterBits);
775 // Clear the bits we'll be replacing, then OR with the smaller
776 // store, shifted appropriately.
777 APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
778 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *Earlier
779 << "\n Later: " << *Later
780 << "\n Merged Value: " << Merged << '\n');
781 return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
782 }
783 return nullptr;
784}
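
A worked example (plain fixed-width integers instead of APInt, values invented) of the mask-and-shift merge performed by tryToMergePartialOverlappingStores above: a 32-bit store of 0xAABBCCDD partially overwritten by an 8-bit store of 0x11 one byte into it, on a little-endian target:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t EarlierValue = 0xAABBCCDD; // earlier, larger store
  uint32_t LaterValue = 0x11;         // later 8-bit store, zero-extended
  unsigned LaterBits = 8;
  unsigned BitOffsetDiff = 1 * 8;     // (InstWriteOffset - DepWriteOffset) * 8
  unsigned LShiftAmount = BitOffsetDiff; // little-endian case

  // Clear the byte being replaced, then OR in the later value, shifted.
  uint32_t Mask = ((1u << LaterBits) - 1) << LShiftAmount; // 0x0000FF00
  uint32_t Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
  assert(Merged == 0xAABB11DD); // the value the earlier store is rewritten with
  return 0;
}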
785
786namespace {
787 // Returns true if \p I is an intrinsic that does not read or write memory.
788bool isNoopIntrinsic(Instruction *I) {
789 if (const IntrinsicInst *II
 27.1: 'II' is null
 56.1: 'II' is null
 = dyn_cast<IntrinsicInst>(I)) {
 27: Assuming 'I' is not a 'IntrinsicInst'
 28: Taking false branch
 56: Assuming 'I' is not a 'IntrinsicInst'
 57: Taking false branch
790 switch (II->getIntrinsicID()) {
791 case Intrinsic::lifetime_start:
792 case Intrinsic::lifetime_end:
793 case Intrinsic::invariant_end:
794 case Intrinsic::launder_invariant_group:
795 case Intrinsic::assume:
796 return true;
797 case Intrinsic::dbg_addr:
798 case Intrinsic::dbg_declare:
799 case Intrinsic::dbg_label:
800 case Intrinsic::dbg_value:
801 llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
802 default:
803 return false;
804 }
805 }
806 return false;
 29: Returning zero, which participates in a condition later
 58: Returning zero, which participates in a condition later
807}
808
809// Check if we can ignore \p D for DSE.
810bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
811 Instruction *DI = D->getMemoryInst();
812 // Calls that only access inaccessible memory cannot read or write any memory
813 // locations we consider for elimination.
814 if (auto *CB
 21.1: 'CB' is null
 = dyn_cast<CallBase>(DI))
 21: Assuming 'DI' is not a 'CallBase'
 22: Taking false branch
815 if (CB->onlyAccessesInaccessibleMemory())
816 return true;
817
818 // We can eliminate stores to locations not visible to the caller across
819 // throwing instructions.
820 if (DI->mayThrow() && !DefVisibleToCaller)
 23: Assuming the condition is false
821 return true;
822
823 // We can remove the dead stores, irrespective of the fence and its ordering
824 // (release/acquire/seq_cst). Fences only constraints the ordering of
825 // already visible stores, it does not make a store visible to other
826 // threads. So, skipping over a fence does not change a store from being
827 // dead.
828 if (isa<FenceInst>(DI))
 24: Assuming 'DI' is not a 'FenceInst'
 25: Taking false branch
829 return true;
830
831 // Skip intrinsics that do not really read or modify memory.
832 if (isNoopIntrinsic(D->getMemoryInst()))
 26: Calling 'isNoopIntrinsic'
 30: Returning from 'isNoopIntrinsic'
 31: Taking false branch
833 return true;
834
835 return false;
 32: Returning zero, which participates in a condition later
836}
837
838struct DSEState {
839 Function &F;
840 AliasAnalysis &AA;
841
842 /// The single BatchAA instance that is used to cache AA queries. It will
843 /// not be invalidated over the whole run. This is safe, because:
844 /// 1. Only memory writes are removed, so the alias cache for memory
845 /// locations remains valid.
846 /// 2. No new instructions are added (only instructions removed), so cached
847 /// information for a deleted value cannot be accessed by a re-used new
848 /// value pointer.
849 BatchAAResults BatchAA;
850
851 MemorySSA &MSSA;
852 DominatorTree &DT;
853 PostDominatorTree &PDT;
854 const TargetLibraryInfo &TLI;
855 const DataLayout &DL;
856
857 // All MemoryDefs that potentially could kill other MemDefs.
858 SmallVector<MemoryDef *, 64> MemDefs;
859 // Any that should be skipped as they are already deleted
860 SmallPtrSet<MemoryAccess *, 4> SkipStores;
861 // Keep track of all of the objects that are invisible to the caller before
862 // the function returns.
863 // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
864 DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
865 // Keep track of all of the objects that are invisible to the caller after
866 // the function returns.
867 DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
868 // Keep track of blocks with throwing instructions not modeled in MemorySSA.
869 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
870 // Post-order numbers for each basic block. Used to figure out if memory
871 // accesses are executed before another access.
872 DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
873
874 /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
875 /// basic block.
876 DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
877
878 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
879 PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
880 : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI),
881 DL(F.getParent()->getDataLayout()) {}
882
883 static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
884 DominatorTree &DT, PostDominatorTree &PDT,
885 const TargetLibraryInfo &TLI) {
886 DSEState State(F, AA, MSSA, DT, PDT, TLI);
887 // Collect blocks with throwing instructions not modeled in MemorySSA and
888 // alloc-like objects.
889 unsigned PO = 0;
890 for (BasicBlock *BB : post_order(&F)) {
891 State.PostOrderNumbers[BB] = PO++;
892 for (Instruction &I : *BB) {
893 MemoryAccess *MA = MSSA.getMemoryAccess(&I);
894 if (I.mayThrow() && !MA)
895 State.ThrowingBlocks.insert(I.getParent());
896
897 auto *MD = dyn_cast_or_null<MemoryDef>(MA);
898 if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
899 (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
900 State.MemDefs.push_back(MD);
901 }
902 }
903
904 // Treat byval or inalloca arguments the same as Allocas, stores to them are
905 // dead at the end of the function.
906 for (Argument &AI : F.args())
907 if (AI.hasPassPointeeByValueCopyAttr()) {
908 // For byval, the caller doesn't know the address of the allocation.
909 if (AI.hasByValAttr())
910 State.InvisibleToCallerBeforeRet.insert({&AI, true});
911 State.InvisibleToCallerAfterRet.insert({&AI, true});
912 }
913
914 return State;
915 }
916
917 /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI
918 /// instruction) completely overwrites a store to the 'Earlier' location
919 /// (by \p EarlierI instruction).
920 /// Return OW_MaybePartial if \p Later does not completely overwrite
921 /// \p Earlier, but they both write to the same underlying object. In that
922 /// case, use isPartialOverwrite to check if \p Later partially overwrites
923 /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined.
924 OverwriteResult
925 isOverwrite(const Instruction *LaterI, const Instruction *EarlierI,
926 const MemoryLocation &Later, const MemoryLocation &Earlier,
927 int64_t &EarlierOff, int64_t &LaterOff) {
928 // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
929 // get imprecise values here, though (except for unknown sizes).
930 if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) {
931 // In case no constant size is known, try to use the IR values for the
932 // number of bytes written and check if they match.
933 const auto *LaterMemI = dyn_cast<MemIntrinsic>(LaterI);
934 const auto *EarlierMemI = dyn_cast<MemIntrinsic>(EarlierI);
935 if (LaterMemI && EarlierMemI) {
936 const Value *LaterV = LaterMemI->getLength();
937 const Value *EarlierV = EarlierMemI->getLength();
938 if (LaterV == EarlierV && BatchAA.isMustAlias(Earlier, Later))
939 return OW_Complete;
940 }
941
942 // Masked stores have imprecise locations, but we can reason about them
943 // to some extent.
944 return isMaskedStoreOverwrite(LaterI, EarlierI, BatchAA);
945 }
946
947 const uint64_t LaterSize = Later.Size.getValue();
948 const uint64_t EarlierSize = Earlier.Size.getValue();
949
950 // Query the alias information
951 AliasResult AAR = BatchAA.alias(Later, Earlier);
952
953 // If the start pointers are the same, we just have to compare sizes to see if
954 // the later store was larger than the earlier store.
955 if (AAR == AliasResult::MustAlias) {
956 // Make sure that the Later size is >= the Earlier size.
957 if (LaterSize >= EarlierSize)
958 return OW_Complete;
959 }
960
961 // If we hit a partial alias, we may have a full overwrite.
962 if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
963 int32_t Off = AAR.getOffset();
964 if (Off >= 0 && (uint64_t)Off + EarlierSize <= LaterSize)
965 return OW_Complete;
966 }
967
968 // Check to see if the later store is to the entire object (either a global,
969 // an alloca, or a byval/inalloca argument). If so, then it clearly
970 // overwrites any other store to the same object.
971 const Value *P1 = Earlier.Ptr->stripPointerCasts();
972 const Value *P2 = Later.Ptr->stripPointerCasts();
973 const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2);
974
975 // If we can't resolve the same pointers to the same object, then we can't
976 // analyze them at all.
977 if (UO1 != UO2)
978 return OW_Unknown;
979
980 // If the "Later" store is to a recognizable object, get its size.
981 uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, &F);
982 if (ObjectSize != MemoryLocation::UnknownSize)
983 if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
984 return OW_Complete;
985
986 // Okay, we have stores to two completely different pointers. Try to
987 // decompose the pointer into a "base + constant_offset" form. If the base
988 // pointers are equal, then we can reason about the two stores.
989 EarlierOff = 0;
990 LaterOff = 0;
991 const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
992 const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
993
994 // If the base pointers still differ, we have two completely different stores.
995 if (BP1 != BP2)
996 return OW_Unknown;
997
998 // The later access completely overlaps the earlier store if and only if
999 // both start and end of the earlier one are "inside" the later one:
1000 // |<->|--earlier--|<->|
1001 // |-------later-------|
1002 // Accesses may overlap if and only if start of one of them is "inside"
1003 // another one:
1004 // |<->|--earlier--|<----->|
1005 // |-------later-------|
1006 // OR
1007 // |----- earlier -----|
1008 // |<->|---later---|<----->|
1009 //
1010 // We have to be careful here as *Off is signed while *.Size is unsigned.
1011
1012 // Check if the earlier access starts "not before" the later one.
1013 if (EarlierOff >= LaterOff) {
1014 // If the earlier access ends "not after" the later access then the earlier
1015 // one is completely overwritten by the later one.
1016 if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
1017 return OW_Complete;
1018 // If start of the earlier access is "before" end of the later access then
1019 // accesses overlap.
1020 else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize)
1021 return OW_MaybePartial;
1022 }
1023 // If start of the later access is "before" end of the earlier access then
1024 // accesses overlap.
1025 else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) {
1026 return OW_MaybePartial;
1027 }
1028
1029 // Can reach here only if accesses are known not to overlap. There is no
1030 // dedicated code to indicate no overlap so signal "unknown".
1031 return OW_Unknown;
1032 }
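
The offset arithmetic at the end of isOverwrite can be exercised in isolation. The following sketch redefines the three result values locally and feeds in plain byte offsets and sizes (hypothetical numbers, not taken from any particular IR); it only illustrates the classification above:

// Standalone illustration of the offset/size overlap classification used at
// the end of isOverwrite. EarlierOff/LaterOff are byte offsets from a common
// base pointer; sizes are store widths in bytes.
#include <cassert>
#include <cstdint>
#include <cstdio>

enum OverwriteResult { OW_Complete, OW_MaybePartial, OW_Unknown };

static OverwriteResult classify(int64_t EarlierOff, uint64_t EarlierSize,
                                int64_t LaterOff, uint64_t LaterSize) {
  if (EarlierOff >= LaterOff) {
    // Earlier store fits entirely inside the later one.
    if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
      return OW_Complete;
    // Earlier store starts inside the later one but runs past its end.
    if (uint64_t(EarlierOff - LaterOff) < LaterSize)
      return OW_MaybePartial;
  } else if (uint64_t(LaterOff - EarlierOff) < EarlierSize) {
    // Later store starts inside the earlier one.
    return OW_MaybePartial;
  }
  return OW_Unknown; // no dedicated "no overlap" result, so report unknown
}

int main() {
  // later covers [0,8), earlier covers [2,6)  -> earlier fully overwritten
  assert(classify(/*EarlierOff=*/2, /*EarlierSize=*/4,
                  /*LaterOff=*/0, /*LaterSize=*/8) == OW_Complete);
  // later covers [0,4), earlier covers [2,10) -> only a partial overlap
  assert(classify(2, 8, 0, 4) == OW_MaybePartial);
  // later covers [0,4), earlier covers [8,12) -> disjoint, reported unknown
  assert(classify(8, 4, 0, 4) == OW_Unknown);
  std::printf("all classifications as expected\n");
  return 0;
}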
1033
1034 bool isInvisibleToCallerAfterRet(const Value *V) {
1035 if (isa<AllocaInst>(V))
1036 return true;
1037 auto I = InvisibleToCallerAfterRet.insert({V, false});
1038 if (I.second) {
1039 if (!isInvisibleToCallerBeforeRet(V)) {
1040 I.first->second = false;
1041 } else {
1042 auto *Inst = dyn_cast<Instruction>(V);
1043 if (Inst && isAllocLikeFn(Inst, &TLI))
1044 I.first->second = !PointerMayBeCaptured(V, true, false);
1045 }
1046 }
1047 return I.first->second;
1048 }
1049
1050 bool isInvisibleToCallerBeforeRet(const Value *V) {
1051 if (isa<AllocaInst>(V))
1052 return true;
1053 auto I = InvisibleToCallerBeforeRet.insert({V, false});
1054 if (I.second) {
1055 auto *Inst = dyn_cast<Instruction>(V);
1056 if (Inst && isAllocLikeFn(Inst, &TLI))
1057 // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1058 // with the killing MemoryDef. But we refrain from doing so for now to
1059 // limit compile-time and this does not cause any changes to the number
1060 // of stores removed on a large test set in practice.
1061 I.first->second = !PointerMayBeCaptured(V, false, true);
1062 }
1063 return I.first->second;
1064 }
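
Both isInvisibleToCallerBeforeRet and isInvisibleToCallerAfterRet rely on the same caching idiom: insert a pessimistic default and only run the expensive analysis when the insertion actually created the entry. A small standalone sketch, with std::unordered_map and a dummy predicate standing in for DenseMap and the capture analysis, is:

// Sketch of the caching idiom used above: insert a default, and only if the
// insertion happened (I.second == true) compute the real answer and store it
// through the returned iterator.
#include <cstdio>
#include <unordered_map>

static bool expensiveEscapeAnalysis(int Obj) {
  std::printf("  analysing object %d\n", Obj); // runs at most once per object
  return Obj % 2 == 0;                         // arbitrary stand-in predicate
}

static std::unordered_map<int, bool> Cache;

static bool isInvisible(int Obj) {
  auto I = Cache.insert({Obj, false}); // default: assume visible to caller
  if (I.second)                        // first query for this object
    I.first->second = expensiveEscapeAnalysis(Obj);
  return I.first->second;
}

int main() {
  // The second query for object 2 is answered from the cache.
  std::printf("%d %d %d\n", isInvisible(1), isInvisible(2), isInvisible(2));
  return 0;
}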
1065
1066 Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1067 if (!I->mayWriteToMemory())
1068 return None;
1069
1070 if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1071 return {MemoryLocation::getForDest(MTI)};
1072
1073 if (auto *CB = dyn_cast<CallBase>(I)) {
1074 // If the function may write to memory we do not know about, bail out.
1075 if (!CB->onlyAccessesArgMemory() &&
1076 !CB->onlyAccessesInaccessibleMemOrArgMem())
1077 return None;
1078
1079 LibFunc LF;
1080 if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1081 switch (LF) {
1082 case LibFunc_strcpy:
1083 case LibFunc_strncpy:
1084 case LibFunc_strcat:
1085 case LibFunc_strncat:
1086 return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1087 default:
1088 break;
1089 }
1090 }
1091 switch (CB->getIntrinsicID()) {
1092 case Intrinsic::init_trampoline:
1093 return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1094 case Intrinsic::masked_store:
1095 return {MemoryLocation::getForArgument(CB, 1, TLI)};
1096 default:
1097 break;
1098 }
1099 return None;
1100 }
1101
1102 return MemoryLocation::getOrNone(I);
1103 }
1104
1105 /// Returns true if \p UseInst completely overwrites \p DefLoc
1106 /// (stored by \p DefInst).
1107 bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1108 Instruction *UseInst) {
1109 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1110 // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1111 // MemoryDef.
1112 if (!UseInst->mayWriteToMemory())
1113 return false;
1114
1115 if (auto *CB = dyn_cast<CallBase>(UseInst))
1116 if (CB->onlyAccessesInaccessibleMemory())
1117 return false;
1118
1119 int64_t InstWriteOffset, DepWriteOffset;
1120 if (auto CC = getLocForWriteEx(UseInst))
1121 return isOverwrite(UseInst, DefInst, *CC, DefLoc, DepWriteOffset,
1122 InstWriteOffset) == OW_Complete;
1123 return false;
1124 }
1125
1126 /// Returns true if \p Def is not read before returning from the function.
1127 bool isWriteAtEndOfFunction(MemoryDef *Def) {
1128 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " Check if def " << *Def <<
" (" << *Def->getMemoryInst() << ") is at the end the function \n"
; } } while (false)
1129 << *Def->getMemoryInst()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " Check if def " << *Def <<
" (" << *Def->getMemoryInst() << ") is at the end the function \n"
; } } while (false)
1130 << ") is at the end the function \n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " Check if def " << *Def <<
" (" << *Def->getMemoryInst() << ") is at the end the function \n"
; } } while (false)
;
1131
1132 auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1133 if (!MaybeLoc) {
1134 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... could not get location for write.\n"
; } } while (false)
;
1135 return false;
1136 }
1137
1138 SmallVector<MemoryAccess *, 4> WorkList;
1139 SmallPtrSet<MemoryAccess *, 8> Visited;
1140 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1141 if (!Visited.insert(Acc).second)
1142 return;
1143 for (Use &U : Acc->uses())
1144 WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1145 };
1146 PushMemUses(Def);
1147 for (unsigned I = 0; I < WorkList.size(); I++) {
1148 if (WorkList.size() >= MemorySSAScanLimit) {
1149 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... hit exploration limit.\n"; }
} while (false)
;
1150 return false;
1151 }
1152
1153 MemoryAccess *UseAccess = WorkList[I];
1154 // Simply adding the users of MemoryPhi to the worklist is not enough,
1155 // because we might miss read clobbers in different iterations of a loop,
1156 // for example.
1157 // TODO: Add support for phi translation to handle the loop case.
1158 if (isa<MemoryPhi>(UseAccess))
1159 return false;
1160
1161 // TODO: Checking for aliasing is expensive. Consider reducing the amount
1162 // of times this is called and/or caching it.
1163 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1164 if (isReadClobber(*MaybeLoc, UseInst)) {
1165 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... hit read clobber " <<
*UseInst << ".\n"; } } while (false)
;
1166 return false;
1167 }
1168
1169 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1170 PushMemUses(UseDef);
1171 }
1172 return true;
1173 }
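
isWriteAtEndOfFunction is essentially a bounded breadth-first walk over MemorySSA uses that fails as soon as a read clobber (or the scan limit) is hit. A toy version of that walk over a hypothetical use graph, with plain integers standing in for MemoryAccesses, is sketched below:

// Standalone sketch of the bounded worklist walk above: starting from one
// node, push all users, and give up if a "read" is found or the exploration
// limit is reached. The graph is a made-up stand-in for MemorySSA use lists.
#include <cstdio>
#include <set>
#include <vector>

struct Node {
  bool IsRead = false;
  std::vector<int> Users; // indices of nodes that use this one
};

static bool notReadBeforeExit(const std::vector<Node> &G, int Start,
                              unsigned ScanLimit) {
  std::vector<int> WorkList;
  std::set<int> Visited;
  auto PushUsers = [&](int N) {
    if (!Visited.insert(N).second)
      return;
    for (int U : G[N].Users)
      WorkList.push_back(U);
  };
  PushUsers(Start);
  for (unsigned I = 0; I < WorkList.size(); ++I) {
    if (WorkList.size() >= ScanLimit)
      return false; // hit the exploration limit: answer conservatively
    int N = WorkList[I];
    if (G[N].IsRead)
      return false; // found a read clobber
    PushUsers(N);
  }
  return true; // no read reachable through the use graph
}

int main() {
  // 0 -> {1, 2}, 2 -> {3}; node 3 is a read.
  std::vector<Node> G(4);
  G[0].Users = {1, 2};
  G[2].Users = {3};
  G[3].IsRead = true;
  std::printf("%d\n", notReadBeforeExit(G, 0, 100)); // prints 0
  G[3].IsRead = false;
  std::printf("%d\n", notReadBeforeExit(G, 0, 100)); // prints 1
  return 0;
}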
1174
1175 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1176 /// pair with the MemoryLocation terminated by \p I and a boolean flag
1177 /// indicating whether \p I is a free-like call.
1178 Optional<std::pair<MemoryLocation, bool>>
1179 getLocForTerminator(Instruction *I) const {
1180 uint64_t Len;
1181 Value *Ptr;
1182 if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1183 m_Value(Ptr))))
1184 return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1185
1186 if (auto *CB = dyn_cast<CallBase>(I)) {
1187 if (isFreeCall(I, &TLI))
1188 return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
1189 true)};
1190 }
1191
1192 return None;
1193 }
1194
1195 /// Returns true if \p I is a memory terminator instruction like
1196 /// llvm.lifetime.end or free.
1197 bool isMemTerminatorInst(Instruction *I) const {
1198 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1199 return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1200 isFreeCall(I, &TLI);
1201 }
1202
1203 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1204 /// instruction \p AccessI.
1205 bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1206 Instruction *MaybeTerm) {
1207 Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1208 getLocForTerminator(MaybeTerm);
1209
1210 if (!MaybeTermLoc)
1211 return false;
1212
1213 // If the terminator is a free-like call, all accesses to the underlying
1214 // object can be considered terminated.
1215 if (getUnderlyingObject(Loc.Ptr) !=
1216 getUnderlyingObject(MaybeTermLoc->first.Ptr))
1217 return false;
1218
1219 auto TermLoc = MaybeTermLoc->first;
1220 if (MaybeTermLoc->second) {
1221 const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1222 return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1223 }
1224 int64_t InstWriteOffset, DepWriteOffset;
1225 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DepWriteOffset,
1226 InstWriteOffset) == OW_Complete;
1227 }
1228
1229 // Returns true if \p Use may read from \p DefLoc.
1230 bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1231 if (isNoopIntrinsic(UseInst))
55
Calling 'isNoopIntrinsic'
59
Returning from 'isNoopIntrinsic'
60
Taking false branch
1232 return false;
1233
1234 // Monotonic or weaker atomic stores can be re-ordered and do not need to
1235 // be treated as a read clobber.
1236 if (auto SI
61.1
'SI' is null
= dyn_cast<StoreInst>(UseInst))
61
Assuming 'UseInst' is not a 'StoreInst'
62
Taking false branch
1237 return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1238
1239 if (!UseInst->mayReadFromMemory())
63
Assuming the condition is true
64
Taking true branch
1240 return false;
65
Returning zero, which participates in a condition later
1241
1242 if (auto *CB = dyn_cast<CallBase>(UseInst))
1243 if (CB->onlyAccessesInaccessibleMemory())
1244 return false;
1245
1246 // NOTE: For calls, the number of stores removed could be slightly improved
1247 // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that showed to
1248 // be expensive compared to the benefits in practice. For now, avoid more
1249 // expensive analysis to limit compile-time.
1250 return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1251 }
1252
1253 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1254 /// loop. In particular, this guarantees that it only references a single
1255 /// MemoryLocation during execution of the containing function.
1256 bool IsGuaranteedLoopInvariant(Value *Ptr) {
1257 auto IsGuaranteedLoopInvariantBase = [this](Value *Ptr) {
1258 Ptr = Ptr->stripPointerCasts();
1259 if (auto *I = dyn_cast<Instruction>(Ptr)) {
1260 if (isa<AllocaInst>(Ptr))
1261 return true;
1262
1263 if (isAllocLikeFn(I, &TLI))
1264 return true;
1265
1266 return false;
1267 }
1268 return true;
1269 };
1270
1271 Ptr = Ptr->stripPointerCasts();
121
Called C++ object pointer is null
1272 if (auto *I = dyn_cast<Instruction>(Ptr)) {
1273 if (I->getParent()->isEntryBlock())
1274 return true;
1275 }
1276 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
1277 return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
1278 GEP->hasAllConstantIndices();
1279 }
1280 return IsGuaranteedLoopInvariantBase(Ptr);
1281 }
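
This is the function flagged by the report: on the analyzer's path, getLocForWriteEx yields a MemoryLocation whose Ptr is null, that pointer is forwarded from the call at line 1403 below, and the call to stripPointerCasts() at line 1271 then dereferences null. Whether that path is reachable with real IR is not something the report itself proves; if it is, an early bail-out along the following lines (an illustrative sketch with a toy Value class, not the upstream fix) would avoid the dereference:

// Minimal stand-in demonstrating the defensive pattern: bail out (treat the
// pointer as not provably loop invariant) instead of dereferencing null.
// "Value" here is a toy class, not llvm::Value.
#include <cstdio>

struct Value {
  Value *Stripped = nullptr;
  Value *stripPointerCasts() { return Stripped ? Stripped : this; }
};

static bool isGuaranteedLoopInvariant(Value *Ptr) {
  if (!Ptr)       // guard against a null MemoryLocation pointer
    return false; // the conservative answer keeps the caller stepping onward
  Ptr = Ptr->stripPointerCasts();
  // ... further checks on the stripped pointer would go here ...
  return true;
}

int main() {
  Value V;
  std::printf("%d %d\n", isGuaranteedLoopInvariant(&V),
              isGuaranteedLoopInvariant(nullptr)); // prints 1 0
  return 0;
}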
1282
1283 // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with
1284 // no read access between them or on any other path to a function exit block
1285 // if \p DefLoc is not accessible after the function returns. If there is no
1286 // such MemoryDef, return None. The returned value may not (completely)
1287 // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing
1288 // MemoryUse (read).
1289 Optional<MemoryAccess *>
1290 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1291 const MemoryLocation &DefLoc, const Value *DefUO,
1292 unsigned &ScanLimit, unsigned &WalkerStepLimit,
1293 bool IsMemTerm, unsigned &PartialLimit) {
1294 if (ScanLimit == 0 || WalkerStepLimit == 0) {
1
Assuming 'ScanLimit' is not equal to 0
2
Assuming 'WalkerStepLimit' is not equal to 0
3
Taking false branch
1295 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "\n ... hit scan limit\n"; } }
while (false)
;
1296 return None;
1297 }
1298
1299 MemoryAccess *Current = StartAccess;
1300 Instruction *KillingI = KillingDef->getMemoryInst();
1301 bool StepAgain;
1302 LLVM_DEBUG(dbgs() << " trying to get dominating access\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " trying to get dominating access\n"
; } } while (false)
;
4
Assuming 'DebugFlag' is false
5
Loop condition is false. Exiting loop
1303
1304 // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1305 Optional<MemoryLocation> CurrentLoc;
1306 do {
1307 StepAgain = false;
1308 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { { dbgs() << " visiting " << *Current
; if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef
>(Current)) dbgs() << " (" << *cast<MemoryUseOrDef
>(Current)->getMemoryInst() << ")"; dbgs() <<
"\n"; }; } } while (false)
6
Loop condition is false. Exiting loop
1309 dbgs() << " visiting " << *Current;do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { { dbgs() << " visiting " << *Current
; if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef
>(Current)) dbgs() << " (" << *cast<MemoryUseOrDef
>(Current)->getMemoryInst() << ")"; dbgs() <<
"\n"; }; } } while (false)
1310 if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { { dbgs() << " visiting " << *Current
; if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef
>(Current)) dbgs() << " (" << *cast<MemoryUseOrDef
>(Current)->getMemoryInst() << ")"; dbgs() <<
"\n"; }; } } while (false)
1311 dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { { dbgs() << " visiting " << *Current
; if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef
>(Current)) dbgs() << " (" << *cast<MemoryUseOrDef
>(Current)->getMemoryInst() << ")"; dbgs() <<
"\n"; }; } } while (false)
1312 << ")";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { { dbgs() << " visiting " << *Current
; if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef
>(Current)) dbgs() << " (" << *cast<MemoryUseOrDef
>(Current)->getMemoryInst() << ")"; dbgs() <<
"\n"; }; } } while (false)
1313 dbgs() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { { dbgs() << " visiting " << *Current
; if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef
>(Current)) dbgs() << " (" << *cast<MemoryUseOrDef
>(Current)->getMemoryInst() << ")"; dbgs() <<
"\n"; }; } } while (false)
1314 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { { dbgs() << " visiting " << *Current
; if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef
>(Current)) dbgs() << " (" << *cast<MemoryUseOrDef
>(Current)->getMemoryInst() << ")"; dbgs() <<
"\n"; }; } } while (false)
;
1315
1316 // Reached TOP.
1317 if (MSSA.isLiveOnEntryDef(Current)) {
7
Calling 'MemorySSA::isLiveOnEntryDef'
10
Returning from 'MemorySSA::isLiveOnEntryDef'
11
Taking false branch
1318 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... found LiveOnEntryDef\n"; }
} while (false)
;
1319 return None;
1320 }
1321
1322 // Cost of a step. Accesses in the same block are more likely to be valid
1323 // candidates for elimination, hence consider them cheaper.
1324 unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
12
Assuming the condition is false
13
'?' condition is false
1325 ? MemorySSASameBBStepCost
1326 : MemorySSAOtherBBStepCost;
1327 if (WalkerStepLimit <= StepCost) {
14
Assuming 'WalkerStepLimit' is > 'StepCost'
15
Taking false branch
1328 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... hit walker step limit\n";
} } while (false)
;
1329 return None;
1330 }
1331 WalkerStepLimit -= StepCost;
1332
1333 // Return for MemoryPhis. They cannot be eliminated directly and the
1334 // caller is responsible for traversing them.
1335 if (isa<MemoryPhi>(Current)) {
16
Assuming 'Current' is not a 'MemoryPhi'
17
Taking false branch
1336 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... found MemoryPhi\n"; } } while
(false)
;
1337 return Current;
1338 }
1339
1340 // Below, check if CurrentDef is a valid candidate to be eliminated by
1341 // KillingDef. If it is not, check the next candidate.
1342 MemoryDef *CurrentDef = cast<MemoryDef>(Current);
18
'Current' is a 'MemoryDef'
1343 Instruction *CurrentI = CurrentDef->getMemoryInst();
1344
1345 if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO))) {
19
Assuming the condition is false
20
Calling 'canSkipDef'
33
Returning from 'canSkipDef'
34
Taking false branch
1346 StepAgain = true;
1347 Current = CurrentDef->getDefiningAccess();
1348 continue;
1349 }
1350
1351 // Before we try to remove anything, check for any extra throwing
1352 // instructions that block us from DSEing
1353 if (mayThrowBetween(KillingI, CurrentI, DefUO)) {
35
Calling 'DSEState::mayThrowBetween'
44
Returning from 'DSEState::mayThrowBetween'
45
Taking false branch
1354 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... skip, may throw!\n"; } } while
(false)
;
1355 return None;
1356 }
1357
1358 // Check for anything that looks like it will be a barrier to further
1359 // removal
1360 if (isDSEBarrier(DefUO, CurrentI)) {
46
Calling 'DSEState::isDSEBarrier'
51
Returning from 'DSEState::isDSEBarrier'
52
Taking false branch
1361 LLVM_DEBUG(dbgs() << " ... skip, barrier\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... skip, barrier\n"; } } while
(false)
;
1362 return None;
1363 }
1364
1365 // If Current is known to be on a path that reads DefLoc or is a read
1366 // clobber, bail out, as the path is not profitable. We skip this check
1367 // for intrinsic calls, because the code knows how to handle memcpy
1368 // intrinsics.
1369 if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(DefLoc, CurrentI))
53
Assuming 'CurrentI' is not a 'IntrinsicInst'
54
Calling 'DSEState::isReadClobber'
66
Returning from 'DSEState::isReadClobber'
67
Taking false branch
1370 return None;
1371
1372 // Quick check if there are direct uses that are read-clobbers.
1373 if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) {
68
Calling 'any_of<llvm::iterator_range<llvm::Value::use_iterator_impl<llvm::Use>>, (lambda at /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp:1373:35)>'
74
Returning from 'any_of<llvm::iterator_range<llvm::Value::use_iterator_impl<llvm::Use>>, (lambda at /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp:1373:35)>'
75
Taking false branch
1374 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1375 return !MSSA.dominates(StartAccess, UseOrDef) &&
1376 isReadClobber(DefLoc, UseOrDef->getMemoryInst());
1377 return false;
1378 })) {
1379 LLVM_DEBUG(dbgs() << " ... found a read clobber\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... found a read clobber\n"; }
} while (false)
;
1380 return None;
1381 }
1382
1383 // If Current cannot be analyzed or is not removable, check the next
1384 // candidate.
1385 if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI)) {
76
Calling 'hasAnalyzableMemoryWrite'
104
Returning from 'hasAnalyzableMemoryWrite'
105
Assuming the condition is false
106
Taking false branch
1386 StepAgain = true;
1387 Current = CurrentDef->getDefiningAccess();
1388 continue;
1389 }
1390
1391 // If Current does not have an analyzable write location, skip it
1392 CurrentLoc = getLocForWriteEx(CurrentI);
107
Null pointer value stored to 'CurrentLoc.Storage..value.Ptr'
1393 if (!CurrentLoc) {
108
Calling 'Optional::operator bool'
116
Returning from 'Optional::operator bool'
117
Taking false branch
1394 StepAgain = true;
1395 Current = CurrentDef->getDefiningAccess();
1396 continue;
1397 }
1398
1399 // AliasAnalysis does not account for loops. Limit elimination to
1400 // candidates for which we can guarantee they always store to the same
1401 // memory location and not multiple locations in a loop.
1402 if (Current->getBlock() != KillingDef->getBlock() &&
118
Assuming the condition is true
1403 !IsGuaranteedLoopInvariant(const_cast<Value *>(CurrentLoc->Ptr))) {
119
Passing null pointer value via 1st parameter 'Ptr'
120
Calling 'DSEState::IsGuaranteedLoopInvariant'
1404 StepAgain = true;
1405 Current = CurrentDef->getDefiningAccess();
1406 WalkerStepLimit -= 1;
1407 continue;
1408 }
1409
1410 if (IsMemTerm) {
1411 // If the killing def is a memory terminator (e.g. lifetime.end), check
1412 // the next candidate if Current does not write the same
1413 // underlying object as the terminator.
1414 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
1415 StepAgain = true;
1416 Current = CurrentDef->getDefiningAccess();
1417 }
1418 continue;
1419 } else {
1420 int64_t InstWriteOffset, DepWriteOffset;
1421 auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc,
1422 DepWriteOffset, InstWriteOffset);
1423 // If Current does not write to the same object as KillingDef, check
1424 // the next candidate.
1425 if (OR == OW_Unknown) {
1426 StepAgain = true;
1427 Current = CurrentDef->getDefiningAccess();
1428 } else if (OR == OW_MaybePartial) {
1429 // If KillingDef only partially overwrites Current, check the next
1430 // candidate if the partial step limit is exceeded. This aggressively
1431 // limits the number of candidates for partial store elimination,
1432 // which are less likely to be removable in the end.
1433 if (PartialLimit <= 1) {
1434 StepAgain = true;
1435 Current = CurrentDef->getDefiningAccess();
1436 WalkerStepLimit -= 1;
1437 continue;
1438 }
1439 PartialLimit -= 1;
1440 }
1441 }
1442 } while (StepAgain);
1443
1444 // Accesses to objects accessible after the function returns can only be
1445 // eliminated if the access is killed along all paths to the exit. Collect
1446 // the blocks with killing (=completely overwriting MemoryDefs) and check if
1447 // they cover all paths from EarlierAccess to any function exit.
1448 SmallPtrSet<Instruction *, 16> KillingDefs;
1449 KillingDefs.insert(KillingDef->getMemoryInst());
1450 MemoryAccess *EarlierAccess = Current;
1451 Instruction *EarlierMemInst =
1452 cast<MemoryDef>(EarlierAccess)->getMemoryInst();
1453 LLVM_DEBUG(dbgs() << " Checking for reads of " << *EarlierAccess << " ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " Checking for reads of " <<
*EarlierAccess << " (" << *EarlierMemInst <<
")\n"; } } while (false)
1454 << *EarlierMemInst << ")\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " Checking for reads of " <<
*EarlierAccess << " (" << *EarlierMemInst <<
")\n"; } } while (false)
;
1455
1456 SmallSetVector<MemoryAccess *, 32> WorkList;
1457 auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1458 for (Use &U : Acc->uses())
1459 WorkList.insert(cast<MemoryAccess>(U.getUser()));
1460 };
1461 PushMemUses(EarlierAccess);
1462
1463 // Optimistically collect all accesses for reads. If we do not find any
1464 // read clobbers, add them to the cache.
1465 SmallPtrSet<MemoryAccess *, 16> KnownNoReads;
1466 if (!EarlierMemInst->mayReadFromMemory())
1467 KnownNoReads.insert(EarlierAccess);
1468 // Check if EarlierDef may be read.
1469 for (unsigned I = 0; I < WorkList.size(); I++) {
1470 MemoryAccess *UseAccess = WorkList[I];
1471
1472 LLVM_DEBUG(dbgs() << " " << *UseAccess)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " " << *UseAccess; } } while
(false)
;
1473 // Bail out if the number of accesses to check exceeds the scan limit.
1474 if (ScanLimit < (WorkList.size() - I)) {
1475 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "\n ... hit scan limit\n"; } }
while (false)
;
1476 return None;
1477 }
1478 --ScanLimit;
1479 NumDomMemDefChecks++;
1480 KnownNoReads.insert(UseAccess);
1481
1482 if (isa<MemoryPhi>(UseAccess)) {
1483 if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1484 return DT.properlyDominates(KI->getParent(),
1485 UseAccess->getBlock());
1486 })) {
1487 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... skipping, dominated by killing block\n"
; } } while (false)
;
1488 continue;
1489 }
1490 LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "\n ... adding PHI uses\n"; } }
while (false)
;
1491 PushMemUses(UseAccess);
1492 continue;
1493 }
1494
1495 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1496 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " (" << *UseInst << ")\n"
; } } while (false)
;
1497
1498 if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
1499 return DT.dominates(KI, UseInst);
1500 })) {
1501 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... skipping, dominated by killing def\n"
; } } while (false)
;
1502 continue;
1503 }
1504
1505 // A memory terminator kills all preceding MemoryDefs and all succeeding
1506 // MemoryAccesses. We do not have to check its users.
1507 if (isMemTerminator(*CurrentLoc, EarlierMemInst, UseInst)) {
1508 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... skipping, memterminator invalidates following accesses\n"
; } } while (false)
1509 dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... skipping, memterminator invalidates following accesses\n"
; } } while (false)
1510 << " ... skipping, memterminator invalidates following accesses\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... skipping, memterminator invalidates following accesses\n"
; } } while (false)
;
1511 continue;
1512 }
1513
1514 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1515 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... adding uses of intrinsic\n"
; } } while (false)
;
1516 PushMemUses(UseAccess);
1517 continue;
1518 }
1519
1520 if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) {
1521 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... found throwing instruction\n"
; } } while (false)
;
1522 return None;
1523 }
1524
1525 // Uses which may read the original MemoryDef mean we cannot eliminate the
1526 // original MD. Stop walk.
1527 if (isReadClobber(*CurrentLoc, UseInst)) {
1528 LLVM_DEBUG(dbgs() << " ... found read clobber\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... found read clobber\n"; } }
while (false)
;
1529 return None;
1530 }
1531
1532 // For the KillingDef and EarlierAccess we only have to check if it reads
1533 // the memory location.
1534 // TODO: It would probably be better to check for self-reads before
1535 // calling the function.
1536 if (KillingDef == UseAccess || EarlierAccess == UseAccess) {
1537 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... skipping killing def/dom access\n"
; } } while (false)
;
1538 continue;
1539 }
1540
1541 // Check all uses for MemoryDefs, except for defs completely overwriting
1542 // the original location. Otherwise we have to check uses of *all*
1543 // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
1544 // miss cases like the following
1545 // 1 = Def(LoE) ; <----- EarlierDef stores [0,1]
1546 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3]
1547 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3].
1548 // (The Use points to the *first* Def it may alias)
1549 // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias,
1550 // stores [0,1]
1551 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1552 if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) {
1553 if (!isInvisibleToCallerAfterRet(DefUO) &&
1554 UseAccess != EarlierAccess) {
1555 BasicBlock *MaybeKillingBlock = UseInst->getParent();
1556 if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1557 PostOrderNumbers.find(EarlierAccess->getBlock())->second) {
1558
1559 LLVM_DEBUG(dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... found killing def " <<
*UseInst << "\n"; } } while (false)
1560 << " ... found killing def " << *UseInst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... found killing def " <<
*UseInst << "\n"; } } while (false)
;
1561 KillingDefs.insert(UseInst);
1562 }
1563 }
1564 } else
1565 PushMemUses(UseDef);
1566 }
1567 }
1568
1569 // For accesses to locations visible after the function returns, make sure
1570 // that the location is killed (=overwritten) along all paths from
1571 // EarlierAccess to the exit.
1572 if (!isInvisibleToCallerAfterRet(DefUO)) {
1573 SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1574 for (Instruction *KD : KillingDefs)
1575 KillingBlocks.insert(KD->getParent());
1576 assert(!KillingBlocks.empty() &&(static_cast <bool> (!KillingBlocks.empty() && "Expected at least a single killing block"
) ? void (0) : __assert_fail ("!KillingBlocks.empty() && \"Expected at least a single killing block\""
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp"
, 1577, __extension__ __PRETTY_FUNCTION__))
1577 "Expected at least a single killing block")(static_cast <bool> (!KillingBlocks.empty() && "Expected at least a single killing block"
) ? void (0) : __assert_fail ("!KillingBlocks.empty() && \"Expected at least a single killing block\""
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp"
, 1577, __extension__ __PRETTY_FUNCTION__))
;
1578
1579 // Find the common post-dominator of all killing blocks.
1580 BasicBlock *CommonPred = *KillingBlocks.begin();
1581 for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
1582 I != E; I++) {
1583 if (!CommonPred)
1584 break;
1585 CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
1586 }
1587
1588 // If CommonPred is in the set of killing blocks, just check if it
1589 // post-dominates EarlierAccess.
1590 if (KillingBlocks.count(CommonPred)) {
1591 if (PDT.dominates(CommonPred, EarlierAccess->getBlock()))
1592 return {EarlierAccess};
1593 return None;
1594 }
1595
1596 // If the common post-dominator does not post-dominate EarlierAccess,
1597 // there is a path from EarlierAccess to an exit not going through a
1598 // killing block.
1599 if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) {
1600 SetVector<BasicBlock *> WorkList;
1601
1602 // If CommonPred is null, there are multiple exits from the function.
1603 // They all have to be added to the worklist.
1604 if (CommonPred)
1605 WorkList.insert(CommonPred);
1606 else
1607 for (BasicBlock *R : PDT.roots())
1608 WorkList.insert(R);
1609
1610 NumCFGTries++;
1611 // Check if all paths starting from an exit node go through one of the
1612 // killing blocks before reaching EarlierAccess.
1613 for (unsigned I = 0; I < WorkList.size(); I++) {
1614 NumCFGChecks++;
1615 BasicBlock *Current = WorkList[I];
1616 if (KillingBlocks.count(Current))
1617 continue;
1618 if (Current == EarlierAccess->getBlock())
1619 return None;
1620
1621 // EarlierAccess is reachable from the entry, so we don't have to
1622 // explore unreachable blocks further.
1623 if (!DT.isReachableFromEntry(Current))
1624 continue;
1625
1626 for (BasicBlock *Pred : predecessors(Current))
1627 WorkList.insert(Pred);
1628
1629 if (WorkList.size() >= MemorySSAPathCheckLimit)
1630 return None;
1631 }
1632 NumCFGSuccess++;
1633 return {EarlierAccess};
1634 }
1635 return None;
1636 }
1637
1638 // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is
1639 // potentially dead.
1640 return {EarlierAccess};
1641 }
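
The CFG check near the end of getDomMemoryDef walks backwards from the function's exit blocks and only declares EarlierAccess dead if every such walk reaches a killing block before it reaches EarlierAccess's block. A standalone approximation of that backward reachability test, using a toy predecessor-list CFG rather than LLVM's PostDominatorTree and SetVector, is:

// Backward walk from the exit blocks: the earlier store may only be removed
// if every path from it to an exit passes through a killing (overwriting)
// block, i.e. the earlier block is not backward reachable from an exit
// without crossing a killing block.
#include <cstdio>
#include <set>
#include <vector>

static bool killedOnAllExitPaths(const std::vector<std::vector<int>> &Preds,
                                 const std::set<int> &KillingBlocks,
                                 const std::vector<int> &Exits,
                                 int EarlierBlock) {
  std::vector<int> WorkList(Exits.begin(), Exits.end());
  std::set<int> Seen(Exits.begin(), Exits.end());
  for (unsigned I = 0; I < WorkList.size(); ++I) {
    int BB = WorkList[I];
    if (KillingBlocks.count(BB))
      continue;     // this path is covered by a killing block
    if (BB == EarlierBlock)
      return false; // found an exit path with no kill on it
    for (int P : Preds[BB])
      if (Seen.insert(P).second)
        WorkList.push_back(P);
  }
  return true;
}

int main() {
  // CFG: 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3; block 3 is the only exit.
  std::vector<std::vector<int>> Preds = {{}, {0}, {0}, {1, 2}};
  // Store in block 0, killed in blocks 1 and 2: every exit path is covered.
  std::printf("%d\n", killedOnAllExitPaths(Preds, {1, 2}, {3}, 0)); // 1
  // Killed only in block 1: the path 0 -> 2 -> 3 escapes, keep the store.
  std::printf("%d\n", killedOnAllExitPaths(Preds, {1}, {3}, 0));    // 0
  return 0;
}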
1642
1643 // Delete dead memory defs
1644 void deleteDeadInstruction(Instruction *SI) {
1645 MemorySSAUpdater Updater(&MSSA);
1646 SmallVector<Instruction *, 32> NowDeadInsts;
1647 NowDeadInsts.push_back(SI);
1648 --NumFastOther;
1649
1650 while (!NowDeadInsts.empty()) {
1651 Instruction *DeadInst = NowDeadInsts.pop_back_val();
1652 ++NumFastOther;
1653
1654 // Try to preserve debug information attached to the dead instruction.
1655 salvageDebugInfo(*DeadInst);
1656 salvageKnowledge(DeadInst);
1657
1658 // Remove the Instruction from MSSA.
1659 if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1660 if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1661 SkipStores.insert(MD);
1662 }
1663 Updater.removeMemoryAccess(MA);
1664 }
1665
1666 auto I = IOLs.find(DeadInst->getParent());
1667 if (I != IOLs.end())
1668 I->second.erase(DeadInst);
1669 // Remove its operands
1670 for (Use &O : DeadInst->operands())
1671 if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1672 O = nullptr;
1673 if (isInstructionTriviallyDead(OpI, &TLI))
1674 NowDeadInsts.push_back(OpI);
1675 }
1676
1677 DeadInst->eraseFromParent();
1678 }
1679 }
1680
1681 // Check for any extra throws between SI and NI that block DSE. This only
1682 // checks extra maythrows (those that aren't MemoryDefs). MemoryDefs that may
1683 // throw are handled during the walk from one def to the next.
1684 bool mayThrowBetween(Instruction *SI, Instruction *NI,
1685 const Value *SILocUnd) {
1686 // First see if we can ignore it by using the fact that SI is an
1687 // alloca/alloca like object that is not visible to the caller during
1688 // execution of the function.
1689 if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd))
36
Assuming 'SILocUnd' is null
1690 return false;
1691
1692 if (SI->getParent() == NI->getParent())
37
Assuming the condition is false
38
Taking false branch
1693 return ThrowingBlocks.count(SI->getParent());
1694 return !ThrowingBlocks.empty();
39
Calling 'SmallPtrSetImplBase::empty'
42
Returning from 'SmallPtrSetImplBase::empty'
43
Returning zero, which participates in a condition later
1695 }
1696
1697 // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
1698 // act as barriers:
1699 // * A memory instruction that may throw and \p SI accesses a non-stack
1700 // object.
1701 // * Atomic stores stronger than monotonic.
1702 bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) {
1703 // If NI may throw it acts as a barrier, unless SI accesses an alloca or
1704 // alloca-like object that does not escape.
1705 if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd))
47
Assuming the condition is false
1706 return true;
1707
1708 // If NI is an atomic load/store stronger than monotonic, do not try to
1709 // eliminate/reorder it.
1710 if (NI->isAtomic()) {
48
Assuming the condition is false
49
Taking false branch
1711 if (auto *LI = dyn_cast<LoadInst>(NI))
1712 return isStrongerThanMonotonic(LI->getOrdering());
1713 if (auto *SI = dyn_cast<StoreInst>(NI))
1714 return isStrongerThanMonotonic(SI->getOrdering());
1715 if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI))
1716 return isStrongerThanMonotonic(ARMW->getOrdering());
1717 if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI))
1718 return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
1719 isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
1720 llvm_unreachable("other instructions should be skipped in MemorySSA")::llvm::llvm_unreachable_internal("other instructions should be skipped in MemorySSA"
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp"
, 1720)
;
1721 }
1722 return false;
50
Returning zero, which participates in a condition later
1723 }
1724
1725 /// Eliminate writes to objects that are not visible in the caller and are not
1726 /// accessed before returning from the function.
1727 bool eliminateDeadWritesAtEndOfFunction() {
1728 bool MadeChange = false;
1729 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "Trying to eliminate MemoryDefs at the end of the function\n"
; } } while (false)
1730 dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "Trying to eliminate MemoryDefs at the end of the function\n"
; } } while (false)
1731 << "Trying to eliminate MemoryDefs at the end of the function\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "Trying to eliminate MemoryDefs at the end of the function\n"
; } } while (false)
;
1732 for (int I = MemDefs.size() - 1; I >= 0; I--) {
1733 MemoryDef *Def = MemDefs[I];
1734 if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst()))
1735 continue;
1736
1737 Instruction *DefI = Def->getMemoryInst();
1738 SmallVector<const Value *, 4> Pointers;
1739 auto DefLoc = getLocForWriteEx(DefI);
1740 if (!DefLoc)
1741 continue;
1742
1743 // NOTE: Currently eliminating writes at the end of a function is limited
1744 // to MemoryDefs with a single underlying object, to save compile-time. In
1745 // practice it appears the case with multiple underlying objects is very
1746 // uncommon. If it turns out to be important, we can use
1747 // getUnderlyingObjects here instead.
1748 const Value *UO = getUnderlyingObject(DefLoc->Ptr);
1749 if (!UO || !isInvisibleToCallerAfterRet(UO))
1750 continue;
1751
1752 if (isWriteAtEndOfFunction(Def)) {
1753 // See through pointer-to-pointer bitcasts
1754 LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... MemoryDef is not accessed until the end "
"of the function\n"; } } while (false)
1755 "of the function\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " ... MemoryDef is not accessed until the end "
"of the function\n"; } } while (false)
;
1756 deleteDeadInstruction(DefI);
1757 ++NumFastStores;
1758 MadeChange = true;
1759 }
1760 }
1761 return MadeChange;
1762 }
1763
1764 /// \returns true if \p Def is a no-op store, either because it
1765 /// directly stores back a loaded value or stores zero to a calloced object.
1766 bool storeIsNoop(MemoryDef *Def, const MemoryLocation &DefLoc,
1767 const Value *DefUO) {
1768 StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
1769 MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst());
1770 Constant *StoredConstant = nullptr;
1771 if (Store)
1772 StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
1773 if (MemSet)
1774 StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1775
1776 if (StoredConstant && StoredConstant->isNullValue()) {
1777 auto *DefUOInst = dyn_cast<Instruction>(DefUO);
1778 if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) {
1779 auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
1780 // If UnderlyingDef is the clobbering access of Def, no instructions
1781 // between them can modify the memory location.
1782 auto *ClobberDef =
1783 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
1784 return UnderlyingDef == ClobberDef;
1785 }
1786 }
1787
1788 if (!Store)
1789 return false;
1790
1791 if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
1792 if (LoadI->getPointerOperand() == Store->getOperand(1)) {
1793 // Get the defining access for the load.
1794 auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
1795 // Fast path: the defining accesses are the same.
1796 if (LoadAccess == Def->getDefiningAccess())
1797 return true;
1798
1799 // Look through phi accesses. Recursively scan all phi accesses by
1800 // adding them to a worklist. Bail when we run into a memory def that
1801 // does not match LoadAccess.
1802 SetVector<MemoryAccess *> ToCheck;
1803 MemoryAccess *Current =
1804 MSSA.getWalker()->getClobberingMemoryAccess(Def);
1805 // We don't want to bail when we run into the store memory def. But,
1806 // the phi access may point to it. So, pretend like we've already
1807 // checked it.
1808 ToCheck.insert(Def);
1809 ToCheck.insert(Current);
1810 // Start at current (1) to simulate already having checked Def.
1811 for (unsigned I = 1; I < ToCheck.size(); ++I) {
1812 Current = ToCheck[I];
1813 if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
1814 // Check all the operands.
1815 for (auto &Use : PhiAccess->incoming_values())
1816 ToCheck.insert(cast<MemoryAccess>(&Use));
1817 continue;
1818 }
1819
1820 // If we found a memory def, bail. This happens when we have an
1821 // unrelated write in between an otherwise noop store.
1822 assert(isa<MemoryDef>(Current) &&(static_cast <bool> (isa<MemoryDef>(Current) &&
"Only MemoryDefs should reach here.") ? void (0) : __assert_fail
("isa<MemoryDef>(Current) && \"Only MemoryDefs should reach here.\""
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp"
, 1823, __extension__ __PRETTY_FUNCTION__))
1823 "Only MemoryDefs should reach here.")(static_cast <bool> (isa<MemoryDef>(Current) &&
"Only MemoryDefs should reach here.") ? void (0) : __assert_fail
("isa<MemoryDef>(Current) && \"Only MemoryDefs should reach here.\""
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp"
, 1823, __extension__ __PRETTY_FUNCTION__))
;
1824 // TODO: Skip no alias MemoryDefs that have no aliasing reads.
1825 // We are searching for the definition of the store's destination.
1826 // So, if that is the same definition as the load, then this is a
1827 // noop. Otherwise, fail.
1828 if (LoadAccess != Current)
1829 return false;
1830 }
1831 return true;
1832 }
1833 }
1834
1835 return false;
1836 }
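
The load/store case of storeIsNoop walks MemoryPhis: starting from the store's clobbering access, every MemoryDef reachable through phi operands must be exactly the load's defining access, otherwise an intervening write makes the store meaningful. A toy model of that walk (plain structs instead of MemorySSA classes; the example shape is made up) is:

// Toy model of the phi walk in storeIsNoop: accesses are either defs or phis.
// The store is a no-op only if every def reachable through phi operands from
// the store's clobbering access is the load's defining access.
#include <cstdio>
#include <set>
#include <vector>

struct Access {
  bool IsPhi = false;
  std::vector<int> Incoming; // phi operands (indices into the access table)
};

static bool storeIsNoop(const std::vector<Access> &Accs, int StoreClobber,
                        int LoadDefiningAccess, int StoreDef) {
  std::vector<int> ToCheck;
  std::set<int> Seen;
  auto Push = [&](int A) {
    if (Seen.insert(A).second)
      ToCheck.push_back(A);
  };
  Push(StoreDef);     // pretend the store's own def was already checked
  Push(StoreClobber); // start the real walk here
  for (unsigned I = 1; I < ToCheck.size(); ++I) {
    int A = ToCheck[I];
    if (Accs[A].IsPhi) {
      for (int Op : Accs[A].Incoming)
        Push(Op);
      continue;
    }
    if (A != LoadDefiningAccess)
      return false; // an unrelated write reaches the store
  }
  return true;
}

int main() {
  // Access 0: the load's defining def. Access 1: the store itself.
  // Access 2: a phi merging 0 and 1 (e.g. around a loop back-edge).
  std::vector<Access> Accs(3);
  Accs[2].IsPhi = true;
  Accs[2].Incoming = {0, 1};
  std::printf("%d\n", storeIsNoop(Accs, /*StoreClobber=*/2,
                                  /*LoadDefiningAccess=*/0,
                                  /*StoreDef=*/1)); // prints 1
  return 0;
}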
1837};
1838
1839bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1840 DominatorTree &DT, PostDominatorTree &PDT,
1841 const TargetLibraryInfo &TLI) {
1842 bool MadeChange = false;
1843
1844 DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI);
1845 // For each store:
1846 for (unsigned I = 0; I < State.MemDefs.size(); I++) {
1847 MemoryDef *KillingDef = State.MemDefs[I];
1848 if (State.SkipStores.count(KillingDef))
1849 continue;
1850 Instruction *SI = KillingDef->getMemoryInst();
1851
1852 Optional<MemoryLocation> MaybeSILoc;
1853 if (State.isMemTerminatorInst(SI))
1854 MaybeSILoc = State.getLocForTerminator(SI).map(
1855 [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
1856 else
1857 MaybeSILoc = State.getLocForWriteEx(SI);
1858
1859 if (!MaybeSILoc) {
1860 LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "Failed to find analyzable write location for "
<< *SI << "\n"; } } while (false)
1861 << *SI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "Failed to find analyzable write location for "
<< *SI << "\n"; } } while (false)
;
1862 continue;
1863 }
1864 MemoryLocation SILoc = *MaybeSILoc;
1865 assert(SILoc.Ptr && "SILoc should not be null")(static_cast <bool> (SILoc.Ptr && "SILoc should not be null"
) ? void (0) : __assert_fail ("SILoc.Ptr && \"SILoc should not be null\""
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp"
, 1865, __extension__ __PRETTY_FUNCTION__))
;
1866 const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);
1867
1868 MemoryAccess *Current = KillingDef;
1869 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "Trying to eliminate MemoryDefs killed by "
<< *Current << " (" << *SI << ")\n";
} } while (false)
1870 << *Current << " (" << *SI << ")\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "Trying to eliminate MemoryDefs killed by "
<< *Current << " (" << *SI << ")\n";
} } while (false)
;
1871
1872 unsigned ScanLimit = MemorySSAScanLimit;
1873 unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
1874 unsigned PartialLimit = MemorySSAPartialStoreLimit;
1875 // Worklist of MemoryAccesses that may be killed by KillingDef.
1876 SetVector<MemoryAccess *> ToCheck;
1877
1878 if (SILocUnd)
1879 ToCheck.insert(KillingDef->getDefiningAccess());
1880
1881 bool Shortend = false;
1882 bool IsMemTerm = State.isMemTerminatorInst(SI);
1883 // Check if MemoryAccesses in the worklist are killed by KillingDef.
1884 for (unsigned I = 0; I < ToCheck.size(); I++) {
1885 Current = ToCheck[I];
1886 if (State.SkipStores.count(Current))
1887 continue;
1888
1889 Optional<MemoryAccess *> Next = State.getDomMemoryDef(
1890 KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit,
1891 IsMemTerm, PartialLimit);
1892
1893 if (!Next) {
1894 LLVM_DEBUG(dbgs() << " finished walk\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " finished walk\n"; } } while (false
)
;
1895 continue;
1896 }
1897
1898 MemoryAccess *EarlierAccess = *Next;
1899 LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " Checking if we can kill " <<
*EarlierAccess; } } while (false)
;
1900 if (isa<MemoryPhi>(EarlierAccess)) {
1901 LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "\n ... adding incoming values to worklist\n"
; } } while (false)
;
1902 for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) {
1903 MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
1904 BasicBlock *IncomingBlock = IncomingAccess->getBlock();
1905 BasicBlock *PhiBlock = EarlierAccess->getBlock();
1906
1907 // We only consider incoming MemoryAccesses that come before the
1908 // MemoryPhi. Otherwise we could discover candidates that do not
1909 // strictly dominate our starting def.
1910 if (State.PostOrderNumbers[IncomingBlock] >
1911 State.PostOrderNumbers[PhiBlock])
1912 ToCheck.insert(IncomingAccess);
1913 }
1914 continue;
1915 }
1916 auto *NextDef = cast<MemoryDef>(EarlierAccess);
1917 Instruction *NI = NextDef->getMemoryInst();
1918 LLVM_DEBUG(dbgs() << " (" << *NI << ")\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << " (" << *NI << ")\n"; }
} while (false)
;
1919 ToCheck.insert(NextDef->getDefiningAccess());
1920 NumGetDomMemoryDefPassed++;
1921
1922 if (!DebugCounter::shouldExecute(MemorySSACounter))
1923 continue;
1924
1925 MemoryLocation NILoc = *State.getLocForWriteEx(NI);
1926
1927 if (IsMemTerm) {
1928 const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
1929 if (SILocUnd != NIUnd)
1930 continue;
1931 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NIdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "DSE: Remove Dead Store:\n DEAD: "
<< *NI << "\n KILLER: " << *SI << '\n'
; } } while (false)
1932 << "\n KILLER: " << *SI << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "DSE: Remove Dead Store:\n DEAD: "
<< *NI << "\n KILLER: " << *SI << '\n'
; } } while (false)
;
1933 State.deleteDeadInstruction(NI);
1934 ++NumFastStores;
1935 MadeChange = true;
1936 } else {
1937 // Check if NI overwrites SI.
1938 int64_t InstWriteOffset, DepWriteOffset;
1939 OverwriteResult OR = State.isOverwrite(SI, NI, SILoc, NILoc,
1940 DepWriteOffset, InstWriteOffset);
1941 if (OR == OW_MaybePartial) {
1942 auto Iter = State.IOLs.insert(
1943 std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
1944 NI->getParent(), InstOverlapIntervalsTy()));
1945 auto &IOL = Iter.first->second;
1946 OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset,
1947 NI, IOL);
1948 }
1949
1950 if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
1951 auto *Earlier = dyn_cast<StoreInst>(NI);
1952 auto *Later = dyn_cast<StoreInst>(SI);
1953 // We are re-using tryToMergePartialOverlappingStores, which requires
1954 // Earlier to dominate Later.
1955 // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
1956 if (Earlier && Later && DT.dominates(Earlier, Later)) {
1957 if (Constant *Merged = tryToMergePartialOverlappingStores(
1958 Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL,
1959 State.BatchAA, &DT)) {
1960
1961 // Update stored value of earlier store to merged constant.
1962 Earlier->setOperand(0, Merged);
1963 ++NumModifiedStores;
1964 MadeChange = true;
1965
1966 Shortend = true;
1967 // Remove later store and remove any outstanding overlap intervals
1968 // for the updated store.
1969 State.deleteDeadInstruction(Later);
1970 auto I = State.IOLs.find(Earlier->getParent());
1971 if (I != State.IOLs.end())
1972 I->second.erase(Earlier);
1973 break;
1974 }
1975 }
1976 }
1977
1978 if (OR == OW_Complete) {
1979 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NIdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "DSE: Remove Dead Store:\n DEAD: "
<< *NI << "\n KILLER: " << *SI << '\n'
; } } while (false)
1980 << "\n KILLER: " << *SI << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "DSE: Remove Dead Store:\n DEAD: "
<< *NI << "\n KILLER: " << *SI << '\n'
; } } while (false)
;
1981 State.deleteDeadInstruction(NI);
1982 ++NumFastStores;
1983 MadeChange = true;
1984 }
1985 }
1986 }
1987
1988 // Check if the store is a no-op.
1989 if (!Shortend && isRemovable(SI) &&
1990 State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
1991 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *SI << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("dse")) { dbgs() << "DSE: Remove No-Op Store:\n DEAD: "
<< *SI << '\n'; } } while (false)
;
1992 State.deleteDeadInstruction(SI);
1993 NumRedundantStores++;
1994 MadeChange = true;
1995 continue;
1996 }
1997 }
1998
1999 if (EnablePartialOverwriteTracking)
2000 for (auto &KV : State.IOLs)
2001 MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI);
2002
2003 MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2004 return MadeChange;
2005}
2006} // end anonymous namespace
2007
2008//===----------------------------------------------------------------------===//
2009// DSE Pass
2010//===----------------------------------------------------------------------===//
2011PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2012 AliasAnalysis &AA = AM.getResult<AAManager>(F);
2013 const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2014 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2015 MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2016 PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2017
2018 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI);
2019
2020#ifdef LLVM_ENABLE_STATS1
2021 if (AreStatisticsEnabled())
2022 for (auto &I : instructions(F))
2023 NumRemainingStores += isa<StoreInst>(&I);
2024#endif
2025
2026 if (!Changed)
2027 return PreservedAnalyses::all();
2028
2029 PreservedAnalyses PA;
2030 PA.preserveSet<CFGAnalyses>();
2031 PA.preserve<MemorySSAAnalysis>();
2032 return PA;
2033}
2034
2035namespace {
2036
2037/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
2038class DSELegacyPass : public FunctionPass {
2039public:
2040 static char ID; // Pass identification, replacement for typeid
2041
2042 DSELegacyPass() : FunctionPass(ID) {
2043 initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
2044 }
2045
2046 bool runOnFunction(Function &F) override {
2047 if (skipFunction(F))
2048 return false;
2049
2050 AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2051 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2052 const TargetLibraryInfo &TLI =
2053 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2054 MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2055 PostDominatorTree &PDT =
2056 getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2057
2058 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI);
2059
2060#ifdef LLVM_ENABLE_STATS1
2061 if (AreStatisticsEnabled())
2062 for (auto &I : instructions(F))
2063 NumRemainingStores += isa<StoreInst>(&I);
2064#endif
2065
2066 return Changed;
2067 }
2068
2069 void getAnalysisUsage(AnalysisUsage &AU) const override {
2070 AU.setPreservesCFG();
2071 AU.addRequired<AAResultsWrapperPass>();
2072 AU.addRequired<TargetLibraryInfoWrapperPass>();
2073 AU.addPreserved<GlobalsAAWrapperPass>();
2074 AU.addRequired<DominatorTreeWrapperPass>();
2075 AU.addPreserved<DominatorTreeWrapperPass>();
2076 AU.addRequired<PostDominatorTreeWrapperPass>();
2077 AU.addRequired<MemorySSAWrapperPass>();
2078 AU.addPreserved<PostDominatorTreeWrapperPass>();
2079 AU.addPreserved<MemorySSAWrapperPass>();
2080 }
2081};
2082
2083} // end anonymous namespace
2084
2085char DSELegacyPass::ID = 0;
2086
2087INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2088                      false)
2089INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2090INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2091INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2092INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2093INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2094INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2095INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2096INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2097                    false)
2098
2099FunctionPass *llvm::createDeadStoreEliminationPass() {
2100 return new DSELegacyPass();
2101}
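
For orientation (not part of the file above), here is a minimal, hypothetical driver showing how DSEPass is typically scheduled through the new pass manager; it assumes the stock PassBuilder registration provides the analyses DSEPass::run requests (alias analysis, MemorySSA, dominator and post-dominator trees, and TargetLibraryInfo).

#include "llvm/IR/Module.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/Scalar/DeadStoreElimination.h"

// Hypothetical sketch: run DSE over every function definition in M.
static void runDSEOnModule(llvm::Module &M) {
  llvm::PassBuilder PB;
  llvm::LoopAnalysisManager LAM;
  llvm::FunctionAnalysisManager FAM;
  llvm::CGSCCAnalysisManager CGAM;
  llvm::ModuleAnalysisManager MAM;
  // Standard new pass manager boilerplate: register the analyses and
  // cross-register the proxies so function passes can reach other levels.
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::DSEPass());
  for (llvm::Function &F : M)
    if (!F.isDeclaration())
      FPM.run(F, FAM);
}

The same transformation can also be requested from the command line with "opt -passes=dse".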

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/Analysis/MemorySSA.h

1//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file exposes an interface to building/using memory SSA to
11/// walk memory instructions using a use/def graph.
12///
13/// Memory SSA class builds an SSA form that links together memory access
14/// instructions such as loads, stores, atomics, and calls. Additionally, it
15/// does a trivial form of "heap versioning": every time the memory state
16/// changes in the program, we generate a new heap version. It generates
17/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
18///
19/// As a trivial example,
20/// define i32 @main() #0 {
21/// entry:
22/// %call = call noalias i8* @_Znwm(i64 4) #2
23/// %0 = bitcast i8* %call to i32*
24/// %call1 = call noalias i8* @_Znwm(i64 4) #2
25/// %1 = bitcast i8* %call1 to i32*
26/// store i32 5, i32* %0, align 4
27/// store i32 7, i32* %1, align 4
28/// %2 = load i32* %0, align 4
29/// %3 = load i32* %1, align 4
30/// %add = add nsw i32 %2, %3
31/// ret i32 %add
32/// }
33///
34/// Will become
35/// define i32 @main() #0 {
36/// entry:
37/// ; 1 = MemoryDef(0)
38/// %call = call noalias i8* @_Znwm(i64 4) #3
39/// %2 = bitcast i8* %call to i32*
40/// ; 2 = MemoryDef(1)
41/// %call1 = call noalias i8* @_Znwm(i64 4) #3
42/// %4 = bitcast i8* %call1 to i32*
43/// ; 3 = MemoryDef(2)
44/// store i32 5, i32* %2, align 4
45/// ; 4 = MemoryDef(3)
46/// store i32 7, i32* %4, align 4
47/// ; MemoryUse(3)
48/// %7 = load i32* %2, align 4
49/// ; MemoryUse(4)
50/// %8 = load i32* %4, align 4
51/// %add = add nsw i32 %7, %8
52/// ret i32 %add
53/// }
54///
55/// Given this form, all the stores that could ever affect the load at %8 can be
56/// gotten by using the MemoryUse associated with it, and walking from use to
57/// def until you hit the top of the function.
58///
59/// Each def also has a list of users associated with it, so you can walk from
60/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
61/// but not the RHS of MemoryDefs. You can see this above at %7, which would
62/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
63/// store, all the MemoryUses on its use lists are may-aliases of that store
64/// (but the MemoryDefs on its use list may not be).
65///
66/// MemoryDefs are not disambiguated because it would require multiple reaching
67/// definitions, which would require multiple phis, and multiple memoryaccesses
68/// per instruction.
69//
70//===----------------------------------------------------------------------===//
71
72#ifndef LLVM_ANALYSIS_MEMORYSSA_H
73#define LLVM_ANALYSIS_MEMORYSSA_H
74
75#include "llvm/ADT/DenseMap.h"
76#include "llvm/ADT/GraphTraits.h"
77#include "llvm/ADT/SmallPtrSet.h"
78#include "llvm/ADT/SmallVector.h"
79#include "llvm/ADT/ilist.h"
80#include "llvm/ADT/ilist_node.h"
81#include "llvm/ADT/iterator.h"
82#include "llvm/ADT/iterator_range.h"
83#include "llvm/ADT/simple_ilist.h"
84#include "llvm/Analysis/AliasAnalysis.h"
85#include "llvm/Analysis/MemoryLocation.h"
86#include "llvm/Analysis/PHITransAddr.h"
87#include "llvm/IR/BasicBlock.h"
88#include "llvm/IR/DerivedUser.h"
89#include "llvm/IR/Dominators.h"
90#include "llvm/IR/Module.h"
91#include "llvm/IR/Operator.h"
92#include "llvm/IR/Type.h"
93#include "llvm/IR/Use.h"
94#include "llvm/IR/User.h"
95#include "llvm/IR/Value.h"
96#include "llvm/IR/ValueHandle.h"
97#include "llvm/Pass.h"
98#include "llvm/Support/Casting.h"
99#include "llvm/Support/CommandLine.h"
100#include <algorithm>
101#include <cassert>
102#include <cstddef>
103#include <iterator>
104#include <memory>
105#include <utility>
106
107namespace llvm {
108
109/// Enables memory ssa as a dependency for loop passes.
110extern cl::opt<bool> EnableMSSALoopDependency;
111
112class AllocaInst;
113class Function;
114class Instruction;
115class MemoryAccess;
116class MemorySSAWalker;
117class LLVMContext;
118class raw_ostream;
119
120namespace MSSAHelpers {
121
122struct AllAccessTag {};
123struct DefsOnlyTag {};
124
125} // end namespace MSSAHelpers
126
127enum : unsigned {
128 // Used to signify what the default invalid ID is for MemoryAccess's
129 // getID()
130 INVALID_MEMORYACCESS_ID = -1U
131};
132
133template <class T> class memoryaccess_def_iterator_base;
134using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
135using const_memoryaccess_def_iterator =
136 memoryaccess_def_iterator_base<const MemoryAccess>;
137
138// The base for all memory accesses. All memory accesses in a block are
139// linked together using an intrusive list.
140class MemoryAccess
141 : public DerivedUser,
142 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
143 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
144public:
145 using AllAccessType =
146 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
147 using DefsOnlyType =
148 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
149
150 MemoryAccess(const MemoryAccess &) = delete;
151 MemoryAccess &operator=(const MemoryAccess &) = delete;
152
153 void *operator new(size_t) = delete;
154
155 // Methods for support type inquiry through isa, cast, and
156 // dyn_cast
157 static bool classof(const Value *V) {
158 unsigned ID = V->getValueID();
159 return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
160 }
161
162 BasicBlock *getBlock() const { return Block; }
163
164 void print(raw_ostream &OS) const;
165 void dump() const;
166
167 /// The user iterators for a memory access
168 using iterator = user_iterator;
169 using const_iterator = const_user_iterator;
170
171 /// This iterator walks over all of the defs in a given
172 /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
173 /// MemoryUse/MemoryDef, this walks the defining access.
174 memoryaccess_def_iterator defs_begin();
175 const_memoryaccess_def_iterator defs_begin() const;
176 memoryaccess_def_iterator defs_end();
177 const_memoryaccess_def_iterator defs_end() const;
178
179 /// Get the iterators for the all access list and the defs only list
180 /// We default to the all access list.
181 AllAccessType::self_iterator getIterator() {
182 return this->AllAccessType::getIterator();
183 }
184 AllAccessType::const_self_iterator getIterator() const {
185 return this->AllAccessType::getIterator();
186 }
187 AllAccessType::reverse_self_iterator getReverseIterator() {
188 return this->AllAccessType::getReverseIterator();
189 }
190 AllAccessType::const_reverse_self_iterator getReverseIterator() const {
191 return this->AllAccessType::getReverseIterator();
192 }
193 DefsOnlyType::self_iterator getDefsIterator() {
194 return this->DefsOnlyType::getIterator();
195 }
196 DefsOnlyType::const_self_iterator getDefsIterator() const {
197 return this->DefsOnlyType::getIterator();
198 }
199 DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
200 return this->DefsOnlyType::getReverseIterator();
201 }
202 DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
203 return this->DefsOnlyType::getReverseIterator();
204 }
205
206protected:
207 friend class MemoryDef;
208 friend class MemoryPhi;
209 friend class MemorySSA;
210 friend class MemoryUse;
211 friend class MemoryUseOrDef;
212
213 /// Used by MemorySSA to change the block of a MemoryAccess when it is
214 /// moved.
215 void setBlock(BasicBlock *BB) { Block = BB; }
216
217 /// Used for debugging and tracking things about MemoryAccesses.
218 /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
219 inline unsigned getID() const;
220
221 MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
222 BasicBlock *BB, unsigned NumOperands)
223 : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
224 Block(BB) {}
225
226 // Use deleteValue() to delete a generic MemoryAccess.
227 ~MemoryAccess() = default;
228
229private:
230 BasicBlock *Block;
231};
232
233template <>
234struct ilist_alloc_traits<MemoryAccess> {
235 static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
236};
237
238inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
239 MA.print(OS);
240 return OS;
241}
242
243/// Class that has the common methods + fields of memory uses/defs. It's
244/// a little awkward to have, but there are many cases where we want either a
245/// use or def, and there are many cases where uses are needed (defs aren't
246/// acceptable), and vice-versa.
247///
248/// This class should never be instantiated directly; make a MemoryUse or
249/// MemoryDef instead.
250class MemoryUseOrDef : public MemoryAccess {
251public:
252 void *operator new(size_t) = delete;
253
254 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
255
256 /// Get the instruction that this MemoryUse represents.
257 Instruction *getMemoryInst() const { return MemoryInstruction; }
258
259 /// Get the access that produces the memory state used by this Use.
260 MemoryAccess *getDefiningAccess() const { return getOperand(0); }
261
262 static bool classof(const Value *MA) {
263 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
264 }
265
266 // Sadly, these have to be public because they are needed in some of the
267 // iterators.
268 inline bool isOptimized() const;
269 inline MemoryAccess *getOptimized() const;
270 inline void setOptimized(MemoryAccess *);
271
272 // Retrieve AliasResult type of the optimized access. Ideally this would be
273 // returned by the caching walker and may go away in the future.
274 Optional<AliasResult> getOptimizedAccessType() const {
275 return isOptimized() ? OptimizedAccessAlias : None;
276 }
277
278 /// Reset the ID of what this MemoryUse was optimized to, causing it to
279 /// be rewalked by the walker if necessary.
280 /// This really should only be called by tests.
281 inline void resetOptimized();
282
283protected:
284 friend class MemorySSA;
285 friend class MemorySSAUpdater;
286
287 MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
288 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
289 unsigned NumOperands)
290 : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
291 MemoryInstruction(MI), OptimizedAccessAlias(AliasResult::MayAlias) {
292 setDefiningAccess(DMA);
293 }
294
295 // Use deleteValue() to delete a generic MemoryUseOrDef.
296 ~MemoryUseOrDef() = default;
297
298 void setOptimizedAccessType(Optional<AliasResult> AR) {
299 OptimizedAccessAlias = AR;
300 }
301
302 void setDefiningAccess(
303 MemoryAccess *DMA, bool Optimized = false,
304 Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias)) {
305 if (!Optimized) {
306 setOperand(0, DMA);
307 return;
308 }
309 setOptimized(DMA);
310 setOptimizedAccessType(AR);
311 }
312
313private:
314 Instruction *MemoryInstruction;
315 Optional<AliasResult> OptimizedAccessAlias;
316};
317
318/// Represents read-only accesses to memory
319///
320/// In particular, the set of Instructions that will be represented by
321/// MemoryUse's is exactly the set of Instructions for which
322/// AliasAnalysis::getModRefInfo returns "Ref".
323class MemoryUse final : public MemoryUseOrDef {
324public:
325 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
326
327 MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
328 : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
329 /*NumOperands=*/1) {}
330
331 // allocate space for exactly one operand
332 void *operator new(size_t s) { return User::operator new(s, 1); }
333
334 static bool classof(const Value *MA) {
335 return MA->getValueID() == MemoryUseVal;
336 }
337
338 void print(raw_ostream &OS) const;
339
340 void setOptimized(MemoryAccess *DMA) {
341 OptimizedID = DMA->getID();
342 setOperand(0, DMA);
343 }
344
345 bool isOptimized() const {
346 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
347 }
348
349 MemoryAccess *getOptimized() const {
350 return getDefiningAccess();
351 }
352
353 void resetOptimized() {
354 OptimizedID = INVALID_MEMORYACCESS_ID;
355 }
356
357protected:
358 friend class MemorySSA;
359
360private:
361 static void deleteMe(DerivedUser *Self);
362
363 unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
364};
365
366template <>
367struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
368DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
369
370/// Represents a read-write access to memory, whether it is a must-alias,
371/// or a may-alias.
372///
373/// In particular, the set of Instructions that will be represented by
374/// MemoryDef's is exactly the set of Instructions for which
375/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
376/// Note that, in order to provide def-def chains, all defs also have a use
377/// associated with them. This use points to the nearest reaching
378/// MemoryDef/MemoryPhi.
379class MemoryDef final : public MemoryUseOrDef {
380public:
381 friend class MemorySSA;
382
383 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
384
385 MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
386 unsigned Ver)
387 : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
388 /*NumOperands=*/2),
389 ID(Ver) {}
390
391 // allocate space for exactly two operands
392 void *operator new(size_t s) { return User::operator new(s, 2); }
393
394 static bool classof(const Value *MA) {
395 return MA->getValueID() == MemoryDefVal;
396 }
397
398 void setOptimized(MemoryAccess *MA) {
399 setOperand(1, MA);
400 OptimizedID = MA->getID();
401 }
402
403 MemoryAccess *getOptimized() const {
404 return cast_or_null<MemoryAccess>(getOperand(1));
405 }
406
407 bool isOptimized() const {
408 return getOptimized() && OptimizedID == getOptimized()->getID();
409 }
410
411 void resetOptimized() {
412 OptimizedID = INVALID_MEMORYACCESS_ID;
413 setOperand(1, nullptr);
414 }
415
416 void print(raw_ostream &OS) const;
417
418 unsigned getID() const { return ID; }
419
420private:
421 static void deleteMe(DerivedUser *Self);
422
423 const unsigned ID;
424 unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
425};
426
427template <>
428struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
429DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
430
431template <>
432struct OperandTraits<MemoryUseOrDef> {
433 static Use *op_begin(MemoryUseOrDef *MUD) {
434 if (auto *MU = dyn_cast<MemoryUse>(MUD))
435 return OperandTraits<MemoryUse>::op_begin(MU);
436 return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
437 }
438
439 static Use *op_end(MemoryUseOrDef *MUD) {
440 if (auto *MU = dyn_cast<MemoryUse>(MUD))
441 return OperandTraits<MemoryUse>::op_end(MU);
442 return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
443 }
444
445 static unsigned operands(const MemoryUseOrDef *MUD) {
446 if (const auto *MU = dyn_cast<MemoryUse>(MUD))
447 return OperandTraits<MemoryUse>::operands(MU);
448 return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
449 }
450};
451DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
452
453/// Represents phi nodes for memory accesses.
454///
455/// These have the same semantic as regular phi nodes, with the exception that
456/// only one phi will ever exist in a given basic block.
457/// Guaranteeing one phi per block means guaranteeing there is only ever one
458/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
459/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
460/// a MemoryPhi's operands.
461/// That is, given
462/// if (a) {
463/// store %a
464/// store %b
465/// }
466/// it *must* be transformed into
467/// if (a) {
468/// 1 = MemoryDef(liveOnEntry)
469/// store %a
470/// 2 = MemoryDef(1)
471/// store %b
472/// }
473/// and *not*
474/// if (a) {
475/// 1 = MemoryDef(liveOnEntry)
476/// store %a
477/// 2 = MemoryDef(liveOnEntry)
478/// store %b
479/// }
480/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
481/// end of the branch, and if there are not two phi nodes, one will be
482/// disconnected completely from the SSA graph below that point.
483/// Because MemoryUse's do not generate new definitions, they do not have this
484/// issue.
485class MemoryPhi final : public MemoryAccess {
486 // allocate space for exactly zero operands
487 void *operator new(size_t s) { return User::operator new(s); }
488
489public:
490 /// Provide fast operand accessors
491 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
492
493 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
494 : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
495 ReservedSpace(NumPreds) {
496 allocHungoffUses(ReservedSpace);
497 }
498
499 // Block iterator interface. This provides access to the list of incoming
500 // basic blocks, which parallels the list of incoming values.
501 using block_iterator = BasicBlock **;
502 using const_block_iterator = BasicBlock *const *;
503
504 block_iterator block_begin() {
505 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
506 }
507
508 const_block_iterator block_begin() const {
509 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
510 }
511
512 block_iterator block_end() { return block_begin() + getNumOperands(); }
513
514 const_block_iterator block_end() const {
515 return block_begin() + getNumOperands();
516 }
517
518 iterator_range<block_iterator> blocks() {
519 return make_range(block_begin(), block_end());
520 }
521
522 iterator_range<const_block_iterator> blocks() const {
523 return make_range(block_begin(), block_end());
524 }
525
526 op_range incoming_values() { return operands(); }
527
528 const_op_range incoming_values() const { return operands(); }
529
530 /// Return the number of incoming edges
531 unsigned getNumIncomingValues() const { return getNumOperands(); }
532
533 /// Return incoming value number x
534 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
535 void setIncomingValue(unsigned I, MemoryAccess *V) {
536 assert(V && "PHI node got a null value!");
537 setOperand(I, V);
538 }
539
540 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
541 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
542
543 /// Return incoming basic block number @p i.
544 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
545
546 /// Return incoming basic block corresponding
547 /// to an operand of the PHI.
548 BasicBlock *getIncomingBlock(const Use &U) const {
549 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
550 return getIncomingBlock(unsigned(&U - op_begin()));
551 }
552
553 /// Return incoming basic block corresponding
554 /// to value use iterator.
555 BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
556 return getIncomingBlock(I.getUse());
557 }
558
559 void setIncomingBlock(unsigned I, BasicBlock *BB) {
560 assert(BB && "PHI node got a null basic block!");
561 block_begin()[I] = BB;
562 }
563
564 /// Add an incoming value to the end of the PHI list
565 void addIncoming(MemoryAccess *V, BasicBlock *BB) {
566 if (getNumOperands() == ReservedSpace)
567 growOperands(); // Get more space!
568 // Initialize some new operands.
569 setNumHungOffUseOperands(getNumOperands() + 1);
570 setIncomingValue(getNumOperands() - 1, V);
571 setIncomingBlock(getNumOperands() - 1, BB);
572 }
573
574 /// Return the first index of the specified basic
575 /// block in the value list for this PHI. Returns -1 if no instance.
576 int getBasicBlockIndex(const BasicBlock *BB) const {
577 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
578 if (block_begin()[I] == BB)
579 return I;
580 return -1;
581 }
582
583 MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
584 int Idx = getBasicBlockIndex(BB);
585 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/Analysis/MemorySSA.h"
, 585, __extension__ __PRETTY_FUNCTION__))
;
586 return getIncomingValue(Idx);
587 }
588
589 // After deleting incoming position I, the order of incoming may be changed.
590 void unorderedDeleteIncoming(unsigned I) {
591 unsigned E = getNumOperands();
592 assert(I < E && "Cannot remove out of bounds Phi entry.");
593 // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
594 // itself should be deleted.
595 assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with "
596            "at least 2 values.");
597 setIncomingValue(I, getIncomingValue(E - 1));
598 setIncomingBlock(I, block_begin()[E - 1]);
599 setOperand(E - 1, nullptr);
600 block_begin()[E - 1] = nullptr;
601 setNumHungOffUseOperands(getNumOperands() - 1);
602 }
603
604 // After deleting entries that satisfy Pred, remaining entries may have
605 // changed order.
606 template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
607 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
608 if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
609 unorderedDeleteIncoming(I);
610 E = getNumOperands();
611 --I;
612 }
613 assert(getNumOperands() >= 1 &&
614        "Cannot remove all incoming blocks in a MemoryPhi.");
615 }
616
617 // After deleting incoming block BB, the incoming blocks order may be changed.
618 void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
619 unorderedDeleteIncomingIf(
620 [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
621 }
622
623 // After deleting incoming memory access MA, the incoming accesses order may
624 // be changed.
625 void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
626 unorderedDeleteIncomingIf(
627 [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
628 }
629
630 static bool classof(const Value *V) {
631 return V->getValueID() == MemoryPhiVal;
632 }
633
634 void print(raw_ostream &OS) const;
635
636 unsigned getID() const { return ID; }
637
638protected:
639 friend class MemorySSA;
640
641 /// this is more complicated than the generic
642 /// User::allocHungoffUses, because we have to allocate Uses for the incoming
643 /// values and pointers to the incoming blocks, all in one allocation.
644 void allocHungoffUses(unsigned N) {
645 User::allocHungoffUses(N, /* IsPhi */ true);
646 }
647
648private:
649 // For debugging only
650 const unsigned ID;
651 unsigned ReservedSpace;
652
653 /// This grows the operand list in response to a push_back style of
654 /// operation. This grows the number of ops by 1.5 times.
655 void growOperands() {
656 unsigned E = getNumOperands();
657 // 2 op PHI nodes are VERY common, so reserve at least enough for that.
658 ReservedSpace = std::max(E + E / 2, 2u);
659 growHungoffUses(ReservedSpace, /* IsPhi */ true);
660 }
661
662 static void deleteMe(DerivedUser *Self);
663};
664
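
The accessors above are the usual way to walk a phi's incoming edges. A short, hypothetical sketch (not part of this header), using only the public MemoryPhi API declared above:

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: print each predecessor block together with the
// MemoryAccess that reaches the phi along that edge.
static void dumpMemoryPhi(const llvm::MemoryPhi &Phi) {
  for (unsigned I = 0, E = Phi.getNumIncomingValues(); I != E; ++I)
    llvm::errs() << Phi.getIncomingBlock(I)->getName() << " -> "
                 << *Phi.getIncomingValue(I) << "\n";
}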
665inline unsigned MemoryAccess::getID() const {
666 assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
667        "only memory defs and phis have ids");
668 if (const auto *MD = dyn_cast<MemoryDef>(this))
669 return MD->getID();
670 return cast<MemoryPhi>(this)->getID();
671}
672
673inline bool MemoryUseOrDef::isOptimized() const {
674 if (const auto *MD = dyn_cast<MemoryDef>(this))
675 return MD->isOptimized();
676 return cast<MemoryUse>(this)->isOptimized();
677}
678
679inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
680 if (const auto *MD = dyn_cast<MemoryDef>(this))
681 return MD->getOptimized();
682 return cast<MemoryUse>(this)->getOptimized();
683}
684
685inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
686 if (auto *MD = dyn_cast<MemoryDef>(this))
687 MD->setOptimized(MA);
688 else
689 cast<MemoryUse>(this)->setOptimized(MA);
690}
691
692inline void MemoryUseOrDef::resetOptimized() {
693 if (auto *MD = dyn_cast<MemoryDef>(this))
694 MD->resetOptimized();
695 else
696 cast<MemoryUse>(this)->resetOptimized();
697}
698
699template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
700DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
701
702/// Encapsulates MemorySSA, including all data associated with memory
703/// accesses.
704class MemorySSA {
705public:
706 MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
707
708 // MemorySSA must remain where it's constructed; Walkers it creates store
709 // pointers to it.
710 MemorySSA(MemorySSA &&) = delete;
711
712 ~MemorySSA();
713
714 MemorySSAWalker *getWalker();
715 MemorySSAWalker *getSkipSelfWalker();
716
717 /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
718 /// access associated with it. If passed a basic block, this gets the memory
719 /// phi node that exists for that block, if there is one. Otherwise, this
720 /// will get a MemoryUseOrDef.
721 MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
722 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
723 }
724
725 MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
726 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
727 }
728
729 DominatorTree &getDomTree() const { return *DT; }
730
731 void dump() const;
732 void print(raw_ostream &) const;
733
734 /// Return true if \p MA represents the live on entry value
735 ///
736 /// Loads and stores from pointer arguments and other global values may be
737 /// defined by memory operations that do not occur in the current function, so
738 /// they may be live on entry to the function. MemorySSA represents such
739 /// memory state by the live on entry definition, which is guaranteed to occur
740 /// before any other memory access in the function.
741 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
742 return MA == LiveOnEntryDef.get();
  8. Assuming the condition is false
  9. Returning zero, which participates in a condition later
743 }
744
745 inline MemoryAccess *getLiveOnEntryDef() const {
746 return LiveOnEntryDef.get();
747 }
748
749 // Sadly, an iplist, by default, owns and deletes pointers added to the
750 // list. It's not currently possible to have two iplists for the same type,
751 // where one owns the pointers, and one does not. This is because the traits
752 // are per-type, not per-tag. If this ever changes, we should make the
753 // DefList an iplist.
754 using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
755 using DefsList =
756 simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
757
758 /// Return the list of MemoryAccess's for a given basic block.
759 ///
760 /// This list is not modifiable by the user.
761 const AccessList *getBlockAccesses(const BasicBlock *BB) const {
762 return getWritableBlockAccesses(BB);
763 }
764
765 /// Return the list of MemoryDef's and MemoryPhi's for a given basic
766 /// block.
767 ///
768 /// This list is not modifiable by the user.
769 const DefsList *getBlockDefs(const BasicBlock *BB) const {
770 return getWritableBlockDefs(BB);
771 }
772
773 /// Given two memory accesses in the same basic block, determine
774 /// whether MemoryAccess \p A dominates MemoryAccess \p B.
775 bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
776
777 /// Given two memory accesses in potentially different blocks,
778 /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
779 bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
780
781 /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
782 /// dominates Use \p B.
783 bool dominates(const MemoryAccess *A, const Use &B) const;
784
785 /// Verify that MemorySSA is self consistent (IE definitions dominate
786 /// all uses, uses appear in the right places). This is used by unit tests.
787 void verifyMemorySSA() const;
788
789 /// Used in various insertion functions to specify whether we are talking
790 /// about the beginning or end of a block.
791 enum InsertionPlace { Beginning, End, BeforeTerminator };
792
793protected:
794 // Used by Memory SSA annotator, dumpers, and wrapper pass
795 friend class MemorySSAAnnotatedWriter;
796 friend class MemorySSAPrinterLegacyPass;
797 friend class MemorySSAUpdater;
798
799 void verifyOrderingDominationAndDefUses(Function &F) const;
800 void verifyDominationNumbers(const Function &F) const;
801 void verifyPrevDefInPhis(Function &F) const;
802
803 // This is used by the use optimizer and updater.
804 AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
805 auto It = PerBlockAccesses.find(BB);
806 return It == PerBlockAccesses.end() ? nullptr : It->second.get();
807 }
808
809 // This is used by the use optimizer and updater.
810 DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
811 auto It = PerBlockDefs.find(BB);
812 return It == PerBlockDefs.end() ? nullptr : It->second.get();
813 }
814
815 // These are used by the updater to perform various internal MemorySSA
816 // machinations. They do not always leave the IR in a correct state, and
817 // rely on the updater to fix up what they break, so they are not public.
818
819 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
820 void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
821
822 // Rename the dominator tree branch rooted at BB.
823 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
824 SmallPtrSetImpl<BasicBlock *> &Visited) {
825 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
826 }
827
828 void removeFromLookups(MemoryAccess *);
829 void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
830 void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
831 InsertionPlace);
832 void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
833 AccessList::iterator);
834 MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
835 const MemoryUseOrDef *Template = nullptr,
836 bool CreationMustSucceed = true);
837
838private:
839 template <class AliasAnalysisType> class ClobberWalkerBase;
840 template <class AliasAnalysisType> class CachingWalker;
841 template <class AliasAnalysisType> class SkipSelfWalker;
842 class OptimizeUses;
843
844 CachingWalker<AliasAnalysis> *getWalkerImpl();
845 void buildMemorySSA(BatchAAResults &BAA);
846
847 void prepareForMoveTo(MemoryAccess *, BasicBlock *);
848 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
849
850 using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
851 using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
852
853 void markUnreachableAsLiveOnEntry(BasicBlock *BB);
854 MemoryPhi *createMemoryPhi(BasicBlock *BB);
855 template <typename AliasAnalysisType>
856 MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
857 const MemoryUseOrDef *Template = nullptr);
858 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
859 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
860 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
861 void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
862 SmallPtrSetImpl<BasicBlock *> &Visited,
863 bool SkipVisited = false, bool RenameAllUses = false);
864 AccessList *getOrCreateAccessList(const BasicBlock *);
865 DefsList *getOrCreateDefsList(const BasicBlock *);
866 void renumberBlock(const BasicBlock *) const;
867 AliasAnalysis *AA;
868 DominatorTree *DT;
869 Function &F;
870
871 // Memory SSA mappings
872 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
873
874 // These two mappings contain the main block to access/def mappings for
875 // MemorySSA. The list contained in PerBlockAccesses really owns all the
876 // MemoryAccesses.
877 // Both maps maintain the invariant that if a block is found in them, the
878 // corresponding list is not empty, and if a block is not found in them, the
879 // corresponding list is empty.
880 AccessMap PerBlockAccesses;
881 DefsMap PerBlockDefs;
882 std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
883
884 // Domination mappings
885 // Note that the numbering is local to a block, even though the map is
886 // global.
887 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
888 mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
889
890 // Memory SSA building info
891 std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
892 std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
893 std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
894 unsigned NextID;
895};
896
897// Internal MemorySSA utils, for use by MemorySSA classes and walkers
898class MemorySSAUtil {
899protected:
900 friend class GVNHoist;
901 friend class MemorySSAWalker;
902
903 // This function should not be used by new passes.
904 static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
905 AliasAnalysis &AA);
906};
907
908// This pass does eager building and then printing of MemorySSA. It is used by
909// the tests to be able to build, dump, and verify Memory SSA.
910class MemorySSAPrinterLegacyPass : public FunctionPass {
911public:
912 MemorySSAPrinterLegacyPass();
913
914 bool runOnFunction(Function &) override;
915 void getAnalysisUsage(AnalysisUsage &AU) const override;
916
917 static char ID;
918};
919
920/// An analysis that produces \c MemorySSA for a function.
921///
922class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
923 friend AnalysisInfoMixin<MemorySSAAnalysis>;
924
925 static AnalysisKey Key;
926
927public:
928 // Wrap MemorySSA result to ensure address stability of internal MemorySSA
929 // pointers after construction. Use a wrapper class instead of plain
930 // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
931 struct Result {
932 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
933
934 MemorySSA &getMSSA() { return *MSSA.get(); }
935
936 std::unique_ptr<MemorySSA> MSSA;
937
938 bool invalidate(Function &F, const PreservedAnalyses &PA,
939 FunctionAnalysisManager::Invalidator &Inv);
940 };
941
942 Result run(Function &F, FunctionAnalysisManager &AM);
943};
944
945/// Printer pass for \c MemorySSA.
946class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
947 raw_ostream &OS;
948
949public:
950 explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
951
952 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
953};
954
955/// Verifier pass for \c MemorySSA.
956struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
957 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
958};
959
960/// Legacy analysis pass which computes \c MemorySSA.
961class MemorySSAWrapperPass : public FunctionPass {
962public:
963 MemorySSAWrapperPass();
964
965 static char ID;
966
967 bool runOnFunction(Function &) override;
968 void releaseMemory() override;
969 MemorySSA &getMSSA() { return *MSSA; }
970 const MemorySSA &getMSSA() const { return *MSSA; }
971
972 void getAnalysisUsage(AnalysisUsage &AU) const override;
973
974 void verifyAnalysis() const override;
975 void print(raw_ostream &OS, const Module *M = nullptr) const override;
976
977private:
978 std::unique_ptr<MemorySSA> MSSA;
979};
980
981/// This is the generic walker interface for walkers of MemorySSA.
982/// Walkers are used to further disambiguate the def-use chains
983/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
984/// you.
985/// In particular, while the def-use chains provide basic information, and are
986/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
987/// MemoryUse as AliasAnalysis considers it, a user may want better or other
988/// information. In particular, they may want to use SCEV info to further
989/// disambiguate memory accesses, or they may want the nearest dominating
990/// may-aliasing MemoryDef for a call or a store. This API enables a
991/// standardized interface to getting and using that info.
992class MemorySSAWalker {
993public:
994 MemorySSAWalker(MemorySSA *);
995 virtual ~MemorySSAWalker() = default;
996
997 using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
998
999 /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
1000 /// will give you the nearest dominating MemoryAccess that Mod's the location
1001 /// the instruction accesses (by skipping any def which AA can prove does not
1002 /// alias the location(s) accessed by the instruction given).
1003 ///
1004 /// Note that this will return a single access, and it must dominate the
1005 /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
1006 /// this will return the MemoryPhi, not the operand. This means that
1007 /// given:
1008 /// if (a) {
1009 /// 1 = MemoryDef(liveOnEntry)
1010 /// store %a
1011 /// } else {
1012 /// 2 = MemoryDef(liveOnEntry)
1013 /// store %b
1014 /// }
1015 /// 3 = MemoryPhi(2, 1)
1016 /// MemoryUse(3)
1017 /// load %a
1018 ///
1019 /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
1020 /// in the if (a) branch.
1021 MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
1022 MemoryAccess *MA = MSSA->getMemoryAccess(I);
1023 assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
1024 return getClobberingMemoryAccess(MA);
1025 }
1026
1027 /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
1028 /// but takes a MemoryAccess instead of an Instruction.
1029 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
1030
1031 /// Given a potentially clobbering memory access and a new location,
1032 /// calling this will give you the nearest dominating clobbering MemoryAccess
1033 /// (by skipping non-aliasing def links).
1034 ///
1035 /// This version of the function is mainly used to disambiguate phi translated
1036 /// pointers, where the value of a pointer may have changed from the initial
1037 /// memory access. Note that this expects to be handed either a MemoryUse,
1038 /// or an already potentially clobbering access. Unlike the above API, if
1039 /// given a MemoryDef that clobbers the pointer as the starting access, it
1040 /// will return that MemoryDef, whereas the above would return the clobber
1041 /// starting from the use side of the memory def.
1042 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1043 const MemoryLocation &) = 0;
1044
1045 /// Given a memory access, invalidate anything this walker knows about
1046 /// that access.
1047 /// This API is used by walkers that store information to perform basic cache
1048 /// invalidation. This will be called by MemorySSA at appropriate times for
1049 /// the walker it uses or returns.
1050 virtual void invalidateInfo(MemoryAccess *) {}
1051
1052protected:
1053 friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
1054 // constructor.
1055 MemorySSA *MSSA;
1056};
1057
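
Putting the pieces together, the typical client pattern (and the one DSE uses in the file above) is: map an instruction to its MemoryUseOrDef, then ask a walker for the nearest dominating clobber. A hypothetical sketch, assuming MemorySSA has already been built for the function containing SI:

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: print the nearest dominating clobber of a store's
// memory state, or note that only the live-on-entry definition clobbers it.
static void printClobber(llvm::MemorySSA &MSSA, llvm::StoreInst *SI) {
  llvm::MemoryUseOrDef *MA = MSSA.getMemoryAccess(SI);
  if (!MA)
    return; // MemorySSA does not know about this instruction.
  llvm::MemoryAccess *Clobber =
      MSSA.getWalker()->getClobberingMemoryAccess(MA);
  if (MSSA.isLiveOnEntryDef(Clobber))
    llvm::errs() << "clobbered only by the live-on-entry definition\n";
  else
    llvm::errs() << *Clobber << "\n";
}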
1058/// A MemorySSAWalker that does no alias queries, or anything else. It
1059/// simply returns the links as they were constructed by the builder.
1060class DoNothingMemorySSAWalker final : public MemorySSAWalker {
1061public:
1062 // Keep the overrides below from hiding the Instruction overload of
1063 // getClobberingMemoryAccess.
1064 using MemorySSAWalker::getClobberingMemoryAccess;
1065
1066 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
1067 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1068 const MemoryLocation &) override;
1069};
1070
1071using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
1072using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
1073
1074/// Iterator base class used to implement const and non-const iterators
1075/// over the defining accesses of a MemoryAccess.
1076template <class T>
1077class memoryaccess_def_iterator_base
1078 : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
1079 std::forward_iterator_tag, T, ptrdiff_t, T *,
1080 T *> {
1081 using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
1082
1083public:
1084 memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
1085 memoryaccess_def_iterator_base() = default;
1086
1087 bool operator==(const memoryaccess_def_iterator_base &Other) const {
1088 return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
1089 }
1090
1091 // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
1092 // block from the operand in constant time (In a PHINode, the uselist has
1093 // both, so it's just subtraction). We provide it as part of the
1094 // iterator to avoid callers having to linear walk to get the block.
1095 // If the operation becomes constant time on MemoryPHI's, this bit of
1096 // abstraction breaking should be removed.
1097 BasicBlock *getPhiArgBlock() const {
1098 MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
1099 assert(MP && "Tried to get phi arg block when not iterating over a PHI");
1100 return MP->getIncomingBlock(ArgNo);
1101 }
1102
1103 typename std::iterator_traits<BaseT>::pointer operator*() const {
1104 assert(Access && "Tried to access past the end of our iterator");
1105 // Go to the first argument for phis, and the defining access for everything
1106 // else.
1107 if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
1108 return MP->getIncomingValue(ArgNo);
1109 return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
1110 }
1111
1112 using BaseT::operator++;
1113 memoryaccess_def_iterator_base &operator++() {
1114 assert(Access && "Hit end of iterator");
1115 if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
1116 if (++ArgNo >= MP->getNumIncomingValues()) {
1117 ArgNo = 0;
1118 Access = nullptr;
1119 }
1120 } else {
1121 Access = nullptr;
1122 }
1123 return *this;
1124 }
1125
1126private:
1127 T *Access = nullptr;
1128 unsigned ArgNo = 0;
1129};
1130
1131inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
1132 return memoryaccess_def_iterator(this);
1133}
1134
1135inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
1136 return const_memoryaccess_def_iterator(this);
1137}
1138
1139inline memoryaccess_def_iterator MemoryAccess::defs_end() {
1140 return memoryaccess_def_iterator();
1141}
1142
1143inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
1144 return const_memoryaccess_def_iterator();
1145}
1146
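// Sketch of the iterator defined above (hypothetical helper `visitDefs`; `MA`
// is any MemoryAccess): for a MemoryPhi this visits every incoming value, for
// a MemoryUse/MemoryDef it visits the single defining access.
#include "llvm/Analysis/MemorySSA.h"

static void visitDefs(llvm::MemoryAccess *MA) {
  for (auto It = MA->defs_begin(), E = MA->defs_end(); It != E; ++It) {
    llvm::MemoryAccess *Def = *It; // incoming value or defining access
    (void)Def;
  }
}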
1147/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
1148/// and uses in the inverse case.
1149template <> struct GraphTraits<MemoryAccess *> {
1150 using NodeRef = MemoryAccess *;
1151 using ChildIteratorType = memoryaccess_def_iterator;
1152
1153 static NodeRef getEntryNode(NodeRef N) { return N; }
1154 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
1155 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
1156};
1157
1158template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1159 using NodeRef = MemoryAccess *;
1160 using ChildIteratorType = MemoryAccess::iterator;
1161
1162 static NodeRef getEntryNode(NodeRef N) { return N; }
1163 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1164 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1165};
1166
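// Sketch: with the specializations above, generic graph helpers apply to
// MemoryAccess chains as well. This assumes the children() helper from
// llvm/ADT/GraphTraits.h; `MA` and `visitChildren` are hypothetical names.
#include "llvm/ADT/GraphTraits.h"
#include "llvm/Analysis/MemorySSA.h"

static void visitChildren(llvm::MemoryAccess *MA) {
  // Equivalent to walking MA->defs_begin() .. MA->defs_end().
  for (llvm::MemoryAccess *Def : llvm::children<llvm::MemoryAccess *>(MA))
    (void)Def;
}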
1167/// Provide an iterator that walks defs, giving both the memory access,
1168/// and the current pointer location, updating the pointer location as it
1169/// changes due to phi node translation.
1170///
1171/// This iterator, while somewhat specialized, is what most clients actually
1172/// want when walking upwards through MemorySSA def chains. It takes a pair of
1173/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
1174/// memory location through phi nodes for the user.
1175class upward_defs_iterator
1176 : public iterator_facade_base<upward_defs_iterator,
1177 std::forward_iterator_tag,
1178 const MemoryAccessPair> {
1179 using BaseT = upward_defs_iterator::iterator_facade_base;
1180
1181public:
1182 upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT,
1183 bool *PerformedPhiTranslation = nullptr)
1184 : DefIterator(Info.first), Location(Info.second),
1185 OriginalAccess(Info.first), DT(DT),
1186 PerformedPhiTranslation(PerformedPhiTranslation) {
1187 CurrentPair.first = nullptr;
1188
1189 WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1190 fillInCurrentPair();
1191 }
1192
1193 upward_defs_iterator() { CurrentPair.first = nullptr; }
1194
1195 bool operator==(const upward_defs_iterator &Other) const {
1196 return DefIterator == Other.DefIterator;
1197 }
1198
1199 typename std::iterator_traits<BaseT>::reference operator*() const {
1200 assert(DefIterator != OriginalAccess->defs_end() &&
1201        "Tried to access past the end of our iterator");
1202 return CurrentPair;
1203 }
1204
1205 using BaseT::operator++;
1206 upward_defs_iterator &operator++() {
1207 assert(DefIterator != OriginalAccess->defs_end() &&
1208        "Tried to access past the end of the iterator");
1209 ++DefIterator;
1210 if (DefIterator != OriginalAccess->defs_end())
1211 fillInCurrentPair();
1212 return *this;
1213 }
1214
1215 BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1216
1217private:
1218 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1219 /// loop. In particular, this guarantees that it only references a single
1220 /// MemoryLocation during execution of the containing function.
1221 bool IsGuaranteedLoopInvariant(Value *Ptr) const;
1222
1223 void fillInCurrentPair() {
1224 CurrentPair.first = *DefIterator;
1225 CurrentPair.second = Location;
1226 if (WalkingPhi && Location.Ptr) {
1227 // Mark size as unknown, if the location is not guaranteed to be
1228 // loop-invariant for any possible loop in the function. Setting the size
1229 // to unknown guarantees that any memory accesses that access locations
1230 // after the pointer are considered as clobbers, which is important to
1231 // catch loop carried dependences.
1232 if (Location.Ptr &&
1233 !IsGuaranteedLoopInvariant(const_cast<Value *>(Location.Ptr)))
1234 CurrentPair.second =
1235 Location.getWithNewSize(LocationSize::beforeOrAfterPointer());
1236 PHITransAddr Translator(
1237 const_cast<Value *>(Location.Ptr),
1238 OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
1239
1240 if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1241 DefIterator.getPhiArgBlock(), DT,
1242 true)) {
1243 Value *TransAddr = Translator.getAddr();
1244 if (TransAddr != Location.Ptr) {
1245 CurrentPair.second = CurrentPair.second.getWithNewPtr(TransAddr);
1246
1247 if (TransAddr &&
1248 !IsGuaranteedLoopInvariant(const_cast<Value *>(TransAddr)))
1249 CurrentPair.second = CurrentPair.second.getWithNewSize(
1250 LocationSize::beforeOrAfterPointer());
1251
1252 if (PerformedPhiTranslation)
1253 *PerformedPhiTranslation = true;
1254 }
1255 }
1256 }
1257 }
1258
1259 MemoryAccessPair CurrentPair;
1260 memoryaccess_def_iterator DefIterator;
1261 MemoryLocation Location;
1262 MemoryAccess *OriginalAccess = nullptr;
1263 DominatorTree *DT = nullptr;
1264 bool WalkingPhi = false;
1265 bool *PerformedPhiTranslation = nullptr;
1266};
1267
1268inline upward_defs_iterator
1269upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT,
1270 bool *PerformedPhiTranslation = nullptr) {
1271 return upward_defs_iterator(Pair, &DT, PerformedPhiTranslation);
1272}
1273
1274inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
1275
1276inline iterator_range<upward_defs_iterator>
1277upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
1278 return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
1279}
1280
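// Sketch of the helpers above (hypothetical names: `Start` is the access to
// walk up from, `Loc` the MemoryLocation of interest, `DT` the function's
// DominatorTree). The iterator rewrites the location as it crosses phis.
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Dominators.h"

static void walkUpwardDefs(llvm::MemoryAccess *Start,
                           const llvm::MemoryLocation &Loc,
                           llvm::DominatorTree &DT) {
  for (const llvm::MemoryAccessPair &P : llvm::upward_defs({Start, Loc}, DT)) {
    llvm::MemoryAccess *Def = P.first;               // current defining access
    const llvm::MemoryLocation &TransLoc = P.second; // possibly phi-translated
    (void)Def;
    (void)TransLoc;
  }
}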
1281/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1282/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1283/// comparing against a null def_chain_iterator, this will compare equal only
1284/// after walking said Phi/liveOnEntry.
1285///
1286/// The UseOptimizedChain flag specifies whether to walk the clobbering
1287/// access chain, or all the accesses.
1288///
1289/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
1290/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1291/// a phi node. The optimized chain walks the clobbering access of a store.
1292/// So if you are just trying to find, given a store, what the next
1293/// thing that would clobber the same memory is, you want the optimized chain.
1294template <class T, bool UseOptimizedChain = false>
1295struct def_chain_iterator
1296 : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1297 std::forward_iterator_tag, MemoryAccess *> {
1298 def_chain_iterator() : MA(nullptr) {}
1299 def_chain_iterator(T MA) : MA(MA) {}
1300
1301 T operator*() const { return MA; }
1302
1303 def_chain_iterator &operator++() {
1304 // N.B. liveOnEntry has a null defining access.
1305 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1306 if (UseOptimizedChain && MUD->isOptimized())
1307 MA = MUD->getOptimized();
1308 else
1309 MA = MUD->getDefiningAccess();
1310 } else {
1311 MA = nullptr;
1312 }
1313
1314 return *this;
1315 }
1316
1317 bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1318
1319private:
1320 T MA;
1321};
1322
1323template <class T>
1324inline iterator_range<def_chain_iterator<T>>
1325def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1326#ifdef EXPENSIVE_CHECKS
1327 assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1328        "UpTo isn't in the def chain!");
1329#endif
1330 return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
1331}
1332
1333template <class T>
1334inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
1335 return make_range(def_chain_iterator<T, true>(MA),
1336 def_chain_iterator<T, true>(nullptr));
1337}
1338
1339} // end namespace llvm
1340
1341#endif // LLVM_ANALYSIS_MEMORYSSA_H
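// Sketch of def_chain() (hypothetical helper `countChain`; `Start` is a
// MemoryAccess taken from a MemorySSA analysis): walks Start and every def
// above it, ending after a MemoryPhi or liveOnEntry terminates the chain.
#include "llvm/Analysis/MemorySSA.h"

static unsigned countChain(llvm::MemoryAccess *Start) {
  unsigned N = 0;
  for (llvm::MemoryAccess *MA : llvm::def_chain(Start)) {
    (void)MA; // includes Start itself
    ++N;
  }
  return N;
}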

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/SmallPtrSet.h

1//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallPtrSet class. See the doxygen comment for
10// SmallPtrSetImplBase for more details on the algorithm used.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_SMALLPTRSET_H
15#define LLVM_ADT_SMALLPTRSET_H
16
17#include "llvm/ADT/EpochTracker.h"
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/ReverseIteration.h"
20#include "llvm/Support/type_traits.h"
21#include <cassert>
22#include <cstddef>
23#include <cstdlib>
24#include <cstring>
25#include <initializer_list>
26#include <iterator>
27#include <utility>
28
29namespace llvm {
30
31/// SmallPtrSetImplBase - This is the common code shared among all the
32/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
33/// for small and one for large sets.
34///
35/// Small sets use an array of pointers allocated in the SmallPtrSet object,
36/// which is treated as a simple array of pointers. When a pointer is added to
37/// the set, the array is scanned to see if the element already exists; if not,
38/// the element is 'pushed back' onto the array. If we run out of space in the
39/// array, we grow into the 'large set' case. SmallSet should be used when the
40/// sets are often small. In this case, no memory allocation is used, and only
41/// light-weight and cache-efficient scanning is used.
42///
43/// Large sets use a classic exponentially-probed hash table. Empty buckets are
44/// represented with an illegal pointer value (-1) to allow null pointers to be
45/// inserted. Tombstones are represented with another illegal pointer value
46/// (-2), to allow deletion. The hash table is resized when the table is 3/4 or
47/// more full. When this happens, the table is doubled in size.
48///
49class SmallPtrSetImplBase : public DebugEpochBase {
50 friend class SmallPtrSetIteratorImpl;
51
52protected:
53 /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
54 const void **SmallArray;
55 /// CurArray - This is the current set of buckets. If equal to SmallArray,
56 /// then the set is in 'small mode'.
57 const void **CurArray;
58 /// CurArraySize - The allocated size of CurArray, always a power of two.
59 unsigned CurArraySize;
60
61 /// Number of elements in CurArray that contain a value or are a tombstone.
62 /// If small, all these elements are at the beginning of CurArray and the rest
63 /// is uninitialized.
64 unsigned NumNonEmpty;
65 /// Number of tombstones in CurArray.
66 unsigned NumTombstones;
67
68 // Helpers to copy and move construct a SmallPtrSet.
69 SmallPtrSetImplBase(const void **SmallStorage,
70 const SmallPtrSetImplBase &that);
71 SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
72 SmallPtrSetImplBase &&that);
73
74 explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize)
75 : SmallArray(SmallStorage), CurArray(SmallStorage),
76 CurArraySize(SmallSize), NumNonEmpty(0), NumTombstones(0) {
77 assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
78        "Initial size must be a power of two!");
79 }
80
81 ~SmallPtrSetImplBase() {
82 if (!isSmall())
83 free(CurArray);
84 }
85
86public:
87 using size_type = unsigned;
88
89 SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete;
90
91 LLVM_NODISCARD bool empty() const { return size() == 0; }
40
Assuming the condition is true
41
Returning the value 1, which participates in a condition later
92 size_type size() const { return NumNonEmpty - NumTombstones; }
93
94 void clear() {
95 incrementEpoch();
96 // If the capacity of the array is huge, and the # elements used is small,
97 // shrink the array.
98 if (!isSmall()) {
99 if (size() * 4 < CurArraySize && CurArraySize > 32)
100 return shrink_and_clear();
101 // Fill the array with empty markers.
102 memset(CurArray, -1, CurArraySize * sizeof(void *));
103 }
104
105 NumNonEmpty = 0;
106 NumTombstones = 0;
107 }
108
109protected:
110 static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
111
112 static void *getEmptyMarker() {
113 // Note that -1 is chosen to make clear() efficiently implementable with
114 // memset and because it's not a valid pointer value.
115 return reinterpret_cast<void*>(-1);
116 }
117
118 const void **EndPointer() const {
119 return isSmall() ? CurArray + NumNonEmpty : CurArray + CurArraySize;
120 }
121
122 /// insert_imp - This returns true if the pointer was new to the set, false if
123 /// it was already in the set. This is hidden from the client so that the
124 /// derived class can check that the right type of pointer is passed in.
125 std::pair<const void *const *, bool> insert_imp(const void *Ptr) {
126 if (isSmall()) {
127 // Check to see if it is already in the set.
128 const void **LastTombstone = nullptr;
129 for (const void **APtr = SmallArray, **E = SmallArray + NumNonEmpty;
130 APtr != E; ++APtr) {
131 const void *Value = *APtr;
132 if (Value == Ptr)
133 return std::make_pair(APtr, false);
134 if (Value == getTombstoneMarker())
135 LastTombstone = APtr;
136 }
137
138 // Did we find any tombstone marker?
139 if (LastTombstone != nullptr) {
140 *LastTombstone = Ptr;
141 --NumTombstones;
142 incrementEpoch();
143 return std::make_pair(LastTombstone, true);
144 }
145
146 // Nope, there isn't. If we stay small, just 'pushback' now.
147 if (NumNonEmpty < CurArraySize) {
148 SmallArray[NumNonEmpty++] = Ptr;
149 incrementEpoch();
150 return std::make_pair(SmallArray + (NumNonEmpty - 1), true);
151 }
152 // Otherwise, hit the big set case, which will call grow.
153 }
154 return insert_imp_big(Ptr);
155 }
156
157 /// erase_imp - If the set contains the specified pointer, remove it and
158 /// return true, otherwise return false. This is hidden from the client so
159 /// that the derived class can check that the right type of pointer is passed
160 /// in.
161 bool erase_imp(const void * Ptr) {
162 const void *const *P = find_imp(Ptr);
163 if (P == EndPointer())
164 return false;
165
166 const void **Loc = const_cast<const void **>(P);
167 assert(*Loc == Ptr && "broken find!");
168 *Loc = getTombstoneMarker();
169 NumTombstones++;
170 return true;
171 }
172
173 /// Returns the raw pointer needed to construct an iterator. If the element is
174 /// not found, this will be EndPointer. Otherwise, it will be a pointer to the
175 /// slot which stores Ptr.
176 const void *const * find_imp(const void * Ptr) const {
177 if (isSmall()) {
178 // Linear search for the item.
179 for (const void *const *APtr = SmallArray,
180 *const *E = SmallArray + NumNonEmpty; APtr != E; ++APtr)
181 if (*APtr == Ptr)
182 return APtr;
183 return EndPointer();
184 }
185
186 // Big set case.
187 auto *Bucket = FindBucketFor(Ptr);
188 if (*Bucket == Ptr)
189 return Bucket;
190 return EndPointer();
191 }
192
193private:
194 bool isSmall() const { return CurArray == SmallArray; }
195
196 std::pair<const void *const *, bool> insert_imp_big(const void *Ptr);
197
198 const void * const *FindBucketFor(const void *Ptr) const;
199 void shrink_and_clear();
200
201 /// Grow - Allocate a larger backing store for the buckets and move it over.
202 void Grow(unsigned NewSize);
203
204protected:
205 /// swap - Swaps the elements of two sets.
206 /// Note: This method assumes that both sets have the same small size.
207 void swap(SmallPtrSetImplBase &RHS);
208
209 void CopyFrom(const SmallPtrSetImplBase &RHS);
210 void MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
211
212private:
213 /// Code shared by MoveFrom() and move constructor.
214 void MoveHelper(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
215 /// Code shared by CopyFrom() and copy constructor.
216 void CopyHelper(const SmallPtrSetImplBase &RHS);
217};
218
219/// SmallPtrSetIteratorImpl - This is the common base class shared between all
220/// instances of SmallPtrSetIterator.
221class SmallPtrSetIteratorImpl {
222protected:
223 const void *const *Bucket;
224 const void *const *End;
225
226public:
227 explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
228 : Bucket(BP), End(E) {
229 if (shouldReverseIterate()) {
230 RetreatIfNotValid();
231 return;
232 }
233 AdvanceIfNotValid();
234 }
235
236 bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
237 return Bucket == RHS.Bucket;
238 }
239 bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
240 return Bucket != RHS.Bucket;
241 }
242
243protected:
244 /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
245 /// that is. This is guaranteed to stop because the end() bucket is marked
246 /// valid.
247 void AdvanceIfNotValid() {
248 assert(Bucket <= End);
249 while (Bucket != End &&
250 (*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
251 *Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
252 ++Bucket;
253 }
254 void RetreatIfNotValid() {
255 assert(Bucket >= End);
256 while (Bucket != End &&
257 (Bucket[-1] == SmallPtrSetImplBase::getEmptyMarker() ||
258 Bucket[-1] == SmallPtrSetImplBase::getTombstoneMarker())) {
259 --Bucket;
260 }
261 }
262};
263
264/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
265template <typename PtrTy>
266class SmallPtrSetIterator : public SmallPtrSetIteratorImpl,
267 DebugEpochBase::HandleBase {
268 using PtrTraits = PointerLikeTypeTraits<PtrTy>;
269
270public:
271 using value_type = PtrTy;
272 using reference = PtrTy;
273 using pointer = PtrTy;
274 using difference_type = std::ptrdiff_t;
275 using iterator_category = std::forward_iterator_tag;
276
277 explicit SmallPtrSetIterator(const void *const *BP, const void *const *E,
278 const DebugEpochBase &Epoch)
279 : SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {}
280
281 // Most methods are provided by the base class.
282
283 const PtrTy operator*() const {
284 assert(isHandleInSync() && "invalid iterator access!");
285 if (shouldReverseIterate()) {
286 assert(Bucket > End);
287 return PtrTraits::getFromVoidPointer(const_cast<void *>(Bucket[-1]));
288 }
289 assert(Bucket < End);
290 return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
291 }
292
293 inline SmallPtrSetIterator& operator++() { // Preincrement
294 assert(isHandleInSync() && "invalid iterator access!");
295 if (shouldReverseIterate()) {
296 --Bucket;
297 RetreatIfNotValid();
298 return *this;
299 }
300 ++Bucket;
301 AdvanceIfNotValid();
302 return *this;
303 }
304
305 SmallPtrSetIterator operator++(int) { // Postincrement
306 SmallPtrSetIterator tmp = *this;
307 ++*this;
308 return tmp;
309 }
310};
311
312/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
313/// power of two (which means N itself if N is already a power of two).
314template<unsigned N>
315struct RoundUpToPowerOfTwo;
316
317/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it. This is a
318/// helper template used to implement RoundUpToPowerOfTwo.
319template<unsigned N, bool isPowerTwo>
320struct RoundUpToPowerOfTwoH {
321 enum { Val = N };
322};
323template<unsigned N>
324struct RoundUpToPowerOfTwoH<N, false> {
325 enum {
326 // We could just use NextVal = N+1, but this converges faster. N|(N-1) sets
327 // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
328 Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
329 };
330};
331
332template<unsigned N>
333struct RoundUpToPowerOfTwo {
334 enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
335};
336
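// Sketch: the rounding is a pure compile-time computation, e.g.
#include "llvm/ADT/SmallPtrSet.h"
static_assert(llvm::RoundUpToPowerOfTwo<1>::Val == 1, "already a power of two");
static_assert(llvm::RoundUpToPowerOfTwo<5>::Val == 8, "rounded up to 8");
static_assert(llvm::RoundUpToPowerOfTwo<32>::Val == 32, "already a power of two");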
337/// A templated base class for \c SmallPtrSet which provides the
338/// typesafe interface that is common across all small sizes.
339///
340/// This is particularly useful for passing around between interface boundaries
341/// to avoid encoding a particular small size in the interface boundary.
342template <typename PtrType>
343class SmallPtrSetImpl : public SmallPtrSetImplBase {
344 using ConstPtrType = typename add_const_past_pointer<PtrType>::type;
345 using PtrTraits = PointerLikeTypeTraits<PtrType>;
346 using ConstPtrTraits = PointerLikeTypeTraits<ConstPtrType>;
347
348protected:
349 // Forward constructors to the base.
350 using SmallPtrSetImplBase::SmallPtrSetImplBase;
351
352public:
353 using iterator = SmallPtrSetIterator<PtrType>;
354 using const_iterator = SmallPtrSetIterator<PtrType>;
355 using key_type = ConstPtrType;
356 using value_type = PtrType;
357
358 SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;
359
360 /// Inserts Ptr if and only if there is no element in the container equal to
361 /// Ptr. The bool component of the returned pair is true if and only if the
362 /// insertion takes place, and the iterator component of the pair points to
363 /// the element equal to Ptr.
364 std::pair<iterator, bool> insert(PtrType Ptr) {
365 auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
366 return std::make_pair(makeIterator(p.first), p.second);
367 }
368
369 /// Insert the given pointer with an iterator hint that is ignored. This is
370 /// identical to calling insert(Ptr), but allows SmallPtrSet to be used by
371 /// std::insert_iterator and std::inserter().
372 iterator insert(iterator, PtrType Ptr) {
373 return insert(Ptr).first;
374 }
375
376 /// erase - If the set contains the specified pointer, remove it and return
377 /// true, otherwise return false.
378 bool erase(PtrType Ptr) {
379 return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
380 }
381 /// count - Return 1 if the specified pointer is in the set, 0 otherwise.
382 size_type count(ConstPtrType Ptr) const {
383 return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
384 }
385 iterator find(ConstPtrType Ptr) const {
386 return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)));
387 }
388 bool contains(ConstPtrType Ptr) const {
389 return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
390 }
391
392 template <typename IterT>
393 void insert(IterT I, IterT E) {
394 for (; I != E; ++I)
395 insert(*I);
396 }
397
398 void insert(std::initializer_list<PtrType> IL) {
399 insert(IL.begin(), IL.end());
400 }
401
402 iterator begin() const {
403 if (shouldReverseIterate())
404 return makeIterator(EndPointer() - 1);
405 return makeIterator(CurArray);
406 }
407 iterator end() const { return makeIterator(EndPointer()); }
408
409private:
410 /// Create an iterator that dereferences to same place as the given pointer.
411 iterator makeIterator(const void *const *P) const {
412 if (shouldReverseIterate())
413 return iterator(P == EndPointer() ? CurArray : P + 1, CurArray, *this);
414 return iterator(P, EndPointer(), *this);
415 }
416};
417
418/// Equality comparison for SmallPtrSet.
419///
420/// Iterates over elements of LHS confirming that each value from LHS is also in
421/// RHS, and that no additional values are in RHS.
422template <typename PtrType>
423bool operator==(const SmallPtrSetImpl<PtrType> &LHS,
424 const SmallPtrSetImpl<PtrType> &RHS) {
425 if (LHS.size() != RHS.size())
426 return false;
427
428 for (const auto *KV : LHS)
429 if (!RHS.count(KV))
430 return false;
431
432 return true;
433}
434
435/// Inequality comparison for SmallPtrSet.
436///
437/// Equivalent to !(LHS == RHS).
438template <typename PtrType>
439bool operator!=(const SmallPtrSetImpl<PtrType> &LHS,
440 const SmallPtrSetImpl<PtrType> &RHS) {
441 return !(LHS == RHS);
442}
443
444/// SmallPtrSet - This class implements a set which is optimized for holding
445/// SmallSize or less elements. This internally rounds up SmallSize to the next
446/// power of two if it is not already a power of two. See the comments above
447/// SmallPtrSetImplBase for details of the algorithm.
448template<class PtrType, unsigned SmallSize>
449class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
450 // In small mode SmallPtrSet uses linear search for the elements, so it is
451 // not a good idea to choose this value too high. You may consider using a
452 // DenseSet<> instead if you expect many elements in the set.
453 static_assert(SmallSize <= 32, "SmallSize should be small");
454
455 using BaseT = SmallPtrSetImpl<PtrType>;
456
457 // Make sure that SmallSize is a power of two, round up if not.
458 enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
459 /// SmallStorage - Fixed size storage used in 'small mode'.
460 const void *SmallStorage[SmallSizePowTwo];
461
462public:
463 SmallPtrSet() : BaseT(SmallStorage, SmallSizePowTwo) {}
464 SmallPtrSet(const SmallPtrSet &that) : BaseT(SmallStorage, that) {}
465 SmallPtrSet(SmallPtrSet &&that)
466 : BaseT(SmallStorage, SmallSizePowTwo, std::move(that)) {}
467
468 template<typename It>
469 SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) {
470 this->insert(I, E);
471 }
472
473 SmallPtrSet(std::initializer_list<PtrType> IL)
474 : BaseT(SmallStorage, SmallSizePowTwo) {
475 this->insert(IL.begin(), IL.end());
476 }
477
478 SmallPtrSet<PtrType, SmallSize> &
479 operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
480 if (&RHS != this)
481 this->CopyFrom(RHS);
482 return *this;
483 }
484
485 SmallPtrSet<PtrType, SmallSize> &
486 operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
487 if (&RHS != this)
488 this->MoveFrom(SmallSizePowTwo, std::move(RHS));
489 return *this;
490 }
491
492 SmallPtrSet<PtrType, SmallSize> &
493 operator=(std::initializer_list<PtrType> IL) {
494 this->clear();
495 this->insert(IL.begin(), IL.end());
496 return *this;
497 }
498
499 /// swap - Swaps the elements of two sets.
500 void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
501 SmallPtrSetImplBase::swap(RHS);
502 }
503};
504
505} // end namespace llvm
506
507namespace std {
508
509 /// Implement std::swap in terms of SmallPtrSet swap.
510 template<class T, unsigned N>
511 inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
512 LHS.swap(RHS);
513 }
514
515} // end namespace std
516
517#endif // LLVM_ADT_SMALLPTRSET_H
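// A minimal usage sketch for the set defined in this header (hypothetical
// helper `countUnique`): deduplicates pointers with no heap allocation while
// at most 8 distinct elements are present.
#include "llvm/ADT/SmallPtrSet.h"
#include <vector>

static unsigned countUnique(const std::vector<int *> &Ptrs) {
  llvm::SmallPtrSet<int *, 8> Seen; // SmallSize is rounded up to a power of two
  for (int *P : Ptrs)
    Seen.insert(P); // returns {iterator, whether the pointer was new}
  return Seen.size();
}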

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/STLExtras.h

1//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some templates that are useful if you are working with the
10// STL at all.
11//
12// No library is required when using these functions.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_ADT_STLEXTRAS_H
17#define LLVM_ADT_STLEXTRAS_H
18
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLForwardCompat.h"
21#include "llvm/ADT/iterator.h"
22#include "llvm/ADT/iterator_range.h"
23#include "llvm/Config/abi-breaking.h"
24#include "llvm/Support/ErrorHandling.h"
25#include <algorithm>
26#include <cassert>
27#include <cstddef>
28#include <cstdint>
29#include <cstdlib>
30#include <functional>
31#include <initializer_list>
32#include <iterator>
33#include <limits>
34#include <memory>
35#include <tuple>
36#include <type_traits>
37#include <utility>
38
39#ifdef EXPENSIVE_CHECKS
40#include <random> // for std::mt19937
41#endif
42
43namespace llvm {
44
45// Only used by compiler if both template types are the same. Useful when
46// using SFINAE to test for the existence of member functions.
47template <typename T, T> struct SameType;
48
49namespace detail {
50
51template <typename RangeT>
52using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
53
54template <typename RangeT>
55using ValueOfRange = typename std::remove_reference<decltype(
56 *std::begin(std::declval<RangeT &>()))>::type;
57
58} // end namespace detail
59
60//===----------------------------------------------------------------------===//
61// Extra additions to <type_traits>
62//===----------------------------------------------------------------------===//
63
64template <typename T> struct make_const_ptr {
65 using type =
66 typename std::add_pointer<typename std::add_const<T>::type>::type;
67};
68
69template <typename T> struct make_const_ref {
70 using type = typename std::add_lvalue_reference<
71 typename std::add_const<T>::type>::type;
72};
73
74namespace detail {
75template <typename...> using void_t = void;
76template <class, template <class...> class Op, class... Args> struct detector {
77 using value_t = std::false_type;
78};
79template <template <class...> class Op, class... Args>
80struct detector<void_t<Op<Args...>>, Op, Args...> {
81 using value_t = std::true_type;
82};
83} // end namespace detail
84
85/// Detects if a given trait holds for some set of arguments 'Args'.
86/// For example, the given trait could be used to detect if a given type
87/// has a copy assignment operator:
88/// template<class T>
89/// using has_copy_assign_t = decltype(std::declval<T&>()
90/// = std::declval<const T&>());
91/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
92template <template <class...> class Op, class... Args>
93using is_detected = typename detail::detector<void, Op, Args...>::value_t;
94
95namespace detail {
96template <typename Callable, typename... Args>
97using is_invocable =
98 decltype(std::declval<Callable &>()(std::declval<Args>()...));
99} // namespace detail
100
101/// Check if a Callable type can be invoked with the given set of arg types.
102template <typename Callable, typename... Args>
103using is_invocable = is_detected<detail::is_invocable, Callable, Args...>;
104
105/// This class provides various trait information about a callable object.
106/// * To access the number of arguments: Traits::num_args
107/// * To access the type of an argument: Traits::arg_t<Index>
108/// * To access the type of the result: Traits::result_t
109template <typename T, bool isClass = std::is_class<T>::value>
110struct function_traits : public function_traits<decltype(&T::operator())> {};
111
112/// Overload for class function types.
113template <typename ClassType, typename ReturnType, typename... Args>
114struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
115 /// The number of arguments to this function.
116 enum { num_args = sizeof...(Args) };
117
118 /// The result type of this function.
119 using result_t = ReturnType;
120
121 /// The type of an argument to this function.
122 template <size_t Index>
123 using arg_t = typename std::tuple_element<Index, std::tuple<Args...>>::type;
124};
125/// Overload for class function types.
126template <typename ClassType, typename ReturnType, typename... Args>
127struct function_traits<ReturnType (ClassType::*)(Args...), false>
128 : function_traits<ReturnType (ClassType::*)(Args...) const> {};
129/// Overload for non-class function types.
130template <typename ReturnType, typename... Args>
131struct function_traits<ReturnType (*)(Args...), false> {
132 /// The number of arguments to this function.
133 enum { num_args = sizeof...(Args) };
134
135 /// The result type of this function.
136 using result_t = ReturnType;
137
138 /// The type of an argument to this function.
139 template <size_t i>
140 using arg_t = typename std::tuple_element<i, std::tuple<Args...>>::type;
141};
142/// Overload for non-class function type references.
143template <typename ReturnType, typename... Args>
144struct function_traits<ReturnType (&)(Args...), false>
145 : public function_traits<ReturnType (*)(Args...)> {};
146
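// Sketch: the traits are compile-time queries. `ExampleFnTy` is an arbitrary
// illustrative function pointer type, not something defined by this header.
#include "llvm/ADT/STLExtras.h"
#include <type_traits>

using ExampleFnTy = int (*)(float, double);
static_assert(llvm::function_traits<ExampleFnTy>::num_args == 2, "");
static_assert(std::is_same<llvm::function_traits<ExampleFnTy>::result_t,
                           int>::value, "");
static_assert(std::is_same<llvm::function_traits<ExampleFnTy>::arg_t<1>,
                           double>::value, "");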
147//===----------------------------------------------------------------------===//
148// Extra additions to <functional>
149//===----------------------------------------------------------------------===//
150
151template <class Ty> struct identity {
152 using argument_type = Ty;
153
154 Ty &operator()(Ty &self) const {
155 return self;
156 }
157 const Ty &operator()(const Ty &self) const {
158 return self;
159 }
160};
161
162/// An efficient, type-erasing, non-owning reference to a callable. This is
163/// intended for use as the type of a function parameter that is not used
164/// after the function in question returns.
165///
166/// This class does not own the callable, so it is not in general safe to store
167/// a function_ref.
168template<typename Fn> class function_ref;
169
170template<typename Ret, typename ...Params>
171class function_ref<Ret(Params...)> {
172 Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
173 intptr_t callable;
174
175 template<typename Callable>
176 static Ret callback_fn(intptr_t callable, Params ...params) {
177 return (*reinterpret_cast<Callable*>(callable))(
178 std::forward<Params>(params)...);
179 }
180
181public:
182 function_ref() = default;
183 function_ref(std::nullptr_t) {}
184
185 template <typename Callable>
186 function_ref(
187 Callable &&callable,
188 // This is not the copy-constructor.
189 std::enable_if_t<!std::is_same<remove_cvref_t<Callable>,
190 function_ref>::value> * = nullptr,
191 // Functor must be callable and return a suitable type.
192 std::enable_if_t<std::is_void<Ret>::value ||
193 std::is_convertible<decltype(std::declval<Callable>()(
194 std::declval<Params>()...)),
195 Ret>::value> * = nullptr)
196 : callback(callback_fn<typename std::remove_reference<Callable>::type>),
197 callable(reinterpret_cast<intptr_t>(&callable)) {}
198
199 Ret operator()(Params ...params) const {
200 return callback(callable, std::forward<Params>(params)...);
201 }
202
203 explicit operator bool() const { return callback; }
204};
205
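// Sketch (hypothetical helpers `applyTwice` and `example`): the callable is
// passed by non-owning reference and must outlive the call, which a lambda
// written in the caller's full expression does.
#include "llvm/ADT/STLExtras.h"

static int applyTwice(llvm::function_ref<int(int)> F, int X) {
  return F(F(X));
}

static int example() {
  return applyTwice([](int V) { return V + 1; }, 3); // yields 5
}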
206//===----------------------------------------------------------------------===//
207// Extra additions to <iterator>
208//===----------------------------------------------------------------------===//
209
210namespace adl_detail {
211
212using std::begin;
213
214template <typename ContainerTy>
215decltype(auto) adl_begin(ContainerTy &&container) {
216 return begin(std::forward<ContainerTy>(container));
217}
218
219using std::end;
220
221template <typename ContainerTy>
222decltype(auto) adl_end(ContainerTy &&container) {
223 return end(std::forward<ContainerTy>(container));
224}
225
226using std::swap;
227
228template <typename T>
229void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
230 std::declval<T>()))) {
231 swap(std::forward<T>(lhs), std::forward<T>(rhs));
232}
233
234} // end namespace adl_detail
235
236template <typename ContainerTy>
237decltype(auto) adl_begin(ContainerTy &&container) {
238 return adl_detail::adl_begin(std::forward<ContainerTy>(container));
239}
240
241template <typename ContainerTy>
242decltype(auto) adl_end(ContainerTy &&container) {
243 return adl_detail::adl_end(std::forward<ContainerTy>(container));
244}
245
246template <typename T>
247void adl_swap(T &&lhs, T &&rhs) noexcept(
248 noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
249 adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
250}
251
252/// Test whether \p RangeOrContainer is empty. Similar to C++17 std::empty.
253template <typename T>
254constexpr bool empty(const T &RangeOrContainer) {
255 return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer);
256}
257
258/// Returns true if the given container only contains a single element.
259template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
260 auto B = std::begin(C), E = std::end(C);
261 return B != E && std::next(B) == E;
262}
263
264/// Return a range covering \p RangeOrContainer with the first N elements
265/// excluded.
266template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N = 1) {
267 return make_range(std::next(adl_begin(RangeOrContainer), N),
268 adl_end(RangeOrContainer));
269}
270
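// Sketch (hypothetical helper `sumTail`; assumes V has at least two
// elements): iterate a container with its first two elements skipped, without
// copying it.
#include "llvm/ADT/STLExtras.h"
#include <vector>

static int sumTail(const std::vector<int> &V) {
  int Sum = 0;
  for (int X : llvm::drop_begin(V, 2)) // visits V[2] .. V.back()
    Sum += X;
  return Sum;
}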
271// mapped_iterator - This is a simple iterator adapter that causes a function to
272// be applied whenever operator* is invoked on the iterator.
273
274template <typename ItTy, typename FuncTy,
275 typename FuncReturnTy =
276 decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
277class mapped_iterator
278 : public iterator_adaptor_base<
279 mapped_iterator<ItTy, FuncTy>, ItTy,
280 typename std::iterator_traits<ItTy>::iterator_category,
281 typename std::remove_reference<FuncReturnTy>::type> {
282public:
283 mapped_iterator(ItTy U, FuncTy F)
284 : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
285
286 ItTy getCurrent() { return this->I; }
287
288 FuncReturnTy operator*() const { return F(*this->I); }
289
290private:
291 FuncTy F;
292};
293
294// map_iterator - Provide a convenient way to create mapped_iterators, just like
295// make_pair is useful for creating pairs...
296template <class ItTy, class FuncTy>
297inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
298 return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
299}
300
301template <class ContainerTy, class FuncTy>
302auto map_range(ContainerTy &&C, FuncTy F) {
303 return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F));
304}
305
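// Sketch (hypothetical helper `sumOfSquares`): apply a function lazily while
// iterating; the lambda runs once per dereference and no temporary container
// is built.
#include "llvm/ADT/STLExtras.h"
#include <vector>

static int sumOfSquares(std::vector<int> &V) {
  int Sum = 0;
  for (int Sq : llvm::map_range(V, [](int X) { return X * X; }))
    Sum += Sq;
  return Sum;
}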
306/// Helper to determine if type T has a member called rbegin().
307template <typename Ty> class has_rbegin_impl {
308 using yes = char[1];
309 using no = char[2];
310
311 template <typename Inner>
312 static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
313
314 template <typename>
315 static no& test(...);
316
317public:
318 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
319};
320
321/// Metafunction to determine if T& or T has a member called rbegin().
322template <typename Ty>
323struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
324};
325
326// Returns an iterator_range over the given container which iterates in reverse.
327// Note that the container must have rbegin()/rend() methods for this to work.
328template <typename ContainerTy>
329auto reverse(ContainerTy &&C,
330 std::enable_if_t<has_rbegin<ContainerTy>::value> * = nullptr) {
331 return make_range(C.rbegin(), C.rend());
332}
333
334// Returns a std::reverse_iterator wrapped around the given iterator.
335template <typename IteratorTy>
336std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
337 return std::reverse_iterator<IteratorTy>(It);
338}
339
340// Returns an iterator_range over the given container which iterates in reverse.
341// Note that the container must have begin()/end() methods which return
342// bidirectional iterators for this to work.
343template <typename ContainerTy>
344auto reverse(ContainerTy &&C,
345 std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) {
346 return make_range(llvm::make_reverse_iterator(std::end(C)),
347 llvm::make_reverse_iterator(std::begin(C)));
348}
349
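// Sketch (hypothetical helper `lastNonZero`): iterate back-to-front.
// std::vector provides rbegin()/rend(), so the first reverse() overload above
// is chosen; containers without them fall back to the overload just above.
#include "llvm/ADT/STLExtras.h"
#include <vector>

static int lastNonZero(const std::vector<int> &V) {
  for (int X : llvm::reverse(V)) // visits V.back() first
    if (X != 0)
      return X;
  return 0;
}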
350/// An iterator adaptor that filters the elements of given inner iterators.
351///
352/// The predicate parameter should be a callable object that accepts the wrapped
353/// iterator's reference type and returns a bool. When incrementing or
354/// decrementing the iterator, it will call the predicate on each element and
355/// skip any where it returns false.
356///
357/// \code
358/// int A[] = { 1, 2, 3, 4 };
359/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
360/// // R contains { 1, 3 }.
361/// \endcode
362///
363/// Note: filter_iterator_base implements support for forward iteration.
364/// filter_iterator_impl exists to provide support for bidirectional iteration,
365/// conditional on whether the wrapped iterator supports it.
366template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
367class filter_iterator_base
368 : public iterator_adaptor_base<
369 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
370 WrappedIteratorT,
371 typename std::common_type<
372 IterTag, typename std::iterator_traits<
373 WrappedIteratorT>::iterator_category>::type> {
374 using BaseT = iterator_adaptor_base<
375 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
376 WrappedIteratorT,
377 typename std::common_type<
378 IterTag, typename std::iterator_traits<
379 WrappedIteratorT>::iterator_category>::type>;
380
381protected:
382 WrappedIteratorT End;
383 PredicateT Pred;
384
385 void findNextValid() {
386 while (this->I != End && !Pred(*this->I))
387 BaseT::operator++();
388 }
389
390 // Construct the iterator. The begin iterator needs to know where the end
391 // is, so that it can properly stop when it gets there. The end iterator only
392 // needs the predicate to support bidirectional iteration.
393 filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
394 PredicateT Pred)
395 : BaseT(Begin), End(End), Pred(Pred) {
396 findNextValid();
397 }
398
399public:
400 using BaseT::operator++;
401
402 filter_iterator_base &operator++() {
403 BaseT::operator++();
404 findNextValid();
405 return *this;
406 }
407};
408
409/// Specialization of filter_iterator_base for forward iteration only.
410template <typename WrappedIteratorT, typename PredicateT,
411 typename IterTag = std::forward_iterator_tag>
412class filter_iterator_impl
413 : public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
414 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>;
415
416public:
417 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
418 PredicateT Pred)
419 : BaseT(Begin, End, Pred) {}
420};
421
422/// Specialization of filter_iterator_base for bidirectional iteration.
423template <typename WrappedIteratorT, typename PredicateT>
424class filter_iterator_impl<WrappedIteratorT, PredicateT,
425 std::bidirectional_iterator_tag>
426 : public filter_iterator_base<WrappedIteratorT, PredicateT,
427 std::bidirectional_iterator_tag> {
428 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT,
429 std::bidirectional_iterator_tag>;
430 void findPrevValid() {
431 while (!this->Pred(*this->I))
432 BaseT::operator--();
433 }
434
435public:
436 using BaseT::operator--;
437
438 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
439 PredicateT Pred)
440 : BaseT(Begin, End, Pred) {}
441
442 filter_iterator_impl &operator--() {
443 BaseT::operator--();
444 findPrevValid();
445 return *this;
446 }
447};
448
449namespace detail {
450
451template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
452 using type = std::forward_iterator_tag;
453};
454
455template <> struct fwd_or_bidi_tag_impl<true> {
456 using type = std::bidirectional_iterator_tag;
457};
458
459/// Helper which sets its type member to forward_iterator_tag if the category
460/// of \p IterT does not derive from bidirectional_iterator_tag, and to
461/// bidirectional_iterator_tag otherwise.
462template <typename IterT> struct fwd_or_bidi_tag {
463 using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
464 std::bidirectional_iterator_tag,
465 typename std::iterator_traits<IterT>::iterator_category>::value>::type;
466};
467
468} // namespace detail
469
470/// Defines filter_iterator to a suitable specialization of
471/// filter_iterator_impl, based on the underlying iterator's category.
472template <typename WrappedIteratorT, typename PredicateT>
473using filter_iterator = filter_iterator_impl<
474 WrappedIteratorT, PredicateT,
475 typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;
476
477/// Convenience function that takes a range of elements and a predicate,
478/// and return a new filter_iterator range.
479///
480/// FIXME: Currently, if RangeT && is an rvalue reference to a temporary, the
481/// lifetime of that temporary is not kept by the returned range object, and the
482/// temporary is going to be dropped on the floor after the make_iterator_range
483/// full expression that contains this function call.
484template <typename RangeT, typename PredicateT>
485iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
486make_filter_range(RangeT &&Range, PredicateT Pred) {
487 using FilterIteratorT =
488 filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
489 return make_range(
490 FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
491 std::end(std::forward<RangeT>(Range)), Pred),
492 FilterIteratorT(std::end(std::forward<RangeT>(Range)),
493 std::end(std::forward<RangeT>(Range)), Pred));
494}
495
496/// A pseudo-iterator adaptor that is designed to implement "early increment"
497/// style loops.
498///
499/// This is *not a normal iterator* and should almost never be used directly. It
500/// is intended primarily to be used with range based for loops and some range
501/// algorithms.
502///
503/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
504/// somewhere between them. The constraints of these iterators are:
505///
506/// - On construction or after being incremented, it is comparable and
507/// dereferencable. It is *not* incrementable.
508/// - After being dereferenced, it is neither comparable nor dereferencable, it
509/// is only incrementable.
510///
511/// This means you can only dereference the iterator once, and you can only
512/// increment it once between dereferences.
513template <typename WrappedIteratorT>
514class early_inc_iterator_impl
515 : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
516 WrappedIteratorT, std::input_iterator_tag> {
517 using BaseT =
518 iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
519 WrappedIteratorT, std::input_iterator_tag>;
520
521 using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;
522
523protected:
524#if LLVM_ENABLE_ABI_BREAKING_CHECKS
525 bool IsEarlyIncremented = false;
526#endif
527
528public:
529 early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
530
531 using BaseT::operator*;
532 decltype(*std::declval<WrappedIteratorT>()) operator*() {
533#if LLVM_ENABLE_ABI_BREAKING_CHECKS
534 assert(!IsEarlyIncremented && "Cannot dereference twice!");
535 IsEarlyIncremented = true;
536#endif
537 return *(this->I)++;
538 }
539
540 using BaseT::operator++;
541 early_inc_iterator_impl &operator++() {
542#if LLVM_ENABLE_ABI_BREAKING_CHECKS
543 assert(IsEarlyIncremented && "Cannot increment before dereferencing!");
544 IsEarlyIncremented = false;
545#endif
546 return *this;
547 }
548
549 friend bool operator==(const early_inc_iterator_impl &LHS,
550 const early_inc_iterator_impl &RHS) {
551#if LLVM_ENABLE_ABI_BREAKING_CHECKS
552 assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!");
553#endif
554 return (const BaseT &)LHS == (const BaseT &)RHS;
555 }
556};
557
558/// Make a range that does early increment to allow mutation of the underlying
559/// range without disrupting iteration.
560///
561/// The underlying iterator will be incremented immediately after it is
562/// dereferenced, allowing deletion of the current node or insertion of nodes to
563/// not disrupt iteration provided they do not invalidate the *next* iterator --
564/// the current iterator can be invalidated.
565///
566/// This requires a very exact pattern of use that is only really suitable to
567/// range based for loops and other range algorithms that explicitly guarantee
568/// to dereference exactly once each element, and to increment exactly once each
569/// element.
570template <typename RangeT>
571iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
572make_early_inc_range(RangeT &&Range) {
573 using EarlyIncIteratorT =
574 early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
575 return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
576 EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
577}
578
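// Sketch (hypothetical helper `eraseEvens`): erase from a set while iterating
// it. This is safe because the wrapped iterator is advanced before each
// element is handed out, so erasing the current element does not break the
// walk.
#include "llvm/ADT/STLExtras.h"
#include <set>

static void eraseEvens(std::set<int> &S) {
  for (int X : llvm::make_early_inc_range(S))
    if (X % 2 == 0)
      S.erase(X); // only the already-passed element's iterator is invalidated
}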
579// forward declarations required by zip_shortest/zip_first/zip_longest
580template <typename R, typename UnaryPredicate>
581bool all_of(R &&range, UnaryPredicate P);
582template <typename R, typename UnaryPredicate>
583bool any_of(R &&range, UnaryPredicate P);
584
585namespace detail {
586
587using std::declval;
588
589// We have to alias this since inlining the actual type at the usage site
590// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
591template<typename... Iters> struct ZipTupleType {
592 using type = std::tuple<decltype(*declval<Iters>())...>;
593};
594
595template <typename ZipType, typename... Iters>
596using zip_traits = iterator_facade_base<
597 ZipType, typename std::common_type<std::bidirectional_iterator_tag,
598 typename std::iterator_traits<
599 Iters>::iterator_category...>::type,
600 // ^ TODO: Implement random access methods.
601 typename ZipTupleType<Iters...>::type,
602 typename std::iterator_traits<typename std::tuple_element<
603 0, std::tuple<Iters...>>::type>::difference_type,
604 // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
605 // inner iterators have the same difference_type. It would fail if, for
606 // instance, the second field's difference_type were non-numeric while the
607 // first is.
608 typename ZipTupleType<Iters...>::type *,
609 typename ZipTupleType<Iters...>::type>;
610
611template <typename ZipType, typename... Iters>
612struct zip_common : public zip_traits<ZipType, Iters...> {
613 using Base = zip_traits<ZipType, Iters...>;
614 using value_type = typename Base::value_type;
615
616 std::tuple<Iters...> iterators;
617
618protected:
619 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
620 return value_type(*std::get<Ns>(iterators)...);
621 }
622
623 template <size_t... Ns>
624 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
625 return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
626 }
627
628 template <size_t... Ns>
629 decltype(iterators) tup_dec(std::index_sequence<Ns...>) const {
630 return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
631 }
632
633public:
634 zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
635
636 value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
637
638 const value_type operator*() const {
639 return deref(std::index_sequence_for<Iters...>{});
640 }
641
642 ZipType &operator++() {
643 iterators = tup_inc(std::index_sequence_for<Iters...>{});
644 return *reinterpret_cast<ZipType *>(this);
645 }
646
647 ZipType &operator--() {
648 static_assert(Base::IsBidirectional,
649 "All inner iterators must be at least bidirectional.");
650 iterators = tup_dec(std::index_sequence_for<Iters...>{});
651 return *reinterpret_cast<ZipType *>(this);
652 }
653};
654
655template <typename... Iters>
656struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
657 using Base = zip_common<zip_first<Iters...>, Iters...>;
658
659 bool operator==(const zip_first<Iters...> &other) const {
660 return std::get<0>(this->iterators) == std::get<0>(other.iterators);
661 }
662
663 zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
664};
665
666template <typename... Iters>
667class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
668 template <size_t... Ns>
669 bool test(const zip_shortest<Iters...> &other,
670 std::index_sequence<Ns...>) const {
671 return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
672 std::get<Ns>(other.iterators)...},
673 identity<bool>{});
674 }
675
676public:
677 using Base = zip_common<zip_shortest<Iters...>, Iters...>;
678
679 zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
680
681 bool operator==(const zip_shortest<Iters...> &other) const {
682 return !test(other, std::index_sequence_for<Iters...>{});
683 }
684};
685
686template <template <typename...> class ItType, typename... Args> class zippy {
687public:
688 using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
689 using iterator_category = typename iterator::iterator_category;
690 using value_type = typename iterator::value_type;
691 using difference_type = typename iterator::difference_type;
692 using pointer = typename iterator::pointer;
693 using reference = typename iterator::reference;
694
695private:
696 std::tuple<Args...> ts;
697
698 template <size_t... Ns>
699 iterator begin_impl(std::index_sequence<Ns...>) const {
700 return iterator(std::begin(std::get<Ns>(ts))...);
701 }
702 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
703 return iterator(std::end(std::get<Ns>(ts))...);
704 }
705
706public:
707 zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
708
709 iterator begin() const {
710 return begin_impl(std::index_sequence_for<Args...>{});
711 }
712 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
713};
714
715} // end namespace detail
716
717/// zip iterator for two or more iterable types.
718template <typename T, typename U, typename... Args>
719detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
720 Args &&... args) {
721 return detail::zippy<detail::zip_shortest, T, U, Args...>(
722 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
723}
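
Illustrative usage (hypothetical names, not from the analyzed file): zip() yields tuples of references and stops at the end of the shorter range.

#include "llvm/ADT/STLExtras.h"
#include <string>
#include <vector>

void visitColumns(const std::vector<std::string> &Names,
                  const std::vector<unsigned> &Widths) {
  for (auto Entry : llvm::zip(Names, Widths)) {
    const std::string &Name = std::get<0>(Entry);
    unsigned Width = std::get<1>(Entry);
    (void)Name;
    (void)Width; // consume the (Name, Width) pair here
  }
}
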
724
725/// zip iterator that, for the sake of efficiency, assumes the first iteratee to
726/// be the shortest.
727template <typename T, typename U, typename... Args>
728detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
729 Args &&... args) {
730 return detail::zippy<detail::zip_first, T, U, Args...>(
731 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
732}
733
734namespace detail {
735template <typename Iter>
736Iter next_or_end(const Iter &I, const Iter &End) {
737 if (I == End)
738 return End;
739 return std::next(I);
740}
741
742template <typename Iter>
743auto deref_or_none(const Iter &I, const Iter &End) -> llvm::Optional<
744 std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
745 if (I == End)
746 return None;
747 return *I;
748}
749
750template <typename Iter> struct ZipLongestItemType {
751 using type =
752 llvm::Optional<typename std::remove_const<typename std::remove_reference<
753 decltype(*std::declval<Iter>())>::type>::type>;
754};
755
756template <typename... Iters> struct ZipLongestTupleType {
757 using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
758};
759
760template <typename... Iters>
761class zip_longest_iterator
762 : public iterator_facade_base<
763 zip_longest_iterator<Iters...>,
764 typename std::common_type<
765 std::forward_iterator_tag,
766 typename std::iterator_traits<Iters>::iterator_category...>::type,
767 typename ZipLongestTupleType<Iters...>::type,
768 typename std::iterator_traits<typename std::tuple_element<
769 0, std::tuple<Iters...>>::type>::difference_type,
770 typename ZipLongestTupleType<Iters...>::type *,
771 typename ZipLongestTupleType<Iters...>::type> {
772public:
773 using value_type = typename ZipLongestTupleType<Iters...>::type;
774
775private:
776 std::tuple<Iters...> iterators;
777 std::tuple<Iters...> end_iterators;
778
779 template <size_t... Ns>
780 bool test(const zip_longest_iterator<Iters...> &other,
781 std::index_sequence<Ns...>) const {
782 return llvm::any_of(
783 std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
784 std::get<Ns>(other.iterators)...},
785 identity<bool>{});
786 }
787
788 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
789 return value_type(
790 deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
791 }
792
793 template <size_t... Ns>
794 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
795 return std::tuple<Iters...>(
796 next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
797 }
798
799public:
800 zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
801 : iterators(std::forward<Iters>(ts.first)...),
802 end_iterators(std::forward<Iters>(ts.second)...) {}
803
804 value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
805
806 value_type operator*() const {
807 return deref(std::index_sequence_for<Iters...>{});
808 }
809
810 zip_longest_iterator<Iters...> &operator++() {
811 iterators = tup_inc(std::index_sequence_for<Iters...>{});
812 return *this;
813 }
814
815 bool operator==(const zip_longest_iterator<Iters...> &other) const {
816 return !test(other, std::index_sequence_for<Iters...>{});
817 }
818};
819
820template <typename... Args> class zip_longest_range {
821public:
822 using iterator =
823 zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
824 using iterator_category = typename iterator::iterator_category;
825 using value_type = typename iterator::value_type;
826 using difference_type = typename iterator::difference_type;
827 using pointer = typename iterator::pointer;
828 using reference = typename iterator::reference;
829
830private:
831 std::tuple<Args...> ts;
832
833 template <size_t... Ns>
834 iterator begin_impl(std::index_sequence<Ns...>) const {
835 return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
836 adl_end(std::get<Ns>(ts)))...);
837 }
838
839 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
840 return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
841 adl_end(std::get<Ns>(ts)))...);
842 }
843
844public:
845 zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
846
847 iterator begin() const {
848 return begin_impl(std::index_sequence_for<Args...>{});
849 }
850 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
851};
852} // namespace detail
853
854/// Iterate over two or more iterators at the same time. Iteration continues
855/// until all iterators reach the end. The llvm::Optional only contains a value
856/// if the iterator has not reached the end.
857template <typename T, typename U, typename... Args>
858detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
859 Args &&... args) {
860 return detail::zip_longest_range<T, U, Args...>(
861 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
862}
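
Illustrative usage (a sketch, hypothetical names): when the ranges have different lengths, elements of the shorter one come back as llvm::None.

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include <vector>

unsigned countMismatches(const std::vector<int> &A, const std::vector<int> &B) {
  unsigned Mismatches = 0;
  for (auto Entry : llvm::zip_longest(A, B))
    if (std::get<0>(Entry) != std::get<1>(Entry)) // Optional<int> comparison
      ++Mismatches;
  return Mismatches;
}
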
863
864/// Iterator wrapper that concatenates sequences together.
865///
866/// This can concatenate different iterators, even with different types, into
867/// a single iterator provided the value types of all the concatenated
868/// iterators expose `reference` and `pointer` types that can be converted to
869/// `ValueT &` and `ValueT *` respectively. It doesn't support more
870/// interesting/customized pointer or reference types.
871///
872/// Currently this only supports forward or higher iterator categories as
873/// inputs and always exposes a forward iterator interface.
874template <typename ValueT, typename... IterTs>
875class concat_iterator
876 : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
877 std::forward_iterator_tag, ValueT> {
878 using BaseT = typename concat_iterator::iterator_facade_base;
879
880 /// We store both the current and end iterators for each concatenated
881 /// sequence in a tuple of pairs.
882 ///
883 /// Note that something like iterator_range seems nice at first here, but the
884 /// range properties are of little benefit and end up getting in the way
885 /// because we need to do mutation on the current iterators.
886 std::tuple<IterTs...> Begins;
887 std::tuple<IterTs...> Ends;
888
889 /// Attempts to increment a specific iterator.
890 ///
891 /// Returns true if it was able to increment the iterator. Returns false if
892 /// the iterator is already at the end iterator.
893 template <size_t Index> bool incrementHelper() {
894 auto &Begin = std::get<Index>(Begins);
895 auto &End = std::get<Index>(Ends);
896 if (Begin == End)
897 return false;
898
899 ++Begin;
900 return true;
901 }
902
903 /// Increments the first non-end iterator.
904 ///
905 /// It is an error to call this with all iterators at the end.
906 template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
907 // Build a sequence of functions to increment each iterator if possible.
908 bool (concat_iterator::*IncrementHelperFns[])() = {
909 &concat_iterator::incrementHelper<Ns>...};
910
911 // Loop over them, and stop as soon as we succeed at incrementing one.
912 for (auto &IncrementHelperFn : IncrementHelperFns)
913 if ((this->*IncrementHelperFn)())
914 return;
915
916 llvm_unreachable("Attempted to increment an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to increment an end concat iterator!"
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/STLExtras.h"
, 916)
;
917 }
918
919 /// Returns null if the specified iterator is at the end. Otherwise,
920 /// dereferences the iterator and returns the address of the resulting
921 /// reference.
922 template <size_t Index> ValueT *getHelper() const {
923 auto &Begin = std::get<Index>(Begins);
924 auto &End = std::get<Index>(Ends);
925 if (Begin == End)
926 return nullptr;
927
928 return &*Begin;
929 }
930
931 /// Finds the first non-end iterator, dereferences, and returns the resulting
932 /// reference.
933 ///
934 /// It is an error to call this with all iterators at the end.
935 template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
936 // Build a sequence of functions to get from iterator if possible.
937 ValueT *(concat_iterator::*GetHelperFns[])() const = {
938 &concat_iterator::getHelper<Ns>...};
939
940 // Loop over them, and return the first result we find.
941 for (auto &GetHelperFn : GetHelperFns)
942 if (ValueT *P = (this->*GetHelperFn)())
943 return *P;
944
945 llvm_unreachable("Attempted to get a pointer from an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to get a pointer from an end concat iterator!"
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/STLExtras.h"
, 945)
;
946 }
947
948public:
949 /// Constructs an iterator from a sequence of ranges.
950 ///
951 /// We need the full range to know how to switch between each of the
952 /// iterators.
953 template <typename... RangeTs>
954 explicit concat_iterator(RangeTs &&... Ranges)
955 : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}
956
957 using BaseT::operator++;
958
959 concat_iterator &operator++() {
960 increment(std::index_sequence_for<IterTs...>());
961 return *this;
962 }
963
964 ValueT &operator*() const {
965 return get(std::index_sequence_for<IterTs...>());
966 }
967
968 bool operator==(const concat_iterator &RHS) const {
969 return Begins == RHS.Begins && Ends == RHS.Ends;
970 }
971};
972
973namespace detail {
974
975/// Helper to store a sequence of ranges being concatenated and access them.
976///
977/// This is designed to facilitate providing actual storage when temporaries
978/// are passed into the constructor such that we can use it as part of range
979/// based for loops.
980template <typename ValueT, typename... RangeTs> class concat_range {
981public:
982 using iterator =
983 concat_iterator<ValueT,
984 decltype(std::begin(std::declval<RangeTs &>()))...>;
985
986private:
987 std::tuple<RangeTs...> Ranges;
988
989 template <size_t... Ns> iterator begin_impl(std::index_sequence<Ns...>) {
990 return iterator(std::get<Ns>(Ranges)...);
991 }
992 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
993 return iterator(make_range(std::end(std::get<Ns>(Ranges)),
994 std::end(std::get<Ns>(Ranges)))...);
995 }
996
997public:
998 concat_range(RangeTs &&... Ranges)
999 : Ranges(std::forward<RangeTs>(Ranges)...) {}
1000
1001 iterator begin() { return begin_impl(std::index_sequence_for<RangeTs...>{}); }
1002 iterator end() { return end_impl(std::index_sequence_for<RangeTs...>{}); }
1003};
1004
1005} // end namespace detail
1006
1007/// Concatenated range across two or more ranges.
1008///
1009/// The desired value type must be explicitly specified.
1010template <typename ValueT, typename... RangeTs>
1011detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
1012 static_assert(sizeof...(RangeTs) > 1,
1013 "Need more than one range to concatenate!");
1014 return detail::concat_range<ValueT, RangeTs...>(
1015 std::forward<RangeTs>(Ranges)...);
1016}
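
Illustrative usage (a sketch with hypothetical containers): the element type is spelled out explicitly and both ranges are walked as one sequence.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>

int sumAll(std::vector<int> &A, llvm::SmallVectorImpl<int> &B) {
  int Sum = 0;
  for (int V : llvm::concat<int>(A, B))
    Sum += V;
  return Sum;
}
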
1017
1018/// A utility class used to implement an iterator that contains some base object
1019/// and an index. The iterator moves the index but keeps the base constant.
1020template <typename DerivedT, typename BaseT, typename T,
1021 typename PointerT = T *, typename ReferenceT = T &>
1022class indexed_accessor_iterator
1023 : public llvm::iterator_facade_base<DerivedT,
1024 std::random_access_iterator_tag, T,
1025 std::ptrdiff_t, PointerT, ReferenceT> {
1026public:
1027 ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
1028 assert(base == rhs.base && "incompatible iterators");
1029 return index - rhs.index;
1030 }
1031 bool operator==(const indexed_accessor_iterator &rhs) const {
1032 return base == rhs.base && index == rhs.index;
1033 }
1034 bool operator<(const indexed_accessor_iterator &rhs) const {
1035 assert(base == rhs.base && "incompatible iterators");
1036 return index < rhs.index;
1037 }
1038
1039 DerivedT &operator+=(ptrdiff_t offset) {
1040 this->index += offset;
1041 return static_cast<DerivedT &>(*this);
1042 }
1043 DerivedT &operator-=(ptrdiff_t offset) {
1044 this->index -= offset;
1045 return static_cast<DerivedT &>(*this);
1046 }
1047
1048 /// Returns the current index of the iterator.
1049 ptrdiff_t getIndex() const { return index; }
1050
1051 /// Returns the current base of the iterator.
1052 const BaseT &getBase() const { return base; }
1053
1054protected:
1055 indexed_accessor_iterator(BaseT base, ptrdiff_t index)
1056 : base(base), index(index) {}
1057 BaseT base;
1058 ptrdiff_t index;
1059};
1060
1061namespace detail {
1062/// The class represents the base of a range of indexed_accessor_iterators. It
1063/// provides support for many different range functionalities, e.g.
1064/// drop_front/slice/etc.. Derived range classes must implement the following
1065/// static methods:
1066/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
1067/// - Dereference an iterator pointing to the base object at the given
1068/// index.
1069/// * BaseT offset_base(const BaseT &base, ptrdiff_t index)
1070/// - Return a new base that is offset from the provided base by 'index'
1071/// elements.
1072template <typename DerivedT, typename BaseT, typename T,
1073 typename PointerT = T *, typename ReferenceT = T &>
1074class indexed_accessor_range_base {
1075public:
1076 using RangeBaseT =
1077 indexed_accessor_range_base<DerivedT, BaseT, T, PointerT, ReferenceT>;
1078
1079 /// An iterator element of this range.
1080 class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
1081 PointerT, ReferenceT> {
1082 public:
1083 // Index into this iterator, invoking a static method on the derived type.
1084 ReferenceT operator*() const {
1085 return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
1086 }
1087
1088 private:
1089 iterator(BaseT owner, ptrdiff_t curIndex)
1090 : indexed_accessor_iterator<iterator, BaseT, T, PointerT, ReferenceT>(
1091 owner, curIndex) {}
1092
1093 /// Allow access to the constructor.
1094 friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
1095 ReferenceT>;
1096 };
1097
1098 indexed_accessor_range_base(iterator begin, iterator end)
1099 : base(offset_base(begin.getBase(), begin.getIndex())),
1100 count(end.getIndex() - begin.getIndex()) {}
1101 indexed_accessor_range_base(const iterator_range<iterator> &range)
1102 : indexed_accessor_range_base(range.begin(), range.end()) {}
1103 indexed_accessor_range_base(BaseT base, ptrdiff_t count)
1104 : base(base), count(count) {}
1105
1106 iterator begin() const { return iterator(base, 0); }
1107 iterator end() const { return iterator(base, count); }
1108 ReferenceT operator[](size_t Index) const {
1109 assert(Index < size() && "invalid index for value range");
1110 return DerivedT::dereference_iterator(base, static_cast<ptrdiff_t>(Index));
1111 }
1112 ReferenceT front() const {
1113 assert(!empty() && "expected non-empty range");
1114 return (*this)[0];
1115 }
1116 ReferenceT back() const {
1117 assert(!empty() && "expected non-empty range");
1118 return (*this)[size() - 1];
1119 }
1120
1121 /// Compare this range with another.
1122 template <typename OtherT> bool operator==(const OtherT &other) const {
1123 return size() ==
1124 static_cast<size_t>(std::distance(other.begin(), other.end())) &&
1125 std::equal(begin(), end(), other.begin());
1126 }
1127 template <typename OtherT> bool operator!=(const OtherT &other) const {
1128 return !(*this == other);
1129 }
1130
1131 /// Return the size of this range.
1132 size_t size() const { return count; }
1133
1134 /// Return if the range is empty.
1135 bool empty() const { return size() == 0; }
1136
1137 /// Drop the first N elements, and keep M elements.
1138 DerivedT slice(size_t n, size_t m) const {
1139 assert(n + m <= size() && "invalid size specifiers");
1140 return DerivedT(offset_base(base, n), m);
1141 }
1142
1143 /// Drop the first n elements.
1144 DerivedT drop_front(size_t n = 1) const {
1145 assert(size() >= n && "Dropping more elements than exist");
1146 return slice(n, size() - n);
1147 }
1148 /// Drop the last n elements.
1149 DerivedT drop_back(size_t n = 1) const {
1150 assert(size() >= n && "Dropping more elements than exist");
1151 return DerivedT(base, size() - n);
1152 }
1153
1154 /// Take the first n elements.
1155 DerivedT take_front(size_t n = 1) const {
1156 return n < size() ? drop_back(size() - n)
1157 : static_cast<const DerivedT &>(*this);
1158 }
1159
1160 /// Take the last n elements.
1161 DerivedT take_back(size_t n = 1) const {
1162 return n < size() ? drop_front(size() - n)
1163 : static_cast<const DerivedT &>(*this);
1164 }
1165
1166 /// Allow conversion to any type accepting an iterator_range.
1167 template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
1168 RangeT, iterator_range<iterator>>::value>>
1169 operator RangeT() const {
1170 return RangeT(iterator_range<iterator>(*this));
1171 }
1172
1173 /// Returns the base of this range.
1174 const BaseT &getBase() const { return base; }
1175
1176private:
1177 /// Offset the given base by the given amount.
1178 static BaseT offset_base(const BaseT &base, size_t n) {
1179 return n == 0 ? base : DerivedT::offset_base(base, n);
1180 }
1181
1182protected:
1183 indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
1184 indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
1185 indexed_accessor_range_base &
1186 operator=(const indexed_accessor_range_base &) = default;
1187
1188 /// The base that owns the provided range of values.
1189 BaseT base;
1190 /// The size from the owning range.
1191 ptrdiff_t count;
1192};
1193} // end namespace detail
1194
1195/// This class provides an implementation of a range of
1196/// indexed_accessor_iterators where the base is not indexable. Ranges with
1197/// bases that are offsetable should derive from indexed_accessor_range_base
1198/// instead. Derived range classes are expected to implement the following
1199/// static method:
1200/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
1201/// - Dereference an iterator pointing to a parent base at the given index.
1202template <typename DerivedT, typename BaseT, typename T,
1203 typename PointerT = T *, typename ReferenceT = T &>
1204class indexed_accessor_range
1205 : public detail::indexed_accessor_range_base<
1206 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
1207public:
1208 indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
1209 : detail::indexed_accessor_range_base<
1210 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
1211 std::make_pair(base, startIndex), count) {}
1212 using detail::indexed_accessor_range_base<
1213 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
1214 ReferenceT>::indexed_accessor_range_base;
1215
1216 /// Returns the current base of the range.
1217 const BaseT &getBase() const { return this->base.first; }
1218
1219 /// Returns the current start index of the range.
1220 ptrdiff_t getStartIndex() const { return this->base.second; }
1221
1222 /// See `detail::indexed_accessor_range_base` for details.
1223 static std::pair<BaseT, ptrdiff_t>
1224 offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
1225 // We encode the internal base as a pair of the derived base and a start
1226 // index into the derived base.
1227 return std::make_pair(base.first, base.second + index);
1228 }
1229 /// See `detail::indexed_accessor_range_base` for details.
1230 static ReferenceT
1231 dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
1232 ptrdiff_t index) {
1233 return DerivedT::dereference(base.first, base.second + index);
1234 }
1235};
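
Illustrative sketch (class name hypothetical, not from the analyzed file): a derived range exposing a raw pointer plus length, providing only the static dereference() hook described above.

#include "llvm/ADT/STLExtras.h"
#include <cstddef>

class IntArrayRange
    : public llvm::indexed_accessor_range<IntArrayRange, int *, int> {
public:
  using RangeBase = llvm::indexed_accessor_range<IntArrayRange, int *, int>;

  IntArrayRange(int *Data, std::ptrdiff_t Count)
      : RangeBase(Data, /*startIndex=*/0, Count) {}

  // Required hook: dereference the parent base at the given index.
  static int &dereference(int *Base, std::ptrdiff_t Index) {
    return Base[Index];
  }
};
// Usage: IntArrayRange Range(Buffer, N); then Range[i], Range.slice(...), etc.
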
1236
1237/// Given a container of pairs, return a range over the first elements.
1238template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
1239 return llvm::map_range(
1240 std::forward<ContainerTy>(c),
1241 [](decltype((*std::begin(c))) elt) -> decltype((elt.first)) {
1242 return elt.first;
1243 });
1244}
1245
1246/// Given a container of pairs, return a range over the second elements.
1247template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
1248 return llvm::map_range(
1249 std::forward<ContainerTy>(c),
1250 [](decltype((*std::begin(c))) elt) -> decltype((elt.second)) {
1251 return elt.second;
1252 });
1253}
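
Illustrative usage (a sketch over a std::map): project the keys and the values of a pair container without copying.

#include "llvm/ADT/STLExtras.h"
#include <map>
#include <string>

void bumpAllValues(std::map<std::string, int> &M) {
  for (const std::string &Key : llvm::make_first_range(M))
    (void)Key;   // keys (the .first of each pair), read-only here
  for (int &Value : llvm::make_second_range(M))
    ++Value;     // values (the .second of each pair), mutable through the range
}
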
1254
1255//===----------------------------------------------------------------------===//
1256// Extra additions to <utility>
1257//===----------------------------------------------------------------------===//
1258
1259/// Function object to check whether the first component of a std::pair
1260/// compares less than the first component of another std::pair.
1261struct less_first {
1262 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1263 return lhs.first < rhs.first;
1264 }
1265};
1266
1267/// Function object to check whether the second component of a std::pair
1268/// compares less than the second component of another std::pair.
1269struct less_second {
1270 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1271 return lhs.second < rhs.second;
1272 }
1273};
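
Illustrative usage (a sketch): sort a vector of pairs by its second component with the comparator above.

#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>

void sortByCount(std::vector<std::pair<std::string, unsigned>> &Entries) {
  std::sort(Entries.begin(), Entries.end(), llvm::less_second());
}
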
1274
1275/// \brief Function object to apply a binary function to the first component of
1276/// a std::pair.
1277template<typename FuncTy>
1278struct on_first {
1279 FuncTy func;
1280
1281 template <typename T>
1282 decltype(auto) operator()(const T &lhs, const T &rhs) const {
1283 return func(lhs.first, rhs.first);
1284 }
1285};
1286
1287/// Utility type to build an inheritance chain that makes it easy to rank
1288/// overload candidates.
1289template <int N> struct rank : rank<N - 1> {};
1290template <> struct rank<0> {};
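
Illustrative sketch (all names hypothetical): rank<> biases overload resolution toward the highest-ranked viable candidate, here preferring a container's own size() when it exists.

#include "llvm/ADT/STLExtras.h"
#include <cstddef>

template <typename T>
auto getSizeImpl(const T &C, llvm::rank<1>)
    -> decltype(C.size(), std::size_t()) {
  return C.size(); // chosen when T has a size() member
}
template <typename T> std::size_t getSizeImpl(const T &, llvm::rank<0>) {
  return 0;        // fallback for everything else
}
template <typename T> std::size_t getSize(const T &C) {
  return getSizeImpl(C, llvm::rank<1>());
}
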
1291
1292/// traits class for checking whether type T is one of any of the given
1293/// types in the variadic list.
1294template <typename T, typename... Ts>
1295using is_one_of = disjunction<std::is_same<T, Ts>...>;
1296
1297/// traits class for checking whether type T is a base class for all
1298/// the given types in the variadic list.
1299template <typename T, typename... Ts>
1300using are_base_of = conjunction<std::is_base_of<T, Ts>...>;
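
Illustrative compile-time checks (a sketch, hypothetical types) for the two traits above.

#include "llvm/ADT/STLExtras.h"

static_assert(llvm::is_one_of<int, float, int, char>::value,
              "int is one of the listed types");
static_assert(!llvm::is_one_of<double, float, int, char>::value,
              "double is not one of the listed types");

struct VBase {};
struct VDerivedA : VBase {};
struct VDerivedB : VBase {};
static_assert(llvm::are_base_of<VBase, VDerivedA, VDerivedB>::value,
              "VBase is a base of every listed type");
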
1301
1302//===----------------------------------------------------------------------===//
1303// Extra additions for arrays
1304//===----------------------------------------------------------------------===//
1305
1306// We have a copy here so that LLVM behaves the same when using different
1307// standard libraries.
1308template <class Iterator, class RNG>
1309void shuffle(Iterator first, Iterator last, RNG &&g) {
1310 // It would be better to use a std::uniform_int_distribution,
1311 // but that would be stdlib dependent.
1312 typedef
1313 typename std::iterator_traits<Iterator>::difference_type difference_type;
1314 for (auto size = last - first; size > 1; ++first, (void)--size) {
1315 difference_type offset = g() % size;
1316 // Avoid self-assignment due to incorrect assertions in libstdc++
1317 // containers (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85828).
1318 if (offset != difference_type(0))
1319 std::iter_swap(first, first + offset);
1320 }
1321}
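
Illustrative usage (a sketch): with a fixed-seed std::mt19937 this shuffle yields the same permutation regardless of which standard library is in use, which is the point of carrying the copy.

#include "llvm/ADT/STLExtras.h"
#include <random>
#include <vector>

void shuffleReproducibly(std::vector<int> &V) {
  std::mt19937 Gen(42); // fixed seed => deterministic, stdlib-independent order
  llvm::shuffle(V.begin(), V.end(), Gen);
}
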
1322
1323/// Find the length of an array.
1324template <class T, std::size_t N>
1325constexpr inline size_t array_lengthof(T (&)[N]) {
1326 return N;
1327}
1328
1329/// Adapt std::less<T> for array_pod_sort.
1330template<typename T>
1331inline int array_pod_sort_comparator(const void *P1, const void *P2) {
1332 if (std::less<T>()(*reinterpret_cast<const T*>(P1),
1333 *reinterpret_cast<const T*>(P2)))
1334 return -1;
1335 if (std::less<T>()(*reinterpret_cast<const T*>(P2),
1336 *reinterpret_cast<const T*>(P1)))
1337 return 1;
1338 return 0;
1339}
1340
1341/// get_array_pod_sort_comparator - This is an internal helper function used to
1342/// get type deduction of T right.
1343template<typename T>
1344inline int (*get_array_pod_sort_comparator(const T &))
1345 (const void*, const void*) {
1346 return array_pod_sort_comparator<T>;
1347}
1348
1349#ifdef EXPENSIVE_CHECKS
1350namespace detail {
1351
1352inline unsigned presortShuffleEntropy() {
1353 static unsigned Result(std::random_device{}());
1354 return Result;
1355}
1356
1357template <class IteratorTy>
1358inline void presortShuffle(IteratorTy Start, IteratorTy End) {
1359 std::mt19937 Generator(presortShuffleEntropy());
1360 llvm::shuffle(Start, End, Generator);
1361}
1362
1363} // end namespace detail
1364#endif
1365
1366/// array_pod_sort - This sorts an array with the specified start and end
1367/// extent. This is just like std::sort, except that it calls qsort instead of
1368/// using an inlined template. qsort is slightly slower than std::sort, but
1369/// most sorts are not performance critical in LLVM and std::sort has to be
1370/// template instantiated for each type, leading to significant measured code
1371/// bloat. This function should generally be used instead of std::sort where
1372/// possible.
1373///
1374/// This function assumes that you have simple POD-like types that can be
1375/// compared with std::less and can be moved with memcpy. If this isn't true,
1376/// you should use std::sort.
1377///
1378/// NOTE: If qsort_r were portable, we could allow a custom comparator and
1379/// default to std::less.
1380template<class IteratorTy>
1381inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
1382 // Don't inefficiently call qsort with one element or trigger undefined
1383 // behavior with an empty sequence.
1384 auto NElts = End - Start;
1385 if (NElts <= 1) return;
1386#ifdef EXPENSIVE_CHECKS
1387 detail::presortShuffle<IteratorTy>(Start, End);
1388#endif
1389 qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));