Bug Summary

File: lib/Analysis/MemorySSA.cpp
Warning: line 1650, column 5
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MemorySSA.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn325874/build-llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-7~svn325874/lib/Analysis -I /build/llvm-toolchain-snapshot-7~svn325874/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn325874/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn325874/build-llvm/lib/Analysis -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-02-23-163436-368-1 -x c++ /build/llvm-toolchain-snapshot-7~svn325874/lib/Analysis/MemorySSA.cpp

/build/llvm-toolchain-snapshot-7~svn325874/lib/Analysis/MemorySSA.cpp

1//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the MemorySSA class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/MemorySSA.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/DenseMapInfo.h"
17#include "llvm/ADT/DenseSet.h"
18#include "llvm/ADT/DepthFirstIterator.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/Optional.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/Analysis/IteratedDominanceFrontier.h"
29#include "llvm/Analysis/MemoryLocation.h"
30#include "llvm/IR/AssemblyAnnotationWriter.h"
31#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/CallSite.h"
33#include "llvm/IR/Dominators.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/IntrinsicInst.h"
38#include "llvm/IR/Intrinsics.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/PassManager.h"
41#include "llvm/IR/Use.h"
42#include "llvm/Pass.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/CommandLine.h"
46#include "llvm/Support/Compiler.h"
47#include "llvm/Support/Debug.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/FormattedStream.h"
50#include "llvm/Support/raw_ostream.h"
51#include <algorithm>
52#include <cassert>
53#include <iterator>
54#include <memory>
55#include <utility>
56
57using namespace llvm;
58
59#define DEBUG_TYPE "memoryssa"
60
61INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
62 true)
63INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
64INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
65INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
66 true)
67
68INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
69 "Memory SSA Printer", false, false)
70INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
71INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
72 "Memory SSA Printer", false, false)
73
74static cl::opt<unsigned> MaxCheckLimit(
75 "memssa-check-limit", cl::Hidden, cl::init(100),
76 cl::desc("The maximum number of stores/phis MemorySSA"
77 "will consider trying to walk past (default = 100)"));
78
79static cl::opt<bool>
80 VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
81 cl::desc("Verify MemorySSA in legacy printer pass."));
82
83namespace llvm {
84
85/// \brief An assembly annotator class to print Memory SSA information in
86/// comments.
87class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
88 friend class MemorySSA;
89
90 const MemorySSA *MSSA;
91
92public:
93 MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
94
95 void emitBasicBlockStartAnnot(const BasicBlock *BB,
96 formatted_raw_ostream &OS) override {
97 if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
98 OS << "; " << *MA << "\n";
99 }
100
101 void emitInstructionAnnot(const Instruction *I,
102 formatted_raw_ostream &OS) override {
103 if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
104 OS << "; " << *MA << "\n";
105 }
106};
107
108} // end namespace llvm
109
110namespace {
111
112/// Our current alias analysis API differentiates heavily between calls and
113/// non-calls, and functions called on one usually assert on the other.
114/// This class encapsulates the distinction to simplify other code that wants
115/// "Memory affecting instructions and related data" to use as a key.
116/// For example, this class is used as a densemap key in the use optimizer.
117class MemoryLocOrCall {
118public:
119 bool IsCall = false;
120
121 MemoryLocOrCall() = default;
122 MemoryLocOrCall(MemoryUseOrDef *MUD)
123 : MemoryLocOrCall(MUD->getMemoryInst()) {}
124 MemoryLocOrCall(const MemoryUseOrDef *MUD)
125 : MemoryLocOrCall(MUD->getMemoryInst()) {}
126
127 MemoryLocOrCall(Instruction *Inst) {
128 if (ImmutableCallSite(Inst)) {
129 IsCall = true;
130 CS = ImmutableCallSite(Inst);
131 } else {
132 IsCall = false;
133 // There is no such thing as a memorylocation for a fence inst, and it is
134 // unique in that regard.
135 if (!isa<FenceInst>(Inst))
136 Loc = MemoryLocation::get(Inst);
137 }
138 }
139
140 explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
141
142 ImmutableCallSite getCS() const {
143 assert(IsCall);
144 return CS;
145 }
146
147 MemoryLocation getLoc() const {
148 assert(!IsCall);
149 return Loc;
150 }
151
152 bool operator==(const MemoryLocOrCall &Other) const {
153 if (IsCall != Other.IsCall)
154 return false;
155
156 if (IsCall)
157 return CS.getCalledValue() == Other.CS.getCalledValue();
158 return Loc == Other.Loc;
159 }
160
161private:
162 union {
163 ImmutableCallSite CS;
164 MemoryLocation Loc;
165 };
166};
167
168} // end anonymous namespace
169
170namespace llvm {
171
172template <> struct DenseMapInfo<MemoryLocOrCall> {
173 static inline MemoryLocOrCall getEmptyKey() {
174 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
175 }
176
177 static inline MemoryLocOrCall getTombstoneKey() {
178 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
179 }
180
181 static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
182 if (MLOC.IsCall)
183 return hash_combine(MLOC.IsCall,
184 DenseMapInfo<const Value *>::getHashValue(
185 MLOC.getCS().getCalledValue()));
186 return hash_combine(
187 MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
188 }
189
190 static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
191 return LHS == RHS;
192 }
193};
194
195} // end namespace llvm
196
197/// This does one-way checks to see if Use could theoretically be hoisted above
198/// MayClobber. This will not check the other way around.
199///
200/// This assumes that, for the purposes of MemorySSA, Use comes directly after
201/// MayClobber, with no potentially clobbering operations in between them.
202/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
203static bool areLoadsReorderable(const LoadInst *Use,
204 const LoadInst *MayClobber) {
205 bool VolatileUse = Use->isVolatile();
206 bool VolatileClobber = MayClobber->isVolatile();
207 // Volatile operations may never be reordered with other volatile operations.
208 if (VolatileUse && VolatileClobber)
209 return false;
210 // Otherwise, volatile doesn't matter here. From the language reference:
211 // 'optimizers may change the order of volatile operations relative to
212 // non-volatile operations.'"
213
214 // If a load is seq_cst, it cannot be moved above other loads. If its ordering
215 // is weaker, it can be moved above other loads. We just need to be sure that
216 // MayClobber isn't an acquire load, because loads can't be moved above
217 // acquire loads.
218 //
219 // Note that this explicitly *does* allow the free reordering of monotonic (or
220 // weaker) loads of the same address.
221 bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
222 bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
223 AtomicOrdering::Acquire);
224 return !(SeqCstUse || MayClobberIsAcquire);
225}
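The rule above can be restated compactly. The following standalone sketch is not part of MemorySSA.cpp; the names LoadOrdering and mayHoistAbove are hypothetical and introduced only for illustration. It mirrors the same decision: hoisting is blocked when both loads are volatile, when the use is seq_cst, or when the earlier load is acquire or stronger.

#include <cassert>

enum class LoadOrdering { NotAtomic, Unordered, Monotonic, Acquire, SeqCst };

// Mirrors areLoadsReorderable: a use load may be hoisted above an earlier
// load unless (a) both are volatile, (b) the use is seq_cst, or (c) the
// earlier load is acquire or stronger.
static bool mayHoistAbove(LoadOrdering Use, bool UseVolatile,
                          LoadOrdering Clobber, bool ClobberVolatile) {
  if (UseVolatile && ClobberVolatile)
    return false;
  bool SeqCstUse = Use == LoadOrdering::SeqCst;
  bool ClobberIsAcquire = Clobber >= LoadOrdering::Acquire;
  return !(SeqCstUse || ClobberIsAcquire);
}

int main() {
  // Two monotonic loads of the same address may be freely reordered.
  assert(mayHoistAbove(LoadOrdering::Monotonic, false,
                       LoadOrdering::Monotonic, false));
  // Nothing may be hoisted above an acquire load.
  assert(!mayHoistAbove(LoadOrdering::Unordered, false,
                        LoadOrdering::Acquire, false));
  return 0;
}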
226
227static bool instructionClobbersQuery(MemoryDef *MD,
228 const MemoryLocation &UseLoc,
229 const Instruction *UseInst,
230 AliasAnalysis &AA) {
231 Instruction *DefInst = MD->getMemoryInst();
232 assert(DefInst && "Defining instruction not actually an instruction");
233 ImmutableCallSite UseCS(UseInst);
234
235 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
236 // These intrinsics will show up as affecting memory, but they are just
237 // markers.
238 switch (II->getIntrinsicID()) {
239 case Intrinsic::lifetime_start:
240 if (UseCS)
241 return false;
242 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), UseLoc);
243 case Intrinsic::lifetime_end:
244 case Intrinsic::invariant_start:
245 case Intrinsic::invariant_end:
246 case Intrinsic::assume:
247 return false;
248 default:
249 break;
250 }
251 }
252
253 if (UseCS) {
254 ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
255 return isModOrRefSet(I);
256 }
257
258 if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
259 if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
260 return !areLoadsReorderable(UseLoad, DefLoad);
261
262 return isModSet(AA.getModRefInfo(DefInst, UseLoc));
263}
264
265static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
266 const MemoryLocOrCall &UseMLOC,
267 AliasAnalysis &AA) {
268 // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
269 // to exist while MemoryLocOrCall is pushed through places.
270 if (UseMLOC.IsCall)
271 return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
272 AA);
273 return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
274 AA);
275}
276
277// Return true when MD may alias MU, return false otherwise.
278bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
279 AliasAnalysis &AA) {
280 return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
281}
282
283namespace {
284
285struct UpwardsMemoryQuery {
286 // True if our original query started off as a call
287 bool IsCall = false;
288 // The pointer location we started the query with. This will be empty if
289 // IsCall is true.
290 MemoryLocation StartingLoc;
291 // This is the instruction we were querying about.
292 const Instruction *Inst = nullptr;
293 // The MemoryAccess we actually got called with, used to test local domination
294 const MemoryAccess *OriginalAccess = nullptr;
295
296 UpwardsMemoryQuery() = default;
297
298 UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
299 : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
300 if (!IsCall)
301 StartingLoc = MemoryLocation::get(Inst);
302 }
303};
304
305} // end anonymous namespace
306
307static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
308 AliasAnalysis &AA) {
309 Instruction *Inst = MD->getMemoryInst();
310 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
311 switch (II->getIntrinsicID()) {
312 case Intrinsic::lifetime_end:
313 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
314 default:
315 return false;
316 }
317 }
318 return false;
319}
320
321static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
322 const Instruction *I) {
323 // If the memory can't be changed, then loads of the memory can't be
324 // clobbered.
325 //
326 // FIXME: We should handle invariant groups, as well. It's a bit harder,
327 // because we need to pay close attention to invariant group barriers.
328 return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
329 AA.pointsToConstantMemory(cast<LoadInst>(I)->
330 getPointerOperand()));
331}
332
333/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
334/// in between `Start` and `ClobberAt` clobbers `Start`.
335///
336/// This is meant to be as simple and self-contained as possible. Because it
337/// uses no cache, etc., it can be relatively expensive.
338///
339/// \param Start The MemoryAccess that we want to walk from.
340/// \param ClobberAt A clobber for Start.
341/// \param StartLoc The MemoryLocation for Start.
342/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
343/// \param Query The UpwardsMemoryQuery we used for our search.
344/// \param AA The AliasAnalysis we used for our search.
345static void LLVM_ATTRIBUTE_UNUSED
346checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
347 const MemoryLocation &StartLoc, const MemorySSA &MSSA,
348 const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
349 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
350
351 if (MSSA.isLiveOnEntryDef(Start)) {
352 assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
353 "liveOnEntry must clobber itself");
354 return;
355 }
356
357 bool FoundClobber = false;
358 DenseSet<MemoryAccessPair> VisitedPhis;
359 SmallVector<MemoryAccessPair, 8> Worklist;
360 Worklist.emplace_back(Start, StartLoc);
361 // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
362 // is found, complain.
363 while (!Worklist.empty()) {
364 MemoryAccessPair MAP = Worklist.pop_back_val();
365 // All we care about is that nothing from Start to ClobberAt clobbers Start.
366 // We learn nothing from revisiting nodes.
367 if (!VisitedPhis.insert(MAP).second)
368 continue;
369
370 for (MemoryAccess *MA : def_chain(MAP.first)) {
371 if (MA == ClobberAt) {
372 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
373 // instructionClobbersQuery isn't essentially free, so don't use `|=`,
374 // since it won't let us short-circuit.
375 //
376 // Also, note that this can't be hoisted out of the `Worklist` loop,
377 // since MD may only act as a clobber for 1 of N MemoryLocations.
378 FoundClobber =
379 FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
380 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
381 }
382 break;
383 }
384
385 // We should never hit liveOnEntry, unless it's the clobber.
386 assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
387
388 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
389 (void)MD;
390 assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
391 "Found clobber before reaching ClobberAt!");
392 continue;
393 }
394
395 assert(isa<MemoryPhi>(MA));
396 Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
397 }
398 }
399
400 // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
401 // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
402 assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
403 "ClobberAt never acted as a clobber");
404}
405
406namespace {
407
408/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
409/// in one class.
410class ClobberWalker {
411 /// Save a few bytes by using unsigned instead of size_t.
412 using ListIndex = unsigned;
413
414 /// Represents a span of contiguous MemoryDefs, potentially ending in a
415 /// MemoryPhi.
416 struct DefPath {
417 MemoryLocation Loc;
418 // Note that, because we always walk in reverse, Last will always dominate
419 // First. Also note that First and Last are inclusive.
420 MemoryAccess *First;
421 MemoryAccess *Last;
422 Optional<ListIndex> Previous;
423
424 DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
425 Optional<ListIndex> Previous)
426 : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
427
428 DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
429 Optional<ListIndex> Previous)
430 : DefPath(Loc, Init, Init, Previous) {}
431 };
432
433 const MemorySSA &MSSA;
434 AliasAnalysis &AA;
435 DominatorTree &DT;
436 UpwardsMemoryQuery *Query;
437
438 // Phi optimization bookkeeping
439 SmallVector<DefPath, 32> Paths;
440 DenseSet<ConstMemoryAccessPair> VisitedPhis;
441
442 /// Find the nearest def or phi that `From` can legally be optimized to.
443 const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
444 assert(From->getNumOperands() && "Phi with no operands?");
445
446 BasicBlock *BB = From->getBlock();
447 MemoryAccess *Result = MSSA.getLiveOnEntryDef();
448 DomTreeNode *Node = DT.getNode(BB);
449 while ((Node = Node->getIDom())) {
450 auto *Defs = MSSA.getBlockDefs(Node->getBlock());
451 if (Defs)
452 return &*Defs->rbegin();
453 }
454 return Result;
455 }
456
457 /// Result of calling walkToPhiOrClobber.
458 struct UpwardsWalkResult {
459 /// The "Result" of the walk. Either a clobber, the last thing we walked, or
460 /// both.
461 MemoryAccess *Result;
462 bool IsKnownClobber;
463 };
464
465 /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
466 /// This will update Desc.Last as it walks. It will (optionally) also stop at
467 /// StopAt.
468 ///
469 /// This does not test for whether StopAt is a clobber
470 UpwardsWalkResult
471 walkToPhiOrClobber(DefPath &Desc,
472 const MemoryAccess *StopAt = nullptr) const {
473 assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
474
475 for (MemoryAccess *Current : def_chain(Desc.Last)) {
476 Desc.Last = Current;
477 if (Current == StopAt)
478 return {Current, false};
479
480 if (auto *MD = dyn_cast<MemoryDef>(Current))
481 if (MSSA.isLiveOnEntryDef(MD) ||
482 instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
483 return {MD, true};
484 }
485
486 assert(isa<MemoryPhi>(Desc.Last) &&
487 "Ended at a non-clobber that's not a phi?");
488 return {Desc.Last, false};
489 }
490
491 void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
492 ListIndex PriorNode) {
493 auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
494 upward_defs_end());
495 for (const MemoryAccessPair &P : UpwardDefs) {
496 PausedSearches.push_back(Paths.size());
497 Paths.emplace_back(P.second, P.first, PriorNode);
498 }
499 }
500
501 /// Represents a search that terminated after finding a clobber. This clobber
502 /// may or may not be present in the path of defs from LastNode..SearchStart,
503 /// since it may have been retrieved from cache.
504 struct TerminatedPath {
505 MemoryAccess *Clobber;
506 ListIndex LastNode;
507 };
508
509 /// Get an access that keeps us from optimizing to the given phi.
510 ///
511 /// PausedSearches is an array of indices into the Paths array. Its incoming
512 /// value is the indices of searches that stopped at the last phi optimization
513 /// target. It's left in an unspecified state.
514 ///
515 /// If this returns None, NewPaused is a vector of searches that terminated
516 /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
517 Optional<TerminatedPath>
518 getBlockingAccess(const MemoryAccess *StopWhere,
519 SmallVectorImpl<ListIndex> &PausedSearches,
520 SmallVectorImpl<ListIndex> &NewPaused,
521 SmallVectorImpl<TerminatedPath> &Terminated) {
522 assert(!PausedSearches.empty() && "No searches to continue?");
523
524 // BFS vs DFS really doesn't make a difference here, so just do a DFS with
525 // PausedSearches as our stack.
526 while (!PausedSearches.empty()) {
527 ListIndex PathIndex = PausedSearches.pop_back_val();
528 DefPath &Node = Paths[PathIndex];
529
530 // If we've already visited this path with this MemoryLocation, we don't
531 // need to do so again.
532 //
533 // NOTE: That we just drop these paths on the ground makes caching
534 // behavior sporadic. e.g. given a diamond:
535 // A
536 // B C
537 // D
538 //
539 // ...If we walk D, B, A, C, we'll only cache the result of phi
540 // optimization for A, B, and D; C will be skipped because it dies here.
541 // This arguably isn't the worst thing ever, since:
542 // - We generally query things in a top-down order, so if we got below D
543 // without needing cache entries for {C, MemLoc}, then chances are
544 // that those cache entries would end up ultimately unused.
545 // - We still cache things for A, so C only needs to walk up a bit.
546 // If this behavior becomes problematic, we can fix without a ton of extra
547 // work.
548 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
549 continue;
550
551 UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
552 if (Res.IsKnownClobber) {
553 assert(Res.Result != StopWhere);
554 // If this wasn't a cache hit, we hit a clobber when walking. That's a
555 // failure.
556 TerminatedPath Term{Res.Result, PathIndex};
557 if (!MSSA.dominates(Res.Result, StopWhere))
558 return Term;
559
560 // Otherwise, it's a valid thing to potentially optimize to.
561 Terminated.push_back(Term);
562 continue;
563 }
564
565 if (Res.Result == StopWhere) {
566 // We've hit our target. Save this path off for if we want to continue
567 // walking.
568 NewPaused.push_back(PathIndex);
569 continue;
570 }
571
572 assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
573 addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
574 }
575
576 return None;
577 }
578
579 template <typename T, typename Walker>
580 struct generic_def_path_iterator
581 : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
582 std::forward_iterator_tag, T *> {
583 generic_def_path_iterator() = default;
584 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
585
586 T &operator*() const { return curNode(); }
587
588 generic_def_path_iterator &operator++() {
589 N = curNode().Previous;
590 return *this;
591 }
592
593 bool operator==(const generic_def_path_iterator &O) const {
594 if (N.hasValue() != O.N.hasValue())
595 return false;
596 return !N.hasValue() || *N == *O.N;
597 }
598
599 private:
600 T &curNode() const { return W->Paths[*N]; }
601
602 Walker *W = nullptr;
603 Optional<ListIndex> N = None;
604 };
605
606 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
607 using const_def_path_iterator =
608 generic_def_path_iterator<const DefPath, const ClobberWalker>;
609
610 iterator_range<def_path_iterator> def_path(ListIndex From) {
611 return make_range(def_path_iterator(this, From), def_path_iterator());
612 }
613
614 iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
615 return make_range(const_def_path_iterator(this, From),
616 const_def_path_iterator());
617 }
618
619 struct OptznResult {
620 /// The path that contains our result.
621 TerminatedPath PrimaryClobber;
622 /// The paths that we can legally cache back from, but that aren't
623 /// necessarily the result of the Phi optimization.
624 SmallVector<TerminatedPath, 4> OtherClobbers;
625 };
626
627 ListIndex defPathIndex(const DefPath &N) const {
628 // The assert looks nicer if we don't need to do &N
629 const DefPath *NP = &N;
630 assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
631 "Out of bounds DefPath!");
632 return NP - &Paths.front();
633 }
634
635 /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
636 /// that act as legal clobbers. Note that this won't return *all* clobbers.
637 ///
638 /// Phi optimization algorithm tl;dr:
639 /// - Find the earliest def/phi, A, we can optimize to
640 /// - Find if all paths from the starting memory access ultimately reach A
641 /// - If not, optimization isn't possible.
642 /// - Otherwise, walk from A to another clobber or phi, A'.
643 /// - If A' is a def, we're done.
644 /// - If A' is a phi, try to optimize it.
645 ///
646 /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
647 /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
648 OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
649 const MemoryLocation &Loc) {
650 assert(Paths.empty() && VisitedPhis.empty() &&
651 "Reset the optimization state.");
652
653 Paths.emplace_back(Loc, Start, Phi, None);
654 // Stores how many "valid" optimization nodes we had prior to calling
655 // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
656 auto PriorPathsSize = Paths.size();
657
658 SmallVector<ListIndex, 16> PausedSearches;
659 SmallVector<ListIndex, 8> NewPaused;
660 SmallVector<TerminatedPath, 4> TerminatedPaths;
661
662 addSearches(Phi, PausedSearches, 0);
663
664 // Moves the TerminatedPath with the "most dominated" Clobber to the end of
665 // Paths.
666 auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
667 assert(!Paths.empty() && "Need a path to move");
668 auto Dom = Paths.begin();
669 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
670 if (!MSSA.dominates(I->Clobber, Dom->Clobber))
671 Dom = I;
672 auto Last = Paths.end() - 1;
673 if (Last != Dom)
674 std::iter_swap(Last, Dom);
675 };
676
677 MemoryPhi *Current = Phi;
678 while (true) {
679 assert(!MSSA.isLiveOnEntryDef(Current) &&
680 "liveOnEntry wasn't treated as a clobber?");
681
682 const auto *Target = getWalkTarget(Current);
683 // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
684 // optimization for the prior phi.
685 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
686 return MSSA.dominates(P.Clobber, Target);
687 }));
688
689 // FIXME: This is broken, because the Blocker may be reported to be
690 // liveOnEntry, and we'll happily wait for that to disappear (read: never)
691 // For the moment, this is fine, since we do nothing with blocker info.
692 if (Optional<TerminatedPath> Blocker = getBlockingAccess(
693 Target, PausedSearches, NewPaused, TerminatedPaths)) {
694
695 // Find the node we started at. We can't search based on N->Last, since
696 // we may have gone around a loop with a different MemoryLocation.
697 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
698 return defPathIndex(N) < PriorPathsSize;
699 });
700 assert(Iter != def_path_iterator());
701
702 DefPath &CurNode = *Iter;
703 assert(CurNode.Last == Current);
704
705 // Two things:
706 // A. We can't reliably cache all of NewPaused back. Consider a case
707 // where we have two paths in NewPaused; one of which can't optimize
708 // above this phi, whereas the other can. If we cache the second path
709 // back, we'll end up with suboptimal cache entries. We can handle
710 // cases like this a bit better when we either try to find all
711 // clobbers that block phi optimization, or when our cache starts
712 // supporting unfinished searches.
713 // B. We can't reliably cache TerminatedPaths back here without doing
714 // extra checks; consider a case like:
715 // T
716 // / \
717 // D C
718 // \ /
719 // S
720 // Where T is our target, C is a node with a clobber on it, D is a
721 // diamond (with a clobber *only* on the left or right node, N), and
722 // S is our start. Say we walk to D, through the node opposite N
723 // (read: ignoring the clobber), and see a cache entry in the top
724 // node of D. That cache entry gets put into TerminatedPaths. We then
725 // walk up to C (N is later in our worklist), find the clobber, and
726 // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
727 // the bottom part of D to the cached clobber, ignoring the clobber
728 // in N. Again, this problem goes away if we start tracking all
729 // blockers for a given phi optimization.
730 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
731 return {Result, {}};
732 }
733
734 // If there's nothing left to search, then all paths led to valid clobbers
735 // that we got from our cache; pick the nearest to the start, and allow
736 // the rest to be cached back.
737 if (NewPaused.empty()) {
738 MoveDominatedPathToEnd(TerminatedPaths);
739 TerminatedPath Result = TerminatedPaths.pop_back_val();
740 return {Result, std::move(TerminatedPaths)};
741 }
742
743 MemoryAccess *DefChainEnd = nullptr;
744 SmallVector<TerminatedPath, 4> Clobbers;
745 for (ListIndex Paused : NewPaused) {
746 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
747 if (WR.IsKnownClobber)
748 Clobbers.push_back({WR.Result, Paused});
749 else
750 // Micro-opt: If we hit the end of the chain, save it.
751 DefChainEnd = WR.Result;
752 }
753
754 if (!TerminatedPaths.empty()) {
755 // If we couldn't find the dominating phi/liveOnEntry in the above loop,
756 // do it now.
757 if (!DefChainEnd)
758 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
759 DefChainEnd = MA;
760
761 // If any of the terminated paths don't dominate the phi we'll try to
762 // optimize, we need to figure out what they are and quit.
763 const BasicBlock *ChainBB = DefChainEnd->getBlock();
764 for (const TerminatedPath &TP : TerminatedPaths) {
765 // Because we know that DefChainEnd is as "high" as we can go, we
766 // don't need local dominance checks; BB dominance is sufficient.
767 if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
768 Clobbers.push_back(TP);
769 }
770 }
771
772 // If we have clobbers in the def chain, find the one closest to Current
773 // and quit.
774 if (!Clobbers.empty()) {
775 MoveDominatedPathToEnd(Clobbers);
776 TerminatedPath Result = Clobbers.pop_back_val();
777 return {Result, std::move(Clobbers)};
778 }
779
780 assert(all_of(NewPaused,
781 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
782
783 // Because liveOnEntry is a clobber, this must be a phi.
784 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
785
786 PriorPathsSize = Paths.size();
787 PausedSearches.clear();
788 for (ListIndex I : NewPaused)
789 addSearches(DefChainPhi, PausedSearches, I);
790 NewPaused.clear();
791
792 Current = DefChainPhi;
793 }
794 }
795
796 void verifyOptResult(const OptznResult &R) const {
797 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
798 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
799 }));
800 }
801
802 void resetPhiOptznState() {
803 Paths.clear();
804 VisitedPhis.clear();
805 }
806
807public:
808 ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
809 : MSSA(MSSA), AA(AA), DT(DT) {}
810
811 void reset() {}
812
813 /// Finds the nearest clobber for the given query, optimizing phis if
814 /// possible.
815 MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
816 Query = &Q;
817
818 MemoryAccess *Current = Start;
819 // This walker pretends uses don't exist. If we're handed one, silently grab
820 // its def. (This has the nice side-effect of ensuring we never cache uses)
821 if (auto *MU = dyn_cast<MemoryUse>(Start))
822 Current = MU->getDefiningAccess();
823
824 DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
825 // Fast path for the overly-common case (no crazy phi optimization
826 // necessary)
827 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
828 MemoryAccess *Result;
829 if (WalkResult.IsKnownClobber) {
830 Result = WalkResult.Result;
831 } else {
832 OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
833 Current, Q.StartingLoc);
834 verifyOptResult(OptRes);
835 resetPhiOptznState();
836 Result = OptRes.PrimaryClobber.Clobber;
837 }
838
839#ifdef EXPENSIVE_CHECKS
840 checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
841#endif
842 return Result;
843 }
844
845 void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
846};
847
848struct RenamePassData {
849 DomTreeNode *DTN;
850 DomTreeNode::const_iterator ChildIt;
851 MemoryAccess *IncomingVal;
852
853 RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
854 MemoryAccess *M)
855 : DTN(D), ChildIt(It), IncomingVal(M) {}
856
857 void swap(RenamePassData &RHS) {
858 std::swap(DTN, RHS.DTN);
859 std::swap(ChildIt, RHS.ChildIt);
860 std::swap(IncomingVal, RHS.IncomingVal);
861 }
862};
863
864} // end anonymous namespace
865
866namespace llvm {
867
868/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
869/// longer does caching on its own,
870/// but the name has been retained for the moment.
871class MemorySSA::CachingWalker final : public MemorySSAWalker {
872 ClobberWalker Walker;
873 bool AutoResetWalker = true;
874
875 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
876
877public:
878 CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
879 ~CachingWalker() override = default;
880
881 using MemorySSAWalker::getClobberingMemoryAccess;
882
883 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
884 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
885 const MemoryLocation &) override;
886 void invalidateInfo(MemoryAccess *) override;
887
888 /// Whether we call resetClobberWalker() after each time we *actually* walk to
889 /// answer a clobber query.
890 void setAutoResetWalker(bool AutoReset) { AutoResetWalker = AutoReset; }
891
892 /// Drop the walker's persistent data structures.
893 void resetClobberWalker() { Walker.reset(); }
894
895 void verify(const MemorySSA *MSSA) override {
896 MemorySSAWalker::verify(MSSA);
897 Walker.verify(MSSA);
898 }
899};
900
901} // end namespace llvm
902
903void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
904 bool RenameAllUses) {
905 // Pass through values to our successors
906 for (const BasicBlock *S : successors(BB)) {
907 auto It = PerBlockAccesses.find(S);
908 // Rename the phi nodes in our successor block
909 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
910 continue;
911 AccessList *Accesses = It->second.get();
912 auto *Phi = cast<MemoryPhi>(&Accesses->front());
913 if (RenameAllUses) {
914 int PhiIndex = Phi->getBasicBlockIndex(BB);
915 assert(PhiIndex != -1 && "Incomplete phi during partial rename");
916 Phi->setIncomingValue(PhiIndex, IncomingVal);
917 } else
918 Phi->addIncoming(IncomingVal, BB);
919 }
920}
921
922/// \brief Rename a single basic block into MemorySSA form.
923/// Uses the standard SSA renaming algorithm.
924/// \returns The new incoming value.
925MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
926 bool RenameAllUses) {
927 auto It = PerBlockAccesses.find(BB);
928 // Skip most processing if the list is empty.
929 if (It != PerBlockAccesses.end()) {
930 AccessList *Accesses = It->second.get();
931 for (MemoryAccess &L : *Accesses) {
932 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
933 if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
934 MUD->setDefiningAccess(IncomingVal);
935 if (isa<MemoryDef>(&L))
936 IncomingVal = &L;
937 } else {
938 IncomingVal = &L;
939 }
940 }
941 }
942 return IncomingVal;
943}
944
945/// \brief This is the standard SSA renaming algorithm.
946///
947/// We walk the dominator tree in preorder, renaming accesses, and then filling
948/// in phi nodes in our successors.
949void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
950 SmallPtrSetImpl<BasicBlock *> &Visited,
951 bool SkipVisited, bool RenameAllUses) {
952 SmallVector<RenamePassData, 32> WorkStack;
953 // Skip everything if we already renamed this block and we are skipping.
954 // Note: You can't sink this into the if, because we need it to occur
955 // regardless of whether we skip blocks or not.
956 bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
957 if (SkipVisited && AlreadyVisited)
958 return;
959
960 IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
961 renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
962 WorkStack.push_back({Root, Root->begin(), IncomingVal});
963
964 while (!WorkStack.empty()) {
965 DomTreeNode *Node = WorkStack.back().DTN;
966 DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
967 IncomingVal = WorkStack.back().IncomingVal;
968
969 if (ChildIt == Node->end()) {
970 WorkStack.pop_back();
971 } else {
972 DomTreeNode *Child = *ChildIt;
973 ++WorkStack.back().ChildIt;
974 BasicBlock *BB = Child->getBlock();
975 // Note: You can't sink this into the if, because we need it to occur
976 // regardless of whether we skip blocks or not.
977 AlreadyVisited = !Visited.insert(BB).second;
978 if (SkipVisited && AlreadyVisited) {
979 // We already visited this during our renaming, which can happen when
980 // being asked to rename multiple blocks. Figure out the incoming val,
981 // which is the last def.
982 // Incoming value can only change if there is a block def, and in that
983 // case, it's the last block def in the list.
984 if (auto *BlockDefs = getWritableBlockDefs(BB))
985 IncomingVal = &*BlockDefs->rbegin();
986 } else
987 IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
988 renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
989 WorkStack.push_back({Child, Child->begin(), IncomingVal});
990 }
991 }
992}
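As a side note on the traversal just shown: renamePass visits the dominator tree iteratively in preorder with an explicit stack of (node, next-child) frames rather than recursing. A minimal standalone sketch of that pattern follows; Node and walkPreorder are hypothetical names with no LLVM dependencies, introduced only for illustration.

#include <cstddef>
#include <cstdio>
#include <vector>

struct Node {
  const char *Name;
  std::vector<Node *> Children; // children in the (dominator) tree
};

static void walkPreorder(Node *Root) {
  struct Frame { Node *N; std::size_t NextChild; };
  std::vector<Frame> Stack{{Root, 0}};
  std::printf("visit %s\n", Root->Name);      // the root is renamed first
  while (!Stack.empty()) {
    Frame &Top = Stack.back();
    if (Top.NextChild == Top.N->Children.size()) {
      Stack.pop_back();                       // all children handled
    } else {
      Node *Child = Top.N->Children[Top.NextChild++];
      std::printf("visit %s\n", Child->Name); // preorder: visit on the way down
      Stack.push_back({Child, 0});            // Top is not used after this push
    }
  }
}

int main() {
  Node D{"D", {}}, C{"C", {}}, B{"B", {&D}}, A{"A", {&B, &C}};
  walkPreorder(&A); // prints A, B, D, C
}

For the tree A -> {B, C}, B -> {D}, this prints A, B, D, C, which matches the order in which renamePass would rename blocks.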
993
994/// \brief This handles unreachable block accesses by deleting phi nodes in
995/// unreachable blocks, and marking all other unreachable MemoryAccess's as
996/// being uses of the live on entry definition.
997void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
998 assert(!DT->isReachableFromEntry(BB) &&
999 "Reachable block found while handling unreachable blocks");
1000
1001 // Make sure phi nodes in our reachable successors end up with a
1002 // LiveOnEntryDef for our incoming edge, even though our block is forward
1003 // unreachable. We could just disconnect these blocks from the CFG fully,
1004 // but we do not right now.
1005 for (const BasicBlock *S : successors(BB)) {
1006 if (!DT->isReachableFromEntry(S))
1007 continue;
1008 auto It = PerBlockAccesses.find(S);
1009 // Rename the phi nodes in our successor block
1010 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1011 continue;
1012 AccessList *Accesses = It->second.get();
1013 auto *Phi = cast<MemoryPhi>(&Accesses->front());
1014 Phi->addIncoming(LiveOnEntryDef.get(), BB);
1015 }
1016
1017 auto It = PerBlockAccesses.find(BB);
1018 if (It == PerBlockAccesses.end())
1019 return;
1020
1021 auto &Accesses = It->second;
1022 for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1023 auto Next = std::next(AI);
1024 // If we have a phi, just remove it. We are going to replace all
1025 // users with live on entry.
1026 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1027 UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1028 else
1029 Accesses->erase(AI);
1030 AI = Next;
1031 }
1032}
1033
1034MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1035 : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1036 NextID(INVALID_MEMORYACCESS_ID) {
1037 buildMemorySSA();
1038}
1039
1040MemorySSA::~MemorySSA() {
1041 // Drop all our references
1042 for (const auto &Pair : PerBlockAccesses)
1043 for (MemoryAccess &MA : *Pair.second)
1044 MA.dropAllReferences();
1045}
1046
1047MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1048 auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1049
1050 if (Res.second)
1051 Res.first->second = llvm::make_unique<AccessList>();
1052 return Res.first->second.get();
1053}
1054
1055MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1056 auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1057
1058 if (Res.second)
1059 Res.first->second = llvm::make_unique<DefsList>();
1060 return Res.first->second.get();
1061}
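Both helpers above rely on the same lazy-creation idiom: insert a (key, nullptr) pair, and only allocate the list when the insertion actually took place. Below is a standalone sketch of the idiom using std::map and std::unique_ptr; AccessList, BlockKey, and getOrCreate here are hypothetical stand-ins, not LLVM types.

#include <map>
#include <memory>
#include <vector>

using AccessList = std::vector<int>;
using BlockKey = const void *;

AccessList *getOrCreate(std::map<BlockKey, std::unique_ptr<AccessList>> &M,
                        BlockKey BB) {
  // insert() is a no-op when the key already exists; Res.second reports
  // whether the (key, nullptr) pair was newly inserted, so the list is
  // allocated exactly once per key.
  auto Res = M.insert(std::make_pair(BB, nullptr));
  if (Res.second)
    Res.first->second.reset(new AccessList());
  return Res.first->second.get();
}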
1062
1063namespace llvm {
1064
1065/// This class is a batch walker of all MemoryUse's in the program, and points
1066/// their defining access at the thing that actually clobbers them. Because it
1067/// is a batch walker that touches everything, it does not operate like the
1068/// other walkers. This walker is basically performing a top-down SSA renaming
1069/// pass, where the version stack is used as the cache. This enables it to be
1070/// significantly more time and memory efficient than using the regular walker,
1071/// which is walking bottom-up.
1072class MemorySSA::OptimizeUses {
1073public:
1074 OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
1075 DominatorTree *DT)
1076 : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
1077 Walker = MSSA->getWalker();
1078 }
1079
1080 void optimizeUses();
1081
1082private:
1083 /// This represents where a given memory location is in the stack.
1084 struct MemlocStackInfo {
1085 // This essentially is keeping track of versions of the stack. Whenever
1086 // the stack changes due to pushes or pops, these versions increase.
1087 unsigned long StackEpoch;
1088 unsigned long PopEpoch;
1089 // This is the lower bound of places on the stack to check. It is equal to
1090 // the place the last stack walk ended.
1091 // Note: Correctness depends on this being initialized to 0, which densemap
1092 // does
1093 unsigned long LowerBound;
1094 const BasicBlock *LowerBoundBlock;
1095 // This is where the last walk for this memory location ended.
1096 unsigned long LastKill;
1097 bool LastKillValid;
1098 };
1099
1100 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1101 SmallVectorImpl<MemoryAccess *> &,
1102 DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1103
1104 MemorySSA *MSSA;
1105 MemorySSAWalker *Walker;
1106 AliasAnalysis *AA;
1107 DominatorTree *DT;
1108};
1109
1110} // end namespace llvm
1111
1112/// Optimize the uses in a given block. This is basically the SSA renaming
1113/// algorithm, with one caveat: We are able to use a single stack for all
1114/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1115/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1116/// going to be some position in that stack of possible ones.
1117///
1118/// We track the stack positions that each MemoryLocation needs
1119/// to check, and last ended at. This is because we only want to check the
1120/// things that changed since last time. The same MemoryLocation should
1121/// get clobbered by the same store (getModRefInfo does not use invariantness or
1122/// things like this, and if they start, we can modify MemoryLocOrCall to
1123/// include relevant data)
1124void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1125 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1126 SmallVectorImpl<MemoryAccess *> &VersionStack,
1127 DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1128
1129 /// If no accesses, nothing to do.
1130 MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1131 if (Accesses == nullptr)
1132 return;
1133
1134 // Pop everything that doesn't dominate the current block off the stack,
1135 // increment the PopEpoch to account for this.
1136 while (true) {
1137 assert(
1138     !VersionStack.empty() &&
1139     "Version stack should have liveOnEntry sentinel dominating everything");
1140 BasicBlock *BackBlock = VersionStack.back()->getBlock();
1141 if (DT->dominates(BackBlock, BB))
1142 break;
1143 while (VersionStack.back()->getBlock() == BackBlock)
1144 VersionStack.pop_back();
1145 ++PopEpoch;
1146 }
1147
1148 for (MemoryAccess &MA : *Accesses) {
1149 auto *MU = dyn_cast<MemoryUse>(&MA);
1150 if (!MU) {
1151 VersionStack.push_back(&MA);
1152 ++StackEpoch;
1153 continue;
1154 }
1155
1156 if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1157 MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
1158 continue;
1159 }
1160
1161 MemoryLocOrCall UseMLOC(MU);
1162 auto &LocInfo = LocStackInfo[UseMLOC];
1163 // If the pop epoch changed, it means we've removed stuff from top of
1164 // stack due to changing blocks. We may have to reset the lower bound or
1165 // last kill info.
1166 if (LocInfo.PopEpoch != PopEpoch) {
1167 LocInfo.PopEpoch = PopEpoch;
1168 LocInfo.StackEpoch = StackEpoch;
1169 // If the lower bound was in something that no longer dominates us, we
1170 // have to reset it.
1171 // We can't simply track stack size, because the stack may have had
1172 // pushes/pops in the meantime.
1173 // XXX: This is non-optimal, but is only slower in cases with heavily
1174 // branching dominator trees. To get the optimal number of queries would
1175 // be to make lowerbound and lastkill a per-loc stack, and pop it until
1176 // the top of that stack dominates us. This does not seem worth it ATM.
1177 // A much cheaper optimization would be to always explore the deepest
1178 // branch of the dominator tree first. This will guarantee this resets on
1179 // the smallest set of blocks.
1180 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1181 !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1182 // Reset the lower bound of things to check.
1183 // TODO: Some day we should be able to reset to last kill, rather than
1184 // 0.
1185 LocInfo.LowerBound = 0;
1186 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1187 LocInfo.LastKillValid = false;
1188 }
1189 } else if (LocInfo.StackEpoch != StackEpoch) {
1190 // If all that has changed is the StackEpoch, we only have to check the
1191 // new things on the stack, because we've checked everything before. In
1192 // this case, the lower bound of things to check remains the same.
1193 LocInfo.PopEpoch = PopEpoch;
1194 LocInfo.StackEpoch = StackEpoch;
1195 }
1196 if (!LocInfo.LastKillValid) {
1197 LocInfo.LastKill = VersionStack.size() - 1;
1198 LocInfo.LastKillValid = true;
1199 }
1200
1201 // At this point, we should have corrected last kill and LowerBound to be
1202 // in bounds.
1203 assert(LocInfo.LowerBound < VersionStack.size() &&
1204        "Lower bound out of range");
1205 assert(LocInfo.LastKill < VersionStack.size() &&
1206        "Last kill info out of range");
1207 // In any case, the new upper bound is the top of the stack.
1208 unsigned long UpperBound = VersionStack.size() - 1;
1209
1210 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1211 DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1212              << *(MU->getMemoryInst()) << ")"
1213              << " because there are " << UpperBound - LocInfo.LowerBound
1214              << " stores to disambiguate\n");
1215 // Because we did not walk, LastKill is no longer valid, as this may
1216 // have been a kill.
1217 LocInfo.LastKillValid = false;
1218 continue;
1219 }
1220 bool FoundClobberResult = false;
1221 while (UpperBound > LocInfo.LowerBound) {
1222 if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1223 // For phis, use the walker, see where we ended up, go there
1224 Instruction *UseInst = MU->getMemoryInst();
1225 MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
1226 // We are guaranteed to find it or something is wrong
1227 while (VersionStack[UpperBound] != Result) {
1228 assert(UpperBound != 0);
1229 --UpperBound;
1230 }
1231 FoundClobberResult = true;
1232 break;
1233 }
1234
1235 MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1236 // If the lifetime of the pointer ends at this instruction, it's live on
1237 // entry.
1238 if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1239 // Reset UpperBound to liveOnEntryDef's place in the stack
1240 UpperBound = 0;
1241 FoundClobberResult = true;
1242 break;
1243 }
1244 if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
1245 FoundClobberResult = true;
1246 break;
1247 }
1248 --UpperBound;
1249 }
1250 // At the end of this loop, UpperBound is either a clobber or the lower bound.
1251 // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
1252 if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1253 MU->setDefiningAccess(VersionStack[UpperBound], true);
1254 // We were last killed now by where we got to
1255 LocInfo.LastKill = UpperBound;
1256 } else {
1257 // Otherwise, we checked all the new ones, and now we know we can get to
1258 // LastKill.
1259 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
1260 }
1261 LocInfo.LowerBound = VersionStack.size() - 1;
1262 LocInfo.LowerBoundBlock = BB;
1263 }
1264}
1265
1266/// Optimize uses to point to their actual clobbering definitions.
1267void MemorySSA::OptimizeUses::optimizeUses() {
1268 SmallVector<MemoryAccess *, 16> VersionStack;
1269 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1270 VersionStack.push_back(MSSA->getLiveOnEntryDef());
1271
1272 unsigned long StackEpoch = 1;
1273 unsigned long PopEpoch = 1;
1274 // We perform a non-recursive top-down dominator tree walk.
1275 for (const auto *DomNode : depth_first(DT->getRootNode()))
1276 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1277 LocStackInfo);
1278}
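
To make the version-stack idea described above concrete, here is a minimal, hypothetical sketch (not part of MemorySSA) of the same pattern on a toy domain: walk a dominator tree, keep one shared stack of dominating "definitions", and pop everything that no longer dominates the current node before consulting the stack. The Node type and the toyDominates() helper are invented for illustration only.

#include <cstdio>
#include <vector>

struct Node {
  int Id = 0;
  Node *Parent = nullptr;          // immediate dominator
  std::vector<Node *> Children;    // dominator-tree children
  bool HasDef = false;             // this node defines a new "version"
};

// Toy domination check: walk up the immediate-dominator chain.
static bool toyDominates(const Node *A, const Node *B) {
  for (const Node *N = B; N; N = N->Parent)
    if (N == A)
      return true;
  return false;
}

// Pre-order walk sharing one version stack, like OptimizeUses does.
static void toyWalk(Node *N, std::vector<Node *> &VersionStack) {
  // Pop entries that do not dominate N (mirrors the PopEpoch loop above).
  while (!VersionStack.empty() && !toyDominates(VersionStack.back(), N))
    VersionStack.pop_back();
  if (N->HasDef)
    VersionStack.push_back(N);     // mirrors pushing a MemoryDef/MemoryPhi
  std::printf("node %d: nearest dominating def is %d\n", N->Id,
              VersionStack.empty() ? -1 : VersionStack.back()->Id);
  for (Node *C : N->Children)
    toyWalk(C, VersionStack);
}

The real walker is non-recursive and tracks per-location lower bounds on top of this, but the pop-until-dominating step is the same mechanism.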
1279
1280void MemorySSA::placePHINodes(
1281 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks,
1282 const DenseMap<const BasicBlock *, unsigned int> &BBNumbers) {
1283 // Determine where our MemoryPhi's should go
1284 ForwardIDFCalculator IDFs(*DT);
1285 IDFs.setDefiningBlocks(DefiningBlocks);
1286 SmallVector<BasicBlock *, 32> IDFBlocks;
1287 IDFs.calculate(IDFBlocks);
1288
1289 std::sort(IDFBlocks.begin(), IDFBlocks.end(),
1290 [&BBNumbers](const BasicBlock *A, const BasicBlock *B) {
1291 return BBNumbers.lookup(A) < BBNumbers.lookup(B);
1292 });
1293
1294 // Now place MemoryPhi nodes.
1295 for (auto &BB : IDFBlocks)
1296 createMemoryPhi(BB);
1297}
1298
1299void MemorySSA::buildMemorySSA() {
1300 // We create an access to represent "live on entry", for things like
1301 // arguments or users of globals, where the memory they use is defined before
1302 // the beginning of the function. We do not actually insert it into the IR.
1303 // We do not define a live on exit for the immediate uses, and thus our
1304 // semantics do *not* imply that something with no immediate uses can simply
1305 // be removed.
1306 BasicBlock &StartingPoint = F.getEntryBlock();
1307 LiveOnEntryDef =
1308 llvm::make_unique<MemoryDef>(F.getContext(), nullptr, nullptr,
1309 &StartingPoint, NextID++);
1310 DenseMap<const BasicBlock *, unsigned int> BBNumbers;
1311 unsigned NextBBNum = 0;
1312
1313 // We maintain lists of memory accesses per-block, trading memory for time. We
1314 // could just look up the memory access for every possible instruction in the
1315 // stream.
1316 SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1317 // Go through each block, figure out where defs occur, and chain together all
1318 // the accesses.
1319 for (BasicBlock &B : F) {
1320 BBNumbers[&B] = NextBBNum++;
1321 bool InsertIntoDef = false;
1322 AccessList *Accesses = nullptr;
1323 DefsList *Defs = nullptr;
1324 for (Instruction &I : B) {
1325 MemoryUseOrDef *MUD = createNewAccess(&I);
1326 if (!MUD)
1327 continue;
1328
1329 if (!Accesses)
1330 Accesses = getOrCreateAccessList(&B);
1331 Accesses->push_back(MUD);
1332 if (isa<MemoryDef>(MUD)) {
1333 InsertIntoDef = true;
1334 if (!Defs)
1335 Defs = getOrCreateDefsList(&B);
1336 Defs->push_back(*MUD);
1337 }
1338 }
1339 if (InsertIntoDef)
1340 DefiningBlocks.insert(&B);
1341 }
1342 placePHINodes(DefiningBlocks, BBNumbers);
1343
1344 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1345 // filled in with all blocks.
1346 SmallPtrSet<BasicBlock *, 16> Visited;
1347 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1348
1349 CachingWalker *Walker = getWalkerImpl();
1350
1351 // We're doing a batch of updates; don't drop useful caches between them.
1352 Walker->setAutoResetWalker(false);
1353 OptimizeUses(this, Walker, AA, DT).optimizeUses();
1354 Walker->setAutoResetWalker(true);
1355 Walker->resetClobberWalker();
1356
1357 // Mark the uses in unreachable blocks as live on entry, so that they go
1358 // somewhere.
1359 for (auto &BB : F)
1360 if (!Visited.count(&BB))
1361 markUnreachableAsLiveOnEntry(&BB);
1362}
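
For orientation, the structure built here is normally consumed through the lookup tables and the walker. A minimal, hedged usage sketch follows; the helper name is invented, and an already-built MemorySSA is assumed (for example obtained from MemorySSAWrapperPass::getMSSA() or MemorySSAAnalysis).

#include "llvm/Analysis/MemorySSA.h"

// Hypothetical helper: does the nearest clobber of I's memory access reach
// all the way back to the live-on-entry definition created above?
static bool clobberedOnlyByFunctionEntry(llvm::MemorySSA &MSSA,
                                         llvm::Instruction &I) {
  llvm::MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I);
  if (!MA)
    return false;                  // I neither reads nor writes memory.
  llvm::MemoryAccess *Clobber =
      MSSA.getWalker()->getClobberingMemoryAccess(MA);
  return MSSA.isLiveOnEntryDef(Clobber);
}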
1363
1364MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1365
1366MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1367 if (Walker)
1368 return Walker.get();
1369
1370 Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
1371 return Walker.get();
1372}
1373
1374// This is a helper function used by the creation routines. It places NewAccess
1375// into the access and defs lists for a given basic block, at the given
1376// insertion point.
1377void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1378 const BasicBlock *BB,
1379 InsertionPlace Point) {
1380 auto *Accesses = getOrCreateAccessList(BB);
1381 if (Point == Beginning) {
1382 // If it's a phi node, it goes first, otherwise, it goes after any phi
1383 // nodes.
1384 if (isa<MemoryPhi>(NewAccess)) {
1385 Accesses->push_front(NewAccess);
1386 auto *Defs = getOrCreateDefsList(BB);
1387 Defs->push_front(*NewAccess);
1388 } else {
1389 auto AI = find_if_not(
1390 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1391 Accesses->insert(AI, NewAccess);
1392 if (!isa<MemoryUse>(NewAccess)) {
1393 auto *Defs = getOrCreateDefsList(BB);
1394 auto DI = find_if_not(
1395 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1396 Defs->insert(DI, *NewAccess);
1397 }
1398 }
1399 } else {
1400 Accesses->push_back(NewAccess);
1401 if (!isa<MemoryUse>(NewAccess)) {
1402 auto *Defs = getOrCreateDefsList(BB);
1403 Defs->push_back(*NewAccess);
1404 }
1405 }
1406 BlockNumberingValid.erase(BB);
1407}
1408
1409void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1410 AccessList::iterator InsertPt) {
1411 auto *Accesses = getWritableBlockAccesses(BB);
1412 bool WasEnd = InsertPt == Accesses->end();
1413 Accesses->insert(AccessList::iterator(InsertPt), What);
1414 if (!isa<MemoryUse>(What)) {
1415 auto *Defs = getOrCreateDefsList(BB);
1416 // If we got asked to insert at the end, we have an easy job, just shove it
1417 // at the end. If we got asked to insert before an existing def, we also get
1418 // an iterator. If we got asked to insert before a use, we have to hunt for
1419 // the next def.
1420 if (WasEnd) {
1421 Defs->push_back(*What);
1422 } else if (isa<MemoryDef>(InsertPt)) {
1423 Defs->insert(InsertPt->getDefsIterator(), *What);
1424 } else {
1425 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1426 ++InsertPt;
1427 // Either we found a def, or we are inserting at the end
1428 if (InsertPt == Accesses->end())
1429 Defs->push_back(*What);
1430 else
1431 Defs->insert(InsertPt->getDefsIterator(), *What);
1432 }
1433 }
1434 BlockNumberingValid.erase(BB);
1435}
1436
1437// Move What before Where in the IR. The end result is that What will belong to
1438// the right lists and have the right Block set, but will not otherwise be
1439// correct. It will not have the right defining access, and if it is a def,
1440// things below it will not properly be updated.
1441void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1442 AccessList::iterator Where) {
1443 // Keep it in the lookup tables, remove from the lists
1444 removeFromLists(What, false);
1445 What->setBlock(BB);
1446 insertIntoListsBefore(What, BB, Where);
1447}
1448
1449void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1450 InsertionPlace Point) {
1451 removeFromLists(What, false);
1452 What->setBlock(BB);
1453 insertIntoListsForBlock(What, BB, Point);
1454}
1455
1456MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1457 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1458 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1459 // Phi's always are placed at the front of the block.
1460 insertIntoListsForBlock(Phi, BB, Beginning);
1461 ValueToMemoryAccess[BB] = Phi;
1462 return Phi;
1463}
1464
1465MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1466 MemoryAccess *Definition) {
1467 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1468 MemoryUseOrDef *NewAccess = createNewAccess(I);
1469 assert(
1470     NewAccess != nullptr &&
1471     "Tried to create a memory access for a non-memory touching instruction");
1472 NewAccess->setDefiningAccess(Definition);
1473 return NewAccess;
1474}
1475
1476// Return true if the instruction has ordering constraints.
1477// Note specifically that this only considers stores and loads
1478// because others are still considered ModRef by getModRefInfo.
1479static inline bool isOrdered(const Instruction *I) {
1480 if (auto *SI = dyn_cast<StoreInst>(I)) {
1481 if (!SI->isUnordered())
1482 return true;
1483 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1484 if (!LI->isUnordered())
1485 return true;
1486 }
1487 return false;
1488}
1489
1490/// \brief Helper function to create new memory accesses
1491MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
1492 // The assume intrinsic has a control dependency which we model by claiming
1493 // that it writes arbitrarily. Ignore that fake memory dependency here.
1494 // FIXME: Replace this special casing with a more accurate modelling of
1495 // assume's control dependency.
1496 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1497 if (II->getIntrinsicID() == Intrinsic::assume)
1498 return nullptr;
1499
1500 // Find out what effect this instruction has on memory.
1501 ModRefInfo ModRef = AA->getModRefInfo(I, None);
1502 // The isOrdered check is used to ensure that volatiles end up as defs
1503 // (atomics end up as ModRef right now anyway). Until we separate the
1504 // ordering chain from the memory chain, this enables people to see at least
1505 // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1506 // will still give an answer that bypasses other volatile loads. TODO:
1507 // Separate memory aliasing and ordering into two different chains so that we
1508 // can precisely represent both "what memory will this read/write/is clobbered
1509 // by" and "what instructions can I move this past".
1510 bool Def = isModSet(ModRef) || isOrdered(I);
1511 bool Use = isRefSet(ModRef);
1512
1513 // It's possible for an instruction to not modify memory at all. During
1514 // construction, we ignore them.
1515 if (!Def && !Use)
1516 return nullptr;
1517
1518 assert((Def || Use) &&
1519        "Trying to create a memory access with a non-memory instruction");
1520
1521 MemoryUseOrDef *MUD;
1522 if (Def)
1523 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1524 else
1525 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1526 ValueToMemoryAccess[I] = MUD;
1527 return MUD;
1528}
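
One practical consequence of the Def/Use split above is that volatile and atomic loads are modelled as MemoryDefs even though they only read memory. A small, hypothetical sketch of how a client could observe the classification made here (the helper name is an assumption, not part of this file):

// Hypothetical: report how createNewAccess() classified an instruction.
static const char *accessKind(llvm::MemorySSA &MSSA, llvm::Instruction &I) {
  llvm::MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I);
  if (!MA)
    return "none";                               // does not touch memory
  return llvm::isa<llvm::MemoryDef>(MA) ? "def"  // mod, or ordered load/store
                                        : "use"; // ref only
}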
1529
1530/// \brief Returns true if \p Replacer dominates \p Replacee .
1531bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1532 const MemoryAccess *Replacee) const {
1533 if (isa<MemoryUseOrDef>(Replacee))
1534 return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1535 const auto *MP = cast<MemoryPhi>(Replacee);
1536 // For a phi node, the use occurs in the predecessor block of the phi node.
1537 // Since we may occur multiple times in the phi node, we have to check each
1538 // operand to ensure Replacer dominates each operand where Replacee occurs.
1539 for (const Use &Arg : MP->operands()) {
1540 if (Arg.get() != Replacee &&
1541 !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1542 return false;
1543 }
1544 return true;
1545}
1546
1547/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
1548void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1549 assert(MA->use_empty() &&
1550        "Trying to remove memory access that still has uses");
1551 BlockNumbering.erase(MA);
1552 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
1553 MUD->setDefiningAccess(nullptr);
1554 // Invalidate our walker's cache if necessary
1555 if (!isa<MemoryUse>(MA))
1556 Walker->invalidateInfo(MA);
1557 // The call below to erase will destroy MA, so we can't change the order we
1558 // are doing things here
1559 Value *MemoryInst;
1560 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1561 MemoryInst = MUD->getMemoryInst();
1562 } else {
1563 MemoryInst = MA->getBlock();
1564 }
1565 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1566 if (VMA->second == MA)
1567 ValueToMemoryAccess.erase(VMA);
1568}
1569
1570/// \brief Properly remove \p MA from all of MemorySSA's lists.
1571///
1572/// Because of the way the intrusive list and use lists work, it is important to
1573/// do removal in the right order.
1574/// ShouldDelete defaults to true, and will cause the memory access to also be
1575/// deleted, not just removed.
1576void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1577 // The access list owns the reference, so we erase it from the non-owning list
1578 // first.
1579 if (!isa<MemoryUse>(MA)) {
1580 auto DefsIt = PerBlockDefs.find(MA->getBlock());
1581 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1582 Defs->remove(*MA);
1583 if (Defs->empty())
1584 PerBlockDefs.erase(DefsIt);
1585 }
1586
1587 // The erase call here will delete it. If we don't want it deleted, we call
1588 // remove instead.
1589 auto AccessIt = PerBlockAccesses.find(MA->getBlock());
1590 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1591 if (ShouldDelete)
1592 Accesses->erase(MA);
1593 else
1594 Accesses->remove(MA);
1595
1596 if (Accesses->empty())
1597 PerBlockAccesses.erase(AccessIt);
1598}
1599
1600void MemorySSA::print(raw_ostream &OS) const {
1601 MemorySSAAnnotatedWriter Writer(this);
1602 F.print(OS, &Writer);
1603}
1604
1605#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1606LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1607#endif
1608
1609void MemorySSA::verifyMemorySSA() const {
1610 verifyDefUses(F);
1611 verifyDomination(F);
1612 verifyOrdering(F);
2. Calling 'MemorySSA::verifyOrdering'
1613 Walker->verify(this);
1614}
1615
1616/// \brief Verify that the order and existence of MemoryAccesses matches the
1617/// order and existence of memory affecting instructions.
1618void MemorySSA::verifyOrdering(Function &F) const {
1619 // Walk all the blocks, comparing what the lookups think and what the access
1620 // lists think, as well as the order in the blocks vs the order in the access
1621 // lists.
1622 SmallVector<MemoryAccess *, 32> ActualAccesses;
1623 SmallVector<MemoryAccess *, 32> ActualDefs;
1624 for (BasicBlock &B : F) {
1625 const AccessList *AL = getBlockAccesses(&B);
3. Calling 'MemorySSA::getBlockAccesses'
20. Returning from 'MemorySSA::getBlockAccesses'
26. Calling 'MemorySSA::getBlockAccesses'
43. Returning from 'MemorySSA::getBlockAccesses'
44. 'AL' initialized here
1626 const auto *DL = getBlockDefs(&B);
1627 MemoryAccess *Phi = getMemoryAccess(&B);
1628 if (Phi) {
21. Assuming 'Phi' is null
22. Taking false branch
45. Taking false branch
1629 ActualAccesses.push_back(Phi);
1630 ActualDefs.push_back(Phi);
1631 }
1632
1633 for (Instruction &I : B) {
1634 MemoryAccess *MA = getMemoryAccess(&I);
1635       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1636              "We have memory affecting instructions "
1637              "in this block but they are not in the "
1638              "access list or defs list");
1639 if (MA) {
1640 ActualAccesses.push_back(MA);
1641 if (isa<MemoryDef>(MA))
1642 ActualDefs.push_back(MA);
1643 }
1644 }
1645 // Either we hit the assert, really have no accesses, or we have both
1646 // accesses and an access list.
1647 // Same with defs.
1648 if (!AL && !DL)
23. Assuming 'AL' is non-null
46. Assuming 'AL' is null
47. Assuming pointer value is null
48. Assuming 'DL' is non-null
49. Taking false branch
1649 continue;
1650 assert(AL->size() == ActualAccesses.size() &&
50. Within the expansion of the macro 'assert': (a) Called C++ object pointer is null
1651        "We don't have the same number of accesses in the block as on the "
1652        "access list");
1653 assert((DL || ActualDefs.size() == 0) &&
1654        "Either we should have a defs list, or we should have no defs");
1655 assert((!DL || DL->size() == ActualDefs.size()) &&
1656        "We don't have the same number of defs in the block as on the "
1657        "def list");
1658 auto ALI = AL->begin();
1659 auto AAI = ActualAccesses.begin();
1660 while (ALI != AL->end() && AAI != ActualAccesses.end()) {
24. Loop condition is false. Execution continues on line 1665
1661 assert(&*ALI == *AAI && "Not the same accesses in the same order");
1662 ++ALI;
1663 ++AAI;
1664 }
1665 ActualAccesses.clear();
1666 if (DL) {
25. Taking false branch
1667 auto DLI = DL->begin();
1668 auto ADI = ActualDefs.begin();
1669 while (DLI != DL->end() && ADI != ActualDefs.end()) {
1670 assert(&*DLI == *ADI && "Not the same defs in the same order");
1671 ++DLI;
1672 ++ADI;
1673 }
1674 }
1675 ActualDefs.clear();
1676 }
1677}
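
The analyzer path above reaches line 1650 with AL assumed null and DL assumed non-null: that combination gets past the "if (!AL && !DL) continue;" guard, so AL->size() is evaluated on a null pointer. In practice the builder never creates a defs list without an access list, but nothing in this function states that invariant, which is why the checker can construct the path. A minimal sketch of one way to make the invariant explicit (illustrative only, not the upstream fix):

    // Sketch: spell out the "defs list implies access list" invariant so the
    // later size comparison cannot dereference a null AL.
    if (!AL && !DL)
      continue;
    assert(AL && "A block with a defs list must also have an access list");
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");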
1678
1679/// \brief Verify the domination properties of MemorySSA by checking that each
1680/// definition dominates all of its uses.
1681void MemorySSA::verifyDomination(Function &F) const {
1682#ifndef NDEBUG
1683 for (BasicBlock &B : F) {
1684 // Phi nodes are attached to basic blocks
1685 if (MemoryPhi *MP = getMemoryAccess(&B))
1686 for (const Use &U : MP->uses())
1687         assert(dominates(MP, U) && "Memory PHI does not dominate it's uses");
1688
1689 for (Instruction &I : B) {
1690 MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1691 if (!MD)
1692 continue;
1693
1694 for (const Use &U : MD->uses())
1695         assert(dominates(MD, U) && "Memory Def does not dominate it's uses");
1696 }
1697 }
1698#endif
1699}
1700
1701/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
1702/// appears in the use list of \p Def.
1703void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1704#ifndef NDEBUG
1705 // The live on entry use may cause us to get a NULL def here
1706 if (!Def)
1707     assert(isLiveOnEntryDef(Use) &&
1708            "Null def but use not point to live on entry def");
1709 else
1710     assert(is_contained(Def->users(), Use) &&
1711            "Did not find use in def's use list");
1712#endif
1713}
1714
1715/// \brief Verify the immediate use information, by walking all the memory
1716/// accesses and verifying that, for each use, it appears in the
1717/// appropriate def's use list
1718void MemorySSA::verifyDefUses(Function &F) const {
1719 for (BasicBlock &B : F) {
1720 // Phi nodes are attached to basic blocks
1721 if (MemoryPhi *Phi = getMemoryAccess(&B)) {
1722       assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1723                                           pred_begin(&B), pred_end(&B))) &&
1724              "Incomplete MemoryPhi Node");
1725 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1726 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1727 }
1728
1729 for (Instruction &I : B) {
1730 if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
1731 verifyUseInDefs(MA->getDefiningAccess(), MA);
1732 }
1733 }
1734 }
1735}
1736
1737MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
1738 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
1739}
1740
1741MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
1742 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
1743}
1744
1745/// Perform a local numbering on blocks so that instruction ordering can be
1746/// determined in constant time.
1747/// TODO: We currently just number in order. If we numbered by N, we could
1748/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
1749/// log2(N) sequences of mixed before and after) without needing to invalidate
1750/// the numbering.
1751void MemorySSA::renumberBlock(const BasicBlock *B) const {
1752 // The pre-increment ensures the numbers really start at 1.
1753 unsigned long CurrentNumber = 0;
1754 const AccessList *AL = getBlockAccesses(B);
1755 assert(AL != nullptr && "Asking to renumber an empty block");
1756 for (const auto &I : *AL)
1757 BlockNumbering[&I] = ++CurrentNumber;
1758 BlockNumberingValid.insert(B);
1759}
1760
1761/// \brief Determine, for two memory accesses in the same block,
1762/// whether \p Dominator dominates \p Dominatee.
1763/// \returns True if \p Dominator dominates \p Dominatee.
1764bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
1765 const MemoryAccess *Dominatee) const {
1766 const BasicBlock *DominatorBlock = Dominator->getBlock();
1767
1768 assert((DominatorBlock == Dominatee->getBlock()) &&
1769        "Asking for local domination when accesses are in different blocks!");
1770 // A node dominates itself.
1771 if (Dominatee == Dominator)
1772 return true;
1773
1774 // When Dominatee is defined on function entry, it is not dominated by another
1775 // memory access.
1776 if (isLiveOnEntryDef(Dominatee))
1777 return false;
1778
1779 // When Dominator is defined on function entry, it dominates the other memory
1780 // access.
1781 if (isLiveOnEntryDef(Dominator))
1782 return true;
1783
1784 if (!BlockNumberingValid.count(DominatorBlock))
1785 renumberBlock(DominatorBlock);
1786
1787 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
1788 // All numbers start with 1
1789 assert(DominatorNum != 0 && "Block was not numbered properly");
1790 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
1791 assert(DominateeNum != 0 && "Block was not numbered properly");
1792 return DominatorNum < DominateeNum;
1793}
1794
1795bool MemorySSA::dominates(const MemoryAccess *Dominator,
1796 const MemoryAccess *Dominatee) const {
1797 if (Dominator == Dominatee)
1798 return true;
1799
1800 if (isLiveOnEntryDef(Dominatee))
1801 return false;
1802
1803 if (Dominator->getBlock() != Dominatee->getBlock())
1804 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
1805 return locallyDominates(Dominator, Dominatee);
1806}
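
A short, hypothetical sketch of how the two paths above combine in practice: cross-block queries defer to the DominatorTree, same-block queries fall through to the local numbering. The helper name and parameters are assumptions for illustration.

// Hypothetical: does the store's MemoryDef dominate the load's MemoryUse?
static bool defDominatesUse(llvm::MemorySSA &MSSA, llvm::Instruction *Store,
                            llvm::Instruction *Load) {
  llvm::MemoryUseOrDef *D = MSSA.getMemoryAccess(Store);
  llvm::MemoryUseOrDef *U = MSSA.getMemoryAccess(Load);
  return D && U && MSSA.dominates(D, U);
}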
1807
1808bool MemorySSA::dominates(const MemoryAccess *Dominator,
1809 const Use &Dominatee) const {
1810 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
1811 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
1812 // The def must dominate the incoming block of the phi.
1813 if (UseBB != Dominator->getBlock())
1814 return DT->dominates(Dominator->getBlock(), UseBB);
1815 // If the UseBB and the DefBB are the same, compare locally.
1816 return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
1817 }
1818 // If it's not a PHI node use, the normal dominates can already handle it.
1819 return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
1820}
1821
1822const static char LiveOnEntryStr[] = "liveOnEntry";
1823
1824void MemoryAccess::print(raw_ostream &OS) const {
1825 switch (getValueID()) {
1826 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
1827 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
1828 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
1829 }
1830 llvm_unreachable("invalid value id");
1831}
1832
1833void MemoryDef::print(raw_ostream &OS) const {
1834 MemoryAccess *UO = getDefiningAccess();
1835
1836 OS << getID() << " = MemoryDef(";
1837 if (UO && UO->getID())
1838 OS << UO->getID();
1839 else
1840 OS << LiveOnEntryStr;
1841 OS << ')';
1842}
1843
1844void MemoryPhi::print(raw_ostream &OS) const {
1845 bool First = true;
1846 OS << getID() << " = MemoryPhi(";
1847 for (const auto &Op : operands()) {
1848 BasicBlock *BB = getIncomingBlock(Op);
1849 MemoryAccess *MA = cast<MemoryAccess>(Op);
1850 if (!First)
1851 OS << ',';
1852 else
1853 First = false;
1854
1855 OS << '{';
1856 if (BB->hasName())
1857 OS << BB->getName();
1858 else
1859 BB->printAsOperand(OS, false);
1860 OS << ',';
1861 if (unsigned ID = MA->getID())
1862 OS << ID;
1863 else
1864 OS << LiveOnEntryStr;
1865 OS << '}';
1866 }
1867 OS << ')';
1868}
1869
1870void MemoryUse::print(raw_ostream &OS) const {
1871 MemoryAccess *UO = getDefiningAccess();
1872 OS << "MemoryUse(";
1873 if (UO && UO->getID())
1874 OS << UO->getID();
1875 else
1876 OS << LiveOnEntryStr;
1877 OS << ')';
1878}
1879
1880void MemoryAccess::dump() const {
1881// Cannot completely remove virtual function even in release mode.
1882#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1883 print(dbgs());
1884 dbgs() << "\n";
1885#endif
1886}
1887
1888char MemorySSAPrinterLegacyPass::ID = 0;
1889
1890MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
1891 initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
1892}
1893
1894void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
1895 AU.setPreservesAll();
1896 AU.addRequired<MemorySSAWrapperPass>();
1897}
1898
1899bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
1900 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
1901 MSSA.print(dbgs());
1902 if (VerifyMemorySSA)
1903 MSSA.verifyMemorySSA();
1904 return false;
1905}
1906
1907AnalysisKey MemorySSAAnalysis::Key;
1908
1909MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
1910 FunctionAnalysisManager &AM) {
1911 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1912 auto &AA = AM.getResult<AAManager>(F);
1913 return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
1914}
1915
1916PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
1917 FunctionAnalysisManager &AM) {
1918 OS << "MemorySSA for function: " << F.getName() << "\n";
1919 AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
1920
1921 return PreservedAnalyses::all();
1922}
1923
1924PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
1925 FunctionAnalysisManager &AM) {
1926 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
1927
1928 return PreservedAnalyses::all();
1929}
1930
1931char MemorySSAWrapperPass::ID = 0;
1932
1933MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
1934 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
1935}
1936
1937void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
1938
1939void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1940 AU.setPreservesAll();
1941 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1942 AU.addRequiredTransitive<AAResultsWrapperPass>();
1943}
1944
1945bool MemorySSAWrapperPass::runOnFunction(Function &F) {
1946 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1947 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1948 MSSA.reset(new MemorySSA(F, &AA, &DT));
1949 return false;
1950}
1951
1952void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
1. Calling 'MemorySSA::verifyMemorySSA'
1953
1954void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
1955 MSSA->print(OS);
1956}
1957
1958MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
1959
1960MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
1961 DominatorTree *D)
1962 : MemorySSAWalker(M), Walker(*M, *A, *D) {}
1963
1964void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
1965 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1966 MUD->resetOptimized();
1967}
1968
1969/// \brief Walk the use-def chains starting at \p MA and find
1970/// the MemoryAccess that actually clobbers Loc.
1971///
1972/// \returns our clobbering memory access
1973MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
1974 MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
1975 MemoryAccess *New = Walker.findClobber(StartingAccess, Q);
1976#ifdef EXPENSIVE_CHECKS
1977 MemoryAccess *NewNoCache = Walker.findClobber(StartingAccess, Q);
1978 assert(NewNoCache == New && "Cache made us hand back a different result?");
1979 (void)NewNoCache;
1980#endif
1981 if (AutoResetWalker)
1982 resetClobberWalker();
1983 return New;
1984}
1985
1986MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
1987 MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
1988 if (isa<MemoryPhi>(StartingAccess))
1989 return StartingAccess;
1990
1991 auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
1992 if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
1993 return StartingUseOrDef;
1994
1995 Instruction *I = StartingUseOrDef->getMemoryInst();
1996
1997 // Conservatively, fences are always clobbers, so don't perform the walk if we
1998 // hit a fence.
1999 if (!ImmutableCallSite(I) && I->isFenceLike())
2000 return StartingUseOrDef;
2001
2002 UpwardsMemoryQuery Q;
2003 Q.OriginalAccess = StartingUseOrDef;
2004 Q.StartingLoc = Loc;
2005 Q.Inst = I;
2006 Q.IsCall = false;
2007
2008 // Unlike the other function, do not walk to the def of a def, because we are
2009 // handed something we already believe is the clobbering access.
2010 MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2011 ? StartingUseOrDef->getDefiningAccess()
2012 : StartingUseOrDef;
2013
2014 MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
2015 DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2016 DEBUG(dbgs() << *StartingUseOrDef << "\n");
2017 DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2018 DEBUG(dbgs() << *Clobber << "\n");
2019 return Clobber;
2020}
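
A hedged fragment showing how this location-based overload is reached through the public walker interface; MSSA, StartAccess, and SomeLoad are placeholders assumed to be in scope (SomeLoad being a LoadInst*).

// Hypothetical query against an explicit location rather than an instruction.
llvm::MemoryLocation Loc = llvm::MemoryLocation::get(SomeLoad);
llvm::MemoryAccess *Clobber =
    MSSA.getWalker()->getClobberingMemoryAccess(StartAccess, Loc);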
2021
2022MemoryAccess *
2023MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2024 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2025 // If this is a MemoryPhi, we can't do anything.
2026 if (!StartingAccess)
2027 return MA;
2028
2029 // If this is an already optimized use or def, return the optimized result.
2030 // Note: Currently, we do not store the optimized def result because we'd need
2031 // a separate field, since we can't use it as the defining access.
2032 if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2033 if (MUD->isOptimized())
2034 return MUD->getOptimized();
2035
2036 const Instruction *I = StartingAccess->getMemoryInst();
2037 UpwardsMemoryQuery Q(I, StartingAccess);
2038 // We can't sanely do anything with fences; they conservatively
2039 // clobber all memory, and have no locations to get pointers from to
2040 // try to disambiguate.
2041 if (!Q.IsCall && I->isFenceLike())
2042 return StartingAccess;
2043
2044 if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
2045 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2046 if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2047 MUD->setOptimized(LiveOnEntry);
2048 return LiveOnEntry;
2049 }
2050
2051 // Start with the thing we already think clobbers this location
2052 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2053
2054 // At this point, DefiningAccess may be the live on entry def.
2055 // If it is, we will not get a better result.
2056 if (MSSA->isLiveOnEntryDef(DefiningAccess))
2057 return DefiningAccess;
2058
2059 MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
2060 DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2061 DEBUG(dbgs() << *DefiningAccess << "\n");
2062 DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2063 DEBUG(dbgs() << *Result << "\n");
2064 if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2065 MUD->setOptimized(Result);
2066
2067 return Result;
2068}
2069
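The caching walker above is what MemorySSA returns from getWalker(), so clients normally reach it through that accessor rather than constructing it themselves. A minimal sketch of such a query, assuming an already-built MemorySSA named MSSA and a memory-touching Instruction *I supplied by the caller (both names are placeholders, not part of the code above):

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

MemoryAccess *queryClobber(MemorySSA &MSSA, Instruction *I) {
  // Only instructions that MemorySSA modeled have a MemoryUseOrDef; the
  // Instruction overload of getClobberingMemoryAccess asserts on that.
  if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(I))
    return MSSA.getWalker()->getClobberingMemoryAccess(MUD);
  return nullptr;
}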
2070MemoryAccess *
2071DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2072 if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2073 return Use->getDefiningAccess();
2074 return MA;
2075}
2076
2077MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2078 MemoryAccess *StartingAccess, const MemoryLocation &) {
2079 if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2080 return Use->getDefiningAccess();
2081 return StartingAccess;
2082}
2083
2084void MemoryPhi::deleteMe(DerivedUser *Self) {
2085 delete static_cast<MemoryPhi *>(Self);
2086}
2087
2088void MemoryDef::deleteMe(DerivedUser *Self) {
2089 delete static_cast<MemoryDef *>(Self);
2090}
2091
2092void MemoryUse::deleteMe(DerivedUser *Self) {
2093 delete static_cast<MemoryUse *>(Self);
2094}

/build/llvm-toolchain-snapshot-7~svn325874/include/llvm/Analysis/MemorySSA.h

1//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// \brief This file exposes an interface to building/using memory SSA to
12/// walk memory instructions using a use/def graph.
13///
14/// Memory SSA class builds an SSA form that links together memory access
15/// instructions such as loads, stores, atomics, and calls. Additionally, it
16/// does a trivial form of "heap versioning": every time the memory state
17/// changes in the program, we generate a new heap version. It generates
18/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
19///
20/// As a trivial example,
21/// define i32 @main() #0 {
22/// entry:
23/// %call = call noalias i8* @_Znwm(i64 4) #2
24/// %0 = bitcast i8* %call to i32*
25/// %call1 = call noalias i8* @_Znwm(i64 4) #2
26/// %1 = bitcast i8* %call1 to i32*
27/// store i32 5, i32* %0, align 4
28/// store i32 7, i32* %1, align 4
29/// %2 = load i32* %0, align 4
30/// %3 = load i32* %1, align 4
31/// %add = add nsw i32 %2, %3
32/// ret i32 %add
33/// }
34///
35/// Will become
36/// define i32 @main() #0 {
37/// entry:
38/// ; 1 = MemoryDef(0)
39/// %call = call noalias i8* @_Znwm(i64 4) #3
40/// %2 = bitcast i8* %call to i32*
41/// ; 2 = MemoryDef(1)
42/// %call1 = call noalias i8* @_Znwm(i64 4) #3
43/// %4 = bitcast i8* %call1 to i32*
44/// ; 3 = MemoryDef(2)
45/// store i32 5, i32* %2, align 4
46/// ; 4 = MemoryDef(3)
47/// store i32 7, i32* %4, align 4
48/// ; MemoryUse(3)
49/// %7 = load i32* %2, align 4
50/// ; MemoryUse(4)
51/// %8 = load i32* %4, align 4
52/// %add = add nsw i32 %7, %8
53/// ret i32 %add
54/// }
55///
56/// Given this form, all the stores that could ever affect the load at %8 can
57/// be found by using the MemoryUse associated with it, and walking from use
58/// to def until you hit the top of the function.
59///
60/// Each def also has a list of users associated with it, so you can walk from
61/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
62/// but not the RHS of MemoryDefs. You can see this above at %7, which would
63/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
64/// store, all the MemoryUses on its use lists are may-aliases of that store
65/// (but the MemoryDefs on its use list may not be).
66///
67/// MemoryDefs are not disambiguated because it would require multiple reaching
68/// definitions, which would require multiple phis, and multiple memoryaccesses
69/// per instruction.
70//
71//===----------------------------------------------------------------------===//
72
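To make the use-to-def walk described above concrete, here is a small sketch (not part of this header) of going from a load to the access that defines the memory it reads; MSSA and LI are assumed to be an already-built MemorySSA and a load in the analyzed function:

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void inspectLoad(MemorySSA &MSSA, LoadInst *LI) {
  // Every modeled load has a MemoryUse whose single operand is its reaching def.
  if (MemoryUseOrDef *Use = MSSA.getMemoryAccess(LI)) {
    MemoryAccess *Def = Use->getDefiningAccess();
    if (MSSA.isLiveOnEntryDef(Def))
      errs() << "load reads memory that is live on entry\n";
    else
      errs() << "nearest reaching access: " << *Def << "\n";
  }
}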
73#ifndef LLVM_ANALYSIS_MEMORYSSA_H
74#define LLVM_ANALYSIS_MEMORYSSA_H
75
76#include "llvm/ADT/DenseMap.h"
77#include "llvm/ADT/GraphTraits.h"
78#include "llvm/ADT/SmallPtrSet.h"
79#include "llvm/ADT/SmallVector.h"
80#include "llvm/ADT/ilist.h"
81#include "llvm/ADT/ilist_node.h"
82#include "llvm/ADT/iterator.h"
83#include "llvm/ADT/iterator_range.h"
84#include "llvm/ADT/simple_ilist.h"
85#include "llvm/Analysis/AliasAnalysis.h"
86#include "llvm/Analysis/MemoryLocation.h"
87#include "llvm/Analysis/PHITransAddr.h"
88#include "llvm/IR/BasicBlock.h"
89#include "llvm/IR/DerivedUser.h"
90#include "llvm/IR/Dominators.h"
91#include "llvm/IR/Module.h"
92#include "llvm/IR/Type.h"
93#include "llvm/IR/Use.h"
94#include "llvm/IR/User.h"
95#include "llvm/IR/Value.h"
96#include "llvm/Pass.h"
97#include "llvm/Support/Casting.h"
98#include <algorithm>
99#include <cassert>
100#include <cstddef>
101#include <iterator>
102#include <memory>
103#include <utility>
104
105namespace llvm {
106
107class Function;
108class Instruction;
109class MemoryAccess;
110class MemorySSAWalker;
111class LLVMContext;
112class raw_ostream;
113
114namespace MSSAHelpers {
115
116struct AllAccessTag {};
117struct DefsOnlyTag {};
118
119} // end namespace MSSAHelpers
120
121enum {
122 // Used to signify what the default invalid ID is for MemoryAccess's
123 // getID()
124 INVALID_MEMORYACCESS_ID = 0
125};
126
127template <class T> class memoryaccess_def_iterator_base;
128using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
129using const_memoryaccess_def_iterator =
130 memoryaccess_def_iterator_base<const MemoryAccess>;
131
132// \brief The base for all memory accesses. All memory accesses in a block are
133// linked together using an intrusive list.
134class MemoryAccess
135 : public DerivedUser,
136 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
137 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
138public:
139 using AllAccessType =
140 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
141 using DefsOnlyType =
142 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
143
144 MemoryAccess(const MemoryAccess &) = delete;
145 MemoryAccess &operator=(const MemoryAccess &) = delete;
146
147 void *operator new(size_t) = delete;
148
149 // Methods for support type inquiry through isa, cast, and
150 // dyn_cast
151 static bool classof(const Value *V) {
152 unsigned ID = V->getValueID();
153 return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
154 }
155
156 BasicBlock *getBlock() const { return Block; }
157
158 void print(raw_ostream &OS) const;
159 void dump() const;
160
161 /// \brief The user iterators for a memory access
162 using iterator = user_iterator;
163 using const_iterator = const_user_iterator;
164
165 /// \brief This iterator walks over all of the defs in a given
166 /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
167 /// MemoryUse/MemoryDef, this walks the defining access.
168 memoryaccess_def_iterator defs_begin();
169 const_memoryaccess_def_iterator defs_begin() const;
170 memoryaccess_def_iterator defs_end();
171 const_memoryaccess_def_iterator defs_end() const;
172
173 /// \brief Get the iterators for the all access list and the defs only list
174 /// We default to the all access list.
175 AllAccessType::self_iterator getIterator() {
176 return this->AllAccessType::getIterator();
177 }
178 AllAccessType::const_self_iterator getIterator() const {
179 return this->AllAccessType::getIterator();
180 }
181 AllAccessType::reverse_self_iterator getReverseIterator() {
182 return this->AllAccessType::getReverseIterator();
183 }
184 AllAccessType::const_reverse_self_iterator getReverseIterator() const {
185 return this->AllAccessType::getReverseIterator();
186 }
187 DefsOnlyType::self_iterator getDefsIterator() {
188 return this->DefsOnlyType::getIterator();
189 }
190 DefsOnlyType::const_self_iterator getDefsIterator() const {
191 return this->DefsOnlyType::getIterator();
192 }
193 DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
194 return this->DefsOnlyType::getReverseIterator();
195 }
196 DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
197 return this->DefsOnlyType::getReverseIterator();
198 }
199
200protected:
201 friend class MemoryDef;
202 friend class MemoryPhi;
203 friend class MemorySSA;
204 friend class MemoryUse;
205 friend class MemoryUseOrDef;
206
207 /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
208 /// moved.
209 void setBlock(BasicBlock *BB) { Block = BB; }
210
211 /// \brief Used for debugging and tracking things about MemoryAccesses.
212 /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
213 inline unsigned getID() const;
214
215 MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
216 BasicBlock *BB, unsigned NumOperands)
217 : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
218 Block(BB) {}
219
220private:
221 BasicBlock *Block;
222};
223
224inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
225 MA.print(OS);
226 return OS;
227}
228
229/// \brief Class that has the common methods + fields of memory uses/defs. It's
230/// a little awkward to have, but there are many cases where we want either a
231/// use or def, and there are many cases where uses are needed (defs aren't
232/// acceptable), and vice-versa.
233///
234/// This class should never be instantiated directly; make a MemoryUse or
235/// MemoryDef instead.
236class MemoryUseOrDef : public MemoryAccess {
237public:
238 void *operator new(size_t) = delete;
239
240  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
241
242  /// \brief Get the instruction that this MemoryUse or MemoryDef represents.
243 Instruction *getMemoryInst() const { return MemoryInst; }
244
245 /// \brief Get the access that produces the memory state used by this Use.
246 MemoryAccess *getDefiningAccess() const { return getOperand(0); }
247
248 static bool classof(const Value *MA) {
249 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
250 }
251
252 // Sadly, these have to be public because they are needed in some of the
253 // iterators.
254 inline bool isOptimized() const;
255 inline MemoryAccess *getOptimized() const;
256 inline void setOptimized(MemoryAccess *);
257
258 /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
259 /// be rewalked by the walker if necessary.
260 /// This really should only be called by tests.
261 inline void resetOptimized();
262
263protected:
264 friend class MemorySSA;
265 friend class MemorySSAUpdater;
266
267 MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
268 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
269 : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInst(MI) {
270 setDefiningAccess(DMA);
271 }
272
273 void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
274 if (!Optimized) {
275 setOperand(0, DMA);
276 return;
277 }
278 setOptimized(DMA);
279 }
280
281private:
282 Instruction *MemoryInst;
283};
284
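As a small illustration of how the use/def split is typically consumed, the following sketch (function name invented, MSSA assumed to exist) distinguishes the two cases for an arbitrary instruction:

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void classifyAccess(MemorySSA &MSSA, Instruction &I) {
  MemoryUseOrDef *MUD = MSSA.getMemoryAccess(&I);
  if (!MUD)
    return; // I does not touch memory as far as MemorySSA is concerned.
  if (isa<MemoryDef>(MUD))
    errs() << "modifies memory: " << *MUD->getMemoryInst() << "\n";
  else
    errs() << "only reads memory: " << *MUD->getMemoryInst() << "\n";
}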
285template <>
286struct OperandTraits<MemoryUseOrDef>
287 : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
288DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
289
290/// \brief Represents read-only accesses to memory
291///
292/// In particular, the set of Instructions that will be represented by
293/// MemoryUse's is exactly the set of Instructions for which
294/// AliasAnalysis::getModRefInfo returns "Ref".
295class MemoryUse final : public MemoryUseOrDef {
296public:
297  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
298
299 MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
300 : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}
301
302 // allocate space for exactly one operand
303 void *operator new(size_t s) { return User::operator new(s, 1); }
304
305 static bool classof(const Value *MA) {
306 return MA->getValueID() == MemoryUseVal;
307 }
308
309 void print(raw_ostream &OS) const;
310
311 void setOptimized(MemoryAccess *DMA) {
312 OptimizedID = DMA->getID();
313 setOperand(0, DMA);
314 }
315
316 bool isOptimized() const {
317 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
318 }
319
320 MemoryAccess *getOptimized() const {
321 return getDefiningAccess();
322 }
323
324 void resetOptimized() {
325 OptimizedID = INVALID_MEMORYACCESS_ID;
326 }
327
328protected:
329 friend class MemorySSA;
330
331private:
332 static void deleteMe(DerivedUser *Self);
333
334 unsigned int OptimizedID = 0;
335};
336
337template <>
338struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
339DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
340
341/// \brief Represents a read-write access to memory, whether it is a must-alias,
342/// or a may-alias.
343///
344/// In particular, the set of Instructions that will be represented by
345/// MemoryDef's is exactly the set of Instructions for which
346/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
347/// Note that, in order to provide def-def chains, all defs also have a use
348/// associated with them. This use points to the nearest reaching
349/// MemoryDef/MemoryPhi.
350class MemoryDef final : public MemoryUseOrDef {
351public:
352 friend class MemorySSA;
353
354  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
355
356 MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
357 unsigned Ver)
358 : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}
359
360 // allocate space for exactly one operand
361 void *operator new(size_t s) { return User::operator new(s, 1); }
362
363 static bool classof(const Value *MA) {
364 return MA->getValueID() == MemoryDefVal;
365 }
366
367 void setOptimized(MemoryAccess *MA) {
368 Optimized = MA;
369 OptimizedID = getDefiningAccess()->getID();
370 }
371
372 MemoryAccess *getOptimized() const { return Optimized; }
373
374 bool isOptimized() const {
375 return getOptimized() && getDefiningAccess() &&
376 OptimizedID == getDefiningAccess()->getID();
377 }
378
379 void resetOptimized() {
380 OptimizedID = INVALID_MEMORYACCESS_ID;
381 }
382
383 void print(raw_ostream &OS) const;
384
385 unsigned getID() const { return ID; }
386
387private:
388 static void deleteMe(DerivedUser *Self);
389
390 const unsigned ID;
391 MemoryAccess *Optimized = nullptr;
392 unsigned int OptimizedID = INVALID_MEMORYACCESS_ID;
393};
394
395template <>
396struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
397DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
398
399/// \brief Represents phi nodes for memory accesses.
400///
401/// These have the same semantics as regular phi nodes, with the exception that
402/// only one phi will ever exist in a given basic block.
403/// Guaranteeing one phi per block means guaranteeing there is only ever one
404/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
405/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
406/// a MemoryPhi's operands.
407/// That is, given
408/// if (a) {
409/// store %a
410/// store %b
411/// }
412/// it *must* be transformed into
413/// if (a) {
414/// 1 = MemoryDef(liveOnEntry)
415/// store %a
416/// 2 = MemoryDef(1)
417/// store %b
418/// }
419/// and *not*
420/// if (a) {
421/// 1 = MemoryDef(liveOnEntry)
422/// store %a
423/// 2 = MemoryDef(liveOnEntry)
424/// store %b
425/// }
426/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
427/// end of the branch, and if there are not two phi nodes, one will be
428/// disconnected completely from the SSA graph below that point.
429/// Because MemoryUse's do not generate new definitions, they do not have this
430/// issue.
431class MemoryPhi final : public MemoryAccess {
432 // allocate space for exactly zero operands
433 void *operator new(size_t s) { return User::operator new(s); }
434
435public:
436 /// Provide fast operand accessors
437  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
438
439 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
440 : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
441 ReservedSpace(NumPreds) {
442 allocHungoffUses(ReservedSpace);
443 }
444
445 // Block iterator interface. This provides access to the list of incoming
446 // basic blocks, which parallels the list of incoming values.
447 using block_iterator = BasicBlock **;
448 using const_block_iterator = BasicBlock *const *;
449
450 block_iterator block_begin() {
451 auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
452 return reinterpret_cast<block_iterator>(Ref + 1);
453 }
454
455 const_block_iterator block_begin() const {
456 const auto *Ref =
457 reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
458 return reinterpret_cast<const_block_iterator>(Ref + 1);
459 }
460
461 block_iterator block_end() { return block_begin() + getNumOperands(); }
462
463 const_block_iterator block_end() const {
464 return block_begin() + getNumOperands();
465 }
466
467 iterator_range<block_iterator> blocks() {
468 return make_range(block_begin(), block_end());
469 }
470
471 iterator_range<const_block_iterator> blocks() const {
472 return make_range(block_begin(), block_end());
473 }
474
475 op_range incoming_values() { return operands(); }
476
477 const_op_range incoming_values() const { return operands(); }
478
479 /// \brief Return the number of incoming edges
480 unsigned getNumIncomingValues() const { return getNumOperands(); }
481
482 /// \brief Return incoming value number x
483 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
484 void setIncomingValue(unsigned I, MemoryAccess *V) {
485    assert(V && "PHI node got a null value!");
486 setOperand(I, V);
487 }
488
489 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
490 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
491
492 /// \brief Return incoming basic block number @p i.
493 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
494
495 /// \brief Return incoming basic block corresponding
496 /// to an operand of the PHI.
497 BasicBlock *getIncomingBlock(const Use &U) const {
498    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
499 return getIncomingBlock(unsigned(&U - op_begin()));
500 }
501
502 /// \brief Return incoming basic block corresponding
503 /// to value use iterator.
504 BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
505 return getIncomingBlock(I.getUse());
506 }
507
508 void setIncomingBlock(unsigned I, BasicBlock *BB) {
509    assert(BB && "PHI node got a null basic block!");
510 block_begin()[I] = BB;
511 }
512
513 /// \brief Add an incoming value to the end of the PHI list
514 void addIncoming(MemoryAccess *V, BasicBlock *BB) {
515 if (getNumOperands() == ReservedSpace)
516 growOperands(); // Get more space!
517 // Initialize some new operands.
518 setNumHungOffUseOperands(getNumOperands() + 1);
519 setIncomingValue(getNumOperands() - 1, V);
520 setIncomingBlock(getNumOperands() - 1, BB);
521 }
522
523 /// \brief Return the first index of the specified basic
524 /// block in the value list for this PHI. Returns -1 if no instance.
525 int getBasicBlockIndex(const BasicBlock *BB) const {
526 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
527 if (block_begin()[I] == BB)
528 return I;
529 return -1;
530 }
531
532 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
533 int Idx = getBasicBlockIndex(BB);
534    assert(Idx >= 0 && "Invalid basic block argument!");
535 return getIncomingValue(Idx);
536 }
537
538 static bool classof(const Value *V) {
539 return V->getValueID() == MemoryPhiVal;
540 }
541
542 void print(raw_ostream &OS) const;
543
544 unsigned getID() const { return ID; }
545
546protected:
547 friend class MemorySSA;
548
549  /// \brief This is more complicated than the generic
550 /// User::allocHungoffUses, because we have to allocate Uses for the incoming
551 /// values and pointers to the incoming blocks, all in one allocation.
552 void allocHungoffUses(unsigned N) {
553 User::allocHungoffUses(N, /* IsPhi */ true);
554 }
555
556private:
557 // For debugging only
558 const unsigned ID;
559 unsigned ReservedSpace;
560
561 /// \brief This grows the operand list in response to a push_back style of
562 /// operation. This grows the number of ops by 1.5 times.
563 void growOperands() {
564 unsigned E = getNumOperands();
565 // 2 op PHI nodes are VERY common, so reserve at least enough for that.
566 ReservedSpace = std::max(E + E / 2, 2u);
567 growHungoffUses(ReservedSpace, /* IsPhi */ true);
568 }
569
570 static void deleteMe(DerivedUser *Self);
571};
572
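A short sketch of walking a MemoryPhi's incoming state, analogous to iterating a regular PHINode (the helper name is illustrative only):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void dumpPhiIncoming(const MemoryPhi *Phi) {
  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *Pred = Phi->getIncomingBlock(I);        // predecessor block
    MemoryAccess *Incoming = Phi->getIncomingValue(I);  // reaching access on that edge
    errs() << Pred->getName() << " -> " << *Incoming << "\n";
  }
}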
573inline unsigned MemoryAccess::getID() const {
574  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
575         "only memory defs and phis have ids");
576 if (const auto *MD = dyn_cast<MemoryDef>(this))
577 return MD->getID();
578 return cast<MemoryPhi>(this)->getID();
579}
580
581inline bool MemoryUseOrDef::isOptimized() const {
582 if (const auto *MD = dyn_cast<MemoryDef>(this))
583 return MD->isOptimized();
584 return cast<MemoryUse>(this)->isOptimized();
585}
586
587inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
588 if (const auto *MD = dyn_cast<MemoryDef>(this))
589 return MD->getOptimized();
590 return cast<MemoryUse>(this)->getOptimized();
591}
592
593inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
594 if (auto *MD = dyn_cast<MemoryDef>(this))
595 MD->setOptimized(MA);
596 else
597 cast<MemoryUse>(this)->setOptimized(MA);
598}
599
600inline void MemoryUseOrDef::resetOptimized() {
601 if (auto *MD = dyn_cast<MemoryDef>(this))
602 MD->resetOptimized();
603 else
604 cast<MemoryUse>(this)->resetOptimized();
605}
606
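The optimized-access plumbing above acts as a one-entry cache per use/def. A sketch of how a client might consult it before paying for a real walk (MSSA and MUD are assumed to be supplied by the caller):

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

MemoryAccess *cachedClobber(MemorySSA &MSSA, MemoryUseOrDef *MUD) {
  // Reuse a previously recorded clobber if one is still valid...
  if (MUD->isOptimized())
    return MUD->getOptimized();
  // ...otherwise ask the walker, which records its answer via setOptimized.
  return MSSA.getWalker()->getClobberingMemoryAccess(MUD);
}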
607template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
608DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
609
610/// \brief Encapsulates MemorySSA, including all data associated with memory
611/// accesses.
612class MemorySSA {
613public:
614 MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
615 ~MemorySSA();
616
617 MemorySSAWalker *getWalker();
618
619  /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
620  /// access associated with it. If passed a basic block, this gets the memory
621  /// phi node that exists for that block, if there is one. Otherwise, this
622  /// will get a MemoryUseOrDef.
623 MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
624 MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
625
626 void dump() const;
627 void print(raw_ostream &) const;
628
629 /// \brief Return true if \p MA represents the live on entry value
630 ///
631 /// Loads and stores from pointer arguments and other global values may be
632 /// defined by memory operations that do not occur in the current function, so
633 /// they may be live on entry to the function. MemorySSA represents such
634 /// memory state by the live on entry definition, which is guaranteed to occur
635 /// before any other memory access in the function.
636 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
637 return MA == LiveOnEntryDef.get();
638 }
639
640 inline MemoryAccess *getLiveOnEntryDef() const {
641 return LiveOnEntryDef.get();
642 }
643
644  // Sadly, iplists, by default, own and delete pointers added to the
645 // list. It's not currently possible to have two iplists for the same type,
646 // where one owns the pointers, and one does not. This is because the traits
647 // are per-type, not per-tag. If this ever changes, we should make the
648 // DefList an iplist.
649 using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
650 using DefsList =
651 simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
652
653 /// \brief Return the list of MemoryAccess's for a given basic block.
654 ///
655 /// This list is not modifiable by the user.
656 const AccessList *getBlockAccesses(const BasicBlock *BB) const {
657 return getWritableBlockAccesses(BB);
4. Calling 'MemorySSA::getWritableBlockAccesses'
19. Returning from 'MemorySSA::getWritableBlockAccesses'
27. Calling 'MemorySSA::getWritableBlockAccesses'
42. Returning from 'MemorySSA::getWritableBlockAccesses'
658 }
659
660 /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
661 /// block.
662 ///
663 /// This list is not modifiable by the user.
664 const DefsList *getBlockDefs(const BasicBlock *BB) const {
665 return getWritableBlockDefs(BB);
666 }
667
668 /// \brief Given two memory accesses in the same basic block, determine
669 /// whether MemoryAccess \p A dominates MemoryAccess \p B.
670 bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
671
672 /// \brief Given two memory accesses in potentially different blocks,
673 /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
674 bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
675
676 /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
677 /// dominates Use \p B.
678 bool dominates(const MemoryAccess *A, const Use &B) const;
679
680  /// \brief Verify that MemorySSA is self-consistent (i.e., definitions dominate
681  /// all uses, and uses appear in the right places). This is used by unit tests.
682 void verifyMemorySSA() const;
683
684 /// Used in various insertion functions to specify whether we are talking
685 /// about the beginning or end of a block.
686 enum InsertionPlace { Beginning, End };
687
688protected:
689  // Used by Memory SSA annotator, dumpers, and wrapper pass
690 friend class MemorySSAAnnotatedWriter;
691 friend class MemorySSAPrinterLegacyPass;
692 friend class MemorySSAUpdater;
693
694 void verifyDefUses(Function &F) const;
695 void verifyDomination(Function &F) const;
696 void verifyOrdering(Function &F) const;
697
698 // This is used by the use optimizer and updater.
699 AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
700 auto It = PerBlockAccesses.find(BB);
701 return It == PerBlockAccesses.end() ? nullptr : It->second.get();
5. Assuming the condition is false
6. '?' condition is false
7. Calling 'unique_ptr::get'
18. Returning from 'unique_ptr::get'
28. Assuming the condition is false
29. '?' condition is false
30. Calling 'unique_ptr::get'
41. Returning from 'unique_ptr::get'
702 }
703
704 // This is used by the use optimizer and updater.
705 DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
706 auto It = PerBlockDefs.find(BB);
707 return It == PerBlockDefs.end() ? nullptr : It->second.get();
708 }
709
710  // These are used by the updater to perform various internal MemorySSA
711  // machinations. They do not always leave the IR in a correct state, and
712  // rely on the updater to fix up what they break, so they are not public.
713
714 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
715 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
716
717 // Rename the dominator tree branch rooted at BB.
718 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
719 SmallPtrSetImpl<BasicBlock *> &Visited) {
720 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
721 }
722
723 void removeFromLookups(MemoryAccess *);
724 void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
725 void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
726 InsertionPlace);
727 void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
728 AccessList::iterator);
729 MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
730
731private:
732 class CachingWalker;
733 class OptimizeUses;
734
735 CachingWalker *getWalkerImpl();
736 void buildMemorySSA();
737 void optimizeUses();
738
739 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
740
741 using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
742 using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
743
744 void
745 determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
746 void markUnreachableAsLiveOnEntry(BasicBlock *BB);
747 bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
748 MemoryPhi *createMemoryPhi(BasicBlock *BB);
749 MemoryUseOrDef *createNewAccess(Instruction *);
750 MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
751 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
752 const DenseMap<const BasicBlock *, unsigned int> &);
753 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
754 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
755 void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
756 SmallPtrSetImpl<BasicBlock *> &Visited,
757 bool SkipVisited = false, bool RenameAllUses = false);
758 AccessList *getOrCreateAccessList(const BasicBlock *);
759 DefsList *getOrCreateDefsList(const BasicBlock *);
760 void renumberBlock(const BasicBlock *) const;
761 AliasAnalysis *AA;
762 DominatorTree *DT;
763 Function &F;
764
765 // Memory SSA mappings
766 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
767
768 // These two mappings contain the main block to access/def mappings for
769 // MemorySSA. The list contained in PerBlockAccesses really owns all the
770 // MemoryAccesses.
771 // Both maps maintain the invariant that if a block is found in them, the
772 // corresponding list is not empty, and if a block is not found in them, the
773 // corresponding list is empty.
774 AccessMap PerBlockAccesses;
775 DefsMap PerBlockDefs;
776 std::unique_ptr<MemoryAccess> LiveOnEntryDef;
777
778 // Domination mappings
779 // Note that the numbering is local to a block, even though the map is
780 // global.
781 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
782 mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
783
784 // Memory SSA building info
785 std::unique_ptr<CachingWalker> Walker;
786 unsigned NextID;
787};
788
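To illustrate the per-block lists and dominance queries declared above, here is a sketch that prints every access in a block and compares two accesses (MSSA, BB, A and B are assumed to come from the caller):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void dumpBlockAccesses(const MemorySSA &MSSA, const BasicBlock *BB) {
  // getBlockAccesses returns null when the block has no memory accesses.
  if (const MemorySSA::AccessList *Accesses = MSSA.getBlockAccesses(BB))
    for (const MemoryAccess &MA : *Accesses)
      errs() << MA << "\n";
}

bool accessDominates(const MemorySSA &MSSA, const MemoryAccess *A,
                     const MemoryAccess *B) {
  // Works across blocks; locallyDominates is the same-block variant.
  return MSSA.dominates(A, B);
}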
789// Internal MemorySSA utils, for use by MemorySSA classes and walkers
790class MemorySSAUtil {
791protected:
792 friend class GVNHoist;
793 friend class MemorySSAWalker;
794
795 // This function should not be used by new passes.
796 static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
797 AliasAnalysis &AA);
798};
799
800// This pass does eager building and then printing of MemorySSA. It is used by
801// the tests to be able to build, dump, and verify Memory SSA.
802class MemorySSAPrinterLegacyPass : public FunctionPass {
803public:
804 MemorySSAPrinterLegacyPass();
805
806 bool runOnFunction(Function &) override;
807 void getAnalysisUsage(AnalysisUsage &AU) const override;
808
809 static char ID;
810};
811
812/// An analysis that produces \c MemorySSA for a function.
813///
814class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
815 friend AnalysisInfoMixin<MemorySSAAnalysis>;
816
817 static AnalysisKey Key;
818
819public:
820 // Wrap MemorySSA result to ensure address stability of internal MemorySSA
821 // pointers after construction. Use a wrapper class instead of plain
822 // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
823 struct Result {
824 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
825
826 MemorySSA &getMSSA() { return *MSSA.get(); }
827
828 std::unique_ptr<MemorySSA> MSSA;
829 };
830
831 Result run(Function &F, FunctionAnalysisManager &AM);
832};
833
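A sketch of how a new-pass-manager pass would obtain this analysis result (the pass name here is hypothetical, not something defined by this header):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

struct MSSAConsumerPass : PassInfoMixin<MSSAConsumerPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    // The Result wrapper keeps the MemorySSA object at a stable address.
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    MSSA.verifyMemorySSA();
    return PreservedAnalyses::all();
  }
};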
834/// \brief Printer pass for \c MemorySSA.
835class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
836 raw_ostream &OS;
837
838public:
839 explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
840
841 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
842};
843
844/// \brief Verifier pass for \c MemorySSA.
845struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
846 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
847};
848
849/// \brief Legacy analysis pass which computes \c MemorySSA.
850class MemorySSAWrapperPass : public FunctionPass {
851public:
852 MemorySSAWrapperPass();
853
854 static char ID;
855
856 bool runOnFunction(Function &) override;
857 void releaseMemory() override;
858 MemorySSA &getMSSA() { return *MSSA; }
859 const MemorySSA &getMSSA() const { return *MSSA; }
860
861 void getAnalysisUsage(AnalysisUsage &AU) const override;
862
863 void verifyAnalysis() const override;
864 void print(raw_ostream &OS, const Module *M = nullptr) const override;
865
866private:
867 std::unique_ptr<MemorySSA> MSSA;
868};
869
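And the corresponding legacy-pass-manager sketch, again with an invented pass name:

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
struct LegacyMSSAConsumer : FunctionPass {
  static char ID;
  LegacyMSSAConsumer() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MemorySSAWrapperPass>();
    AU.setPreservesAll();
  }

  bool runOnFunction(Function &F) override {
    MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
    MSSA.print(errs());
    return false;
  }
};
} // end anonymous namespace

char LegacyMSSAConsumer::ID = 0;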
870/// \brief This is the generic walker interface for walkers of MemorySSA.
871/// Walkers are used to be able to further disambiguate the def-use chains
872/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
873/// you.
874/// In particular, while the def-use chains provide basic information, and are
875/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
876/// MemoryUse as AliasAnalysis considers it, a user may want better or other
877/// information. In particular, they may want to use SCEV info to further
878/// disambiguate memory accesses, or they may want the nearest dominating
879/// may-aliasing MemoryDef for a call or a store. This API enables a
880/// standardized interface to getting and using that info.
881class MemorySSAWalker {
882public:
883 MemorySSAWalker(MemorySSA *);
884 virtual ~MemorySSAWalker() = default;
885
886 using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
887
888 /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
889 /// will give you the nearest dominating MemoryAccess that Mod's the location
890 /// the instruction accesses (by skipping any def which AA can prove does not
891 /// alias the location(s) accessed by the instruction given).
892 ///
893 /// Note that this will return a single access, and it must dominate the
894 /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
895 /// this will return the MemoryPhi, not the operand. This means that
896 /// given:
897 /// if (a) {
898 /// 1 = MemoryDef(liveOnEntry)
899 /// store %a
900 /// } else {
901 /// 2 = MemoryDef(liveOnEntry)
902 /// store %b
903 /// }
904 /// 3 = MemoryPhi(2, 1)
905 /// MemoryUse(3)
906 /// load %a
907 ///
908 /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
909 /// in the if (a) branch.
910 MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
911 MemoryAccess *MA = MSSA->getMemoryAccess(I);
912    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
913 return getClobberingMemoryAccess(MA);
914 }
915
916 /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
917 /// but takes a MemoryAccess instead of an Instruction.
918 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
919
920 /// \brief Given a potentially clobbering memory access and a new location,
921 /// calling this will give you the nearest dominating clobbering MemoryAccess
922 /// (by skipping non-aliasing def links).
923 ///
924 /// This version of the function is mainly used to disambiguate phi translated
925 /// pointers, where the value of a pointer may have changed from the initial
926 /// memory access. Note that this expects to be handed either a MemoryUse,
927 /// or an already potentially clobbering access. Unlike the above API, if
928 /// given a MemoryDef that clobbers the pointer as the starting access, it
929 /// will return that MemoryDef, whereas the above would return the clobber
930 /// starting from the use side of the memory def.
931 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
932 const MemoryLocation &) = 0;
933
934 /// \brief Given a memory access, invalidate anything this walker knows about
935 /// that access.
936 /// This API is used by walkers that store information to perform basic cache
937 /// invalidation. This will be called by MemorySSA at appropriate times for
938 /// the walker it uses or returns.
939 virtual void invalidateInfo(MemoryAccess *) {}
940
941  virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
942
943protected:
944 friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
945 // constructor.
946 MemorySSA *MSSA;
947};
948
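The location-taking overload is the one used for phi-translated pointers; a sketch of querying it with a load's location, where Walker, MA and LI are assumed to be supplied by the caller:

#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

MemoryAccess *clobberForLocation(MemorySSAWalker *Walker, MemoryAccess *MA,
                                 LoadInst *LI) {
  // Ask for the nearest dominating clobber of LI's location, starting the
  // search at MA rather than at LI's own MemoryUse.
  MemoryLocation Loc = MemoryLocation::get(LI);
  return Walker->getClobberingMemoryAccess(MA, Loc);
}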
949/// \brief A MemorySSAWalker that does no alias queries, or anything else. It
950/// simply returns the links as they were constructed by the builder.
951class DoNothingMemorySSAWalker final : public MemorySSAWalker {
952public:
953 // Keep the overrides below from hiding the Instruction overload of
954 // getClobberingMemoryAccess.
955 using MemorySSAWalker::getClobberingMemoryAccess;
956
957 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
958 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
959 const MemoryLocation &) override;
960};
961
962using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
963using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
964
965/// \brief Iterator base class used to implement const and non-const iterators
966/// over the defining accesses of a MemoryAccess.
967template <class T>
968class memoryaccess_def_iterator_base
969 : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
970 std::forward_iterator_tag, T, ptrdiff_t, T *,
971 T *> {
972 using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
973
974public:
975 memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
976 memoryaccess_def_iterator_base() = default;
977
978 bool operator==(const memoryaccess_def_iterator_base &Other) const {
979 return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
980 }
981
982 // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
983 // block from the operand in constant time (In a PHINode, the uselist has
984 // both, so it's just subtraction). We provide it as part of the
985 // iterator to avoid callers having to linear walk to get the block.
986 // If the operation becomes constant time on MemoryPHI's, this bit of
987 // abstraction breaking should be removed.
988 BasicBlock *getPhiArgBlock() const {
989 MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
990    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
991 return MP->getIncomingBlock(ArgNo);
992 }
993
994 typename BaseT::iterator::pointer operator*() const {
995    assert(Access && "Tried to access past the end of our iterator");
996 // Go to the first argument for phis, and the defining access for everything
997 // else.
998 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
999 return MP->getIncomingValue(ArgNo);
1000 return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
1001 }
1002
1003 using BaseT::operator++;
1004 memoryaccess_def_iterator &operator++() {
1005    assert(Access && "Hit end of iterator");
1006 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
1007 if (++ArgNo >= MP->getNumIncomingValues()) {
1008 ArgNo = 0;
1009 Access = nullptr;
1010 }
1011 } else {
1012 Access = nullptr;
1013 }
1014 return *this;
1015 }
1016
1017private:
1018 T *Access = nullptr;
1019 unsigned ArgNo = 0;
1020};
1021
1022inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
1023 return memoryaccess_def_iterator(this);
1024}
1025
1026inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
1027 return const_memoryaccess_def_iterator(this);
1028}
1029
1030inline memoryaccess_def_iterator MemoryAccess::defs_end() {
1031 return memoryaccess_def_iterator();
1032}
1033
1034inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
1035 return const_memoryaccess_def_iterator();
1036}
1037
1038/// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
1039/// and uses in the inverse case.
1040template <> struct GraphTraits<MemoryAccess *> {
1041 using NodeRef = MemoryAccess *;
1042 using ChildIteratorType = memoryaccess_def_iterator;
1043
1044 static NodeRef getEntryNode(NodeRef N) { return N; }
1045 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
1046 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
1047};
1048
1049template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1050 using NodeRef = MemoryAccess *;
1051 using ChildIteratorType = MemoryAccess::iterator;
1052
1053 static NodeRef getEntryNode(NodeRef N) { return N; }
1054 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1055 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1056};
1057
1058/// \brief Provide an iterator that walks defs, giving both the memory access,
1059/// and the current pointer location, updating the pointer location as it
1060/// changes due to phi node translation.
1061///
1062/// This iterator, while somewhat specialized, is what most clients actually
1063/// want when walking upwards through MemorySSA def chains. It takes a pair of
1064/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
1065/// memory location through phi nodes for the user.
1066class upward_defs_iterator
1067 : public iterator_facade_base<upward_defs_iterator,
1068 std::forward_iterator_tag,
1069 const MemoryAccessPair> {
1070 using BaseT = upward_defs_iterator::iterator_facade_base;
1071
1072public:
1073 upward_defs_iterator(const MemoryAccessPair &Info)
1074 : DefIterator(Info.first), Location(Info.second),
1075 OriginalAccess(Info.first) {
1076 CurrentPair.first = nullptr;
1077
1078 WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1079 fillInCurrentPair();
1080 }
1081
1082 upward_defs_iterator() { CurrentPair.first = nullptr; }
1083
1084 bool operator==(const upward_defs_iterator &Other) const {
1085 return DefIterator == Other.DefIterator;
1086 }
1087
1088 BaseT::iterator::reference operator*() const {
1089    assert(DefIterator != OriginalAccess->defs_end() &&
1090           "Tried to access past the end of our iterator");
1091 return CurrentPair;
1092 }
1093
1094 using BaseT::operator++;
1095 upward_defs_iterator &operator++() {
1096    assert(DefIterator != OriginalAccess->defs_end() &&
1097           "Tried to access past the end of the iterator");
1098 ++DefIterator;
1099 if (DefIterator != OriginalAccess->defs_end())
1100 fillInCurrentPair();
1101 return *this;
1102 }
1103
1104 BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1105
1106private:
1107 void fillInCurrentPair() {
1108 CurrentPair.first = *DefIterator;
1109 if (WalkingPhi && Location.Ptr) {
1110 PHITransAddr Translator(
1111 const_cast<Value *>(Location.Ptr),
1112 OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
1113 if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1114 DefIterator.getPhiArgBlock(), nullptr,
1115 false))
1116 if (Translator.getAddr() != Location.Ptr) {
1117 CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
1118 return;
1119 }
1120 }
1121 CurrentPair.second = Location;
1122 }
1123
1124 MemoryAccessPair CurrentPair;
1125 memoryaccess_def_iterator DefIterator;
1126 MemoryLocation Location;
1127 MemoryAccess *OriginalAccess = nullptr;
1128 bool WalkingPhi = false;
1129};
1130
1131inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
1132 return upward_defs_iterator(Pair);
1133}
1134
1135inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
1136
1137inline iterator_range<upward_defs_iterator>
1138upward_defs(const MemoryAccessPair &Pair) {
1139 return make_range(upward_defs_begin(Pair), upward_defs_end());
1140}
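For orientation, here is a minimal sketch of how a caller might consume this range; the function name visitUpwardDefs and the parameters StartAccess and Loc are made up for illustration and are not part of MemorySSA's API.

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

// Sketch only: assumes StartAccess and Loc were obtained elsewhere, e.g. from
// MSSA.getMemoryAccess(I) and MemoryLocation::get(I).
static void visitUpwardDefs(MemoryAccess *StartAccess, MemoryLocation Loc) {
  for (const MemoryAccessPair &MAP : upward_defs({StartAccess, Loc})) {
    MemoryAccess *Def = MAP.first;              // the current defining access
    MemoryLocation TranslatedLoc = MAP.second;  // location after phi translation
    (void)Def;
    (void)TranslatedLoc;
  }
}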
1141
1142/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1143/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1144/// comparing against a null def_chain_iterator, this will compare equal only
1145/// after walking said Phi/liveOnEntry.
1146///
1147/// The UseOptimizedChain flag specifies whether to walk the clobbering
1148/// access chain, or all the accesses.
1149///
1150/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
1151/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1152/// a phi node. The optimized chain walks the clobbering access of a store.
1153/// So if you are just trying to find, given a store, what the next
1154/// thing that would clobber the same memory is, you want the optimized chain.
1155template <class T, bool UseOptimizedChain = false>
1156struct def_chain_iterator
1157 : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1158 std::forward_iterator_tag, MemoryAccess *> {
1159 def_chain_iterator() : MA(nullptr) {}
1160 def_chain_iterator(T MA) : MA(MA) {}
1161
1162 T operator*() const { return MA; }
1163
1164 def_chain_iterator &operator++() {
1165 // N.B. liveOnEntry has a null defining access.
1166 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1167 if (UseOptimizedChain && MUD->isOptimized())
1168 MA = MUD->getOptimized();
1169 else
1170 MA = MUD->getDefiningAccess();
1171 } else {
1172 MA = nullptr;
1173 }
1174
1175 return *this;
1176 }
1177
1178 bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1179
1180private:
1181 T MA;
1182};
1183
1184template <class T>
1185inline iterator_range<def_chain_iterator<T>>
1186def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1187#ifdef EXPENSIVE_CHECKS
1188 assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1189 "UpTo isn't in the def chain!");
1190#endif
1191 return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
1192}
1193
1194template <class T>
1195inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
1196 return make_range(def_chain_iterator<T, true>(MA),
1197 def_chain_iterator<T, true>(nullptr));
1198}
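As a quick illustration of the two flavours, a hedged sketch (the helper name countChain is made up): def_chain follows getDefiningAccess() links, while optimized_def_chain jumps through getOptimized() links where they exist.

#include "llvm/Analysis/MemorySSA.h"
#include <utility>
using namespace llvm;

// Sketch only: counts how many accesses sit on each kind of chain starting at MA.
static std::pair<unsigned, unsigned> countChain(MemoryAccess *MA) {
  unsigned Full = 0, Optimized = 0;
  for (MemoryAccess *Def : def_chain(MA)) {            // every access up to a phi/liveOnEntry
    (void)Def;
    ++Full;
  }
  for (MemoryAccess *Def : optimized_def_chain(MA)) {  // follows the optimized links instead
    (void)Def;
    ++Optimized;
  }
  return {Full, Optimized};
}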
1199
1200} // end namespace llvm
1201
1202#endif // LLVM_ANALYSIS_MEMORYSSA_H

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/bits/unique_ptr.h

1// unique_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2008-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/unique_ptr.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30#ifndef _UNIQUE_PTR_H
31#define _UNIQUE_PTR_H 1
32
33#include <bits/c++config.h>
34#include <debug/assertions.h>
35#include <type_traits>
36#include <utility>
37#include <tuple>
38#include <bits/stl_function.h>
39#include <bits/functional_hash.h>
40
41namespace std _GLIBCXX_VISIBILITY(default)
42{
43_GLIBCXX_BEGIN_NAMESPACE_VERSION
44
45 /**
46 * @addtogroup pointer_abstractions
47 * @{
48 */
49
50#if _GLIBCXX_USE_DEPRECATED
51 template<typename> class auto_ptr;
52#endif
53
54 /// Primary template of default_delete, used by unique_ptr
55 template<typename _Tp>
56 struct default_delete
57 {
58 /// Default constructor
59 constexpr default_delete() noexcept = default;
60
61 /** @brief Converting constructor.
62 *
63 * Allows conversion from a deleter for arrays of another type, @p _Up,
64 * only if @p _Up* is convertible to @p _Tp*.
65 */
66 template<typename _Up, typename = typename
67 enable_if<is_convertible<_Up*, _Tp*>::value>::type>
68 default_delete(const default_delete<_Up>&) noexcept { }
69
70 /// Calls @c delete @p __ptr
71 void
72 operator()(_Tp* __ptr) const
73 {
74 static_assert(!is_void<_Tp>::value,
75 "can't delete pointer to incomplete type");
76 static_assert(sizeof(_Tp)>0,
77 "can't delete pointer to incomplete type");
78 delete __ptr;
79 }
80 };
81
82 // _GLIBCXX_RESOLVE_LIB_DEFECTS
83 // DR 740 - omit specialization for array objects with a compile time length
84 /// Specialization for arrays, default_delete.
85 template<typename _Tp>
86 struct default_delete<_Tp[]>
87 {
88 public:
89 /// Default constructor
90 constexpr default_delete() noexcept = default;
91
92 /** @brief Converting constructor.
93 *
94 * Allows conversion from a deleter for arrays of another type, such as
95 * a const-qualified version of @p _Tp.
96 *
97 * Conversions from types derived from @c _Tp are not allowed because
98 * it is unsafe to @c delete[] an array of derived types through a
99 * pointer to the base type.
100 */
101 template<typename _Up, typename = typename
102 enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type>
103 default_delete(const default_delete<_Up[]>&) noexcept { }
104
105 /// Calls @c delete[] @p __ptr
106 template<typename _Up>
107 typename enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type
108 operator()(_Up* __ptr) const
109 {
110 static_assert(sizeof(_Tp)>0,
111 "can't delete pointer to incomplete type");
112 delete [] __ptr;
113 }
114 };
115
116 template <typename _Tp, typename _Dp>
117 class __uniq_ptr_impl
118 {
119 template <typename _Up, typename _Ep, typename = void>
120 struct _Ptr
121 {
122 using type = _Up*;
123 };
124
125 template <typename _Up, typename _Ep>
126 struct
127 _Ptr<_Up, _Ep, __void_t<typename remove_reference<_Ep>::type::pointer>>
128 {
129 using type = typename remove_reference<_Ep>::type::pointer;
130 };
131
132 public:
133 using _DeleterConstraint = enable_if<
134 __and_<__not_<is_pointer<_Dp>>,
135 is_default_constructible<_Dp>>::value>;
136
137 using pointer = typename _Ptr<_Tp, _Dp>::type;
138
139 __uniq_ptr_impl() = default;
140 __uniq_ptr_impl(pointer __p) : _M_t() { _M_ptr() = __p; }
141
142 template<typename _Del>
143 __uniq_ptr_impl(pointer __p, _Del&& __d)
144 : _M_t(__p, std::forward<_Del>(__d)) { }
145
146 pointer& _M_ptr() { return std::get<0>(_M_t); }
147 pointer _M_ptr() const { return std::get<0>(_M_t); }
 9. Calling 'get'
16. Returning from 'get'
32. Calling 'get'
39. Returning from 'get'
148 _Dp& _M_deleter() { return std::get<1>(_M_t); }
149 const _Dp& _M_deleter() const { return std::get<1>(_M_t); }
150
151 private:
152 tuple<pointer, _Dp> _M_t;
153 };
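The _Ptr machinery above is what lets a deleter override the stored pointer type: if remove_reference<_Dp>::type::pointer exists, that typedef is used instead of _Tp*. A small, self-contained illustration; VoidDeleter is a made-up deleter, not part of the library.

#include <cstdlib>
#include <memory>
#include <type_traits>

// Made-up deleter: its 'pointer' typedef is picked up by unique_ptr.
struct VoidDeleter {
  using pointer = void *;                        // overrides the default int*
  void operator()(void *P) const { std::free(P); }
};

int main() {
  std::unique_ptr<int, VoidDeleter> P(std::malloc(sizeof(int)));
  static_assert(std::is_same<decltype(P)::pointer, void *>::value,
                "stored pointer type comes from the deleter");
  return P != nullptr ? 0 : 1;
}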
154
155 /// 20.7.1.2 unique_ptr for single objects.
156 template <typename _Tp, typename _Dp = default_delete<_Tp>>
157 class unique_ptr
158 {
159 template <class _Up>
160 using _DeleterConstraint =
161 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
162
163 __uniq_ptr_impl<_Tp, _Dp> _M_t;
164
165 public:
166 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
167 using element_type = _Tp;
168 using deleter_type = _Dp;
169
170 // helper template for detecting a safe conversion from another
171 // unique_ptr
172 template<typename _Up, typename _Ep>
173 using __safe_conversion_up = __and_<
174 is_convertible<typename unique_ptr<_Up, _Ep>::pointer, pointer>,
175 __not_<is_array<_Up>>,
176 __or_<__and_<is_reference<deleter_type>,
177 is_same<deleter_type, _Ep>>,
178 __and_<__not_<is_reference<deleter_type>>,
179 is_convertible<_Ep, deleter_type>>
180 >
181 >;
182
183 // Constructors.
184
185 /// Default constructor, creates a unique_ptr that owns nothing.
186 template <typename _Up = _Dp,
187 typename = _DeleterConstraint<_Up>>
188 constexpr unique_ptr() noexcept
189 : _M_t()
190 { }
191
192 /** Takes ownership of a pointer.
193 *
194 * @param __p A pointer to an object of @c element_type
195 *
196 * The deleter will be value-initialized.
197 */
198 template <typename _Up = _Dp,
199 typename = _DeleterConstraint<_Up>>
200 explicit
201 unique_ptr(pointer __p) noexcept
202 : _M_t(__p)
203 { }
204
205 /** Takes ownership of a pointer.
206 *
207 * @param __p A pointer to an object of @c element_type
208 * @param __d A reference to a deleter.
209 *
210 * The deleter will be initialized with @p __d
211 */
212 unique_ptr(pointer __p,
213 typename conditional<is_reference<deleter_type>::value,
214 deleter_type, const deleter_type&>::type __d) noexcept
215 : _M_t(__p, __d) { }
216
217 /** Takes ownership of a pointer.
218 *
219 * @param __p A pointer to an object of @c element_type
220 * @param __d An rvalue reference to a deleter.
221 *
222 * The deleter will be initialized with @p std::move(__d)
223 */
224 unique_ptr(pointer __p,
225 typename remove_reference<deleter_type>::type&& __d) noexcept
226 : _M_t(std::move(__p), std::move(__d))
227 { static_assert(!std::is_reference<deleter_type>::value,
228 "rvalue deleter bound to reference"); }
229
230 /// Creates a unique_ptr that owns nothing.
231 template <typename _Up = _Dp,
232 typename = _DeleterConstraint<_Up>>
233 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
234
235 // Move constructors.
236
237 /// Move constructor.
238 unique_ptr(unique_ptr&& __u) noexcept
239 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
240
241 /** @brief Converting constructor from another type
242 *
243 * Requires that the pointer owned by @p __u is convertible to the
244 * type of pointer owned by this object, @p __u does not own an array,
245 * and @p __u has a compatible deleter type.
246 */
247 template<typename _Up, typename _Ep, typename = _Require<
248 __safe_conversion_up<_Up, _Ep>,
249 typename conditional<is_reference<_Dp>::value,
250 is_same<_Ep, _Dp>,
251 is_convertible<_Ep, _Dp>>::type>>
252 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
253 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
254 { }
255
256#if _GLIBCXX_USE_DEPRECATED
257 /// Converting constructor from @c auto_ptr
258 template<typename _Up, typename = _Require<
259 is_convertible<_Up*, _Tp*>, is_same<_Dp, default_delete<_Tp>>>>
260 unique_ptr(auto_ptr<_Up>&& __u) noexcept;
261#endif
262
263 /// Destructor, invokes the deleter if the stored pointer is not null.
264 ~unique_ptr() noexcept
265 {
266 auto& __ptr = _M_t._M_ptr();
267 if (__ptr != nullptr)
268 get_deleter()(__ptr);
269 __ptr = pointer();
270 }
271
272 // Assignment.
273
274 /** @brief Move assignment operator.
275 *
276 * @param __u The object to transfer ownership from.
277 *
278 * Invokes the deleter first if this object owns a pointer.
279 */
280 unique_ptr&
281 operator=(unique_ptr&& __u) noexcept
282 {
283 reset(__u.release());
284 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
285 return *this;
286 }
287
288 /** @brief Assignment from another type.
289 *
290 * @param __u The object to transfer ownership from, which owns a
291 * convertible pointer to a non-array object.
292 *
293 * Invokes the deleter first if this object owns a pointer.
294 */
295 template<typename _Up, typename _Ep>
296 typename enable_if< __and_<
297 __safe_conversion_up<_Up, _Ep>,
298 is_assignable<deleter_type&, _Ep&&>
299 >::value,
300 unique_ptr&>::type
301 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
302 {
303 reset(__u.release());
304 get_deleter() = std::forward<_Ep>(__u.get_deleter());
305 return *this;
306 }
307
308 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
309 unique_ptr&
310 operator=(nullptr_t) noexcept
311 {
312 reset();
313 return *this;
314 }
315
316 // Observers.
317
318 /// Dereference the stored pointer.
319 typename add_lvalue_reference<element_type>::type
320 operator*() const
321 {
322 __glibcxx_assert(get() != pointer());
323 return *get();
324 }
325
326 /// Return the stored pointer.
327 pointer
328 operator->() const noexcept
329 {
330 _GLIBCXX_DEBUG_PEDASSERT(get() != pointer());
331 return get();
332 }
333
334 /// Return the stored pointer.
335 pointer
336 get() const noexcept
337 { return _M_t._M_ptr(); }
 8. Calling '__uniq_ptr_impl::_M_ptr'
17. Returning from '__uniq_ptr_impl::_M_ptr'
31. Calling '__uniq_ptr_impl::_M_ptr'
40. Returning from '__uniq_ptr_impl::_M_ptr'
338
339 /// Return a reference to the stored deleter.
340 deleter_type&
341 get_deleter() noexcept
342 { return _M_t._M_deleter(); }
343
344 /// Return a reference to the stored deleter.
345 const deleter_type&
346 get_deleter() const noexcept
347 { return _M_t._M_deleter(); }
348
349 /// Return @c true if the stored pointer is not null.
350 explicit operator bool() const noexcept
351 { return get() == pointer() ? false : true; }
352
353 // Modifiers.
354
355 /// Release ownership of any stored pointer.
356 pointer
357 release() noexcept
358 {
359 pointer __p = get();
360 _M_t._M_ptr() = pointer();
361 return __p;
362 }
363
364 /** @brief Replace the stored pointer.
365 *
366 * @param __p The new pointer to store.
367 *
368 * The deleter will be invoked if a pointer is already owned.
369 */
370 void
371 reset(pointer __p = pointer()) noexcept
372 {
373 using std::swap;
374 swap(_M_t._M_ptr(), __p);
375 if (__p != pointer())
376 get_deleter()(__p);
377 }
378
379 /// Exchange the pointer and deleter with another object.
380 void
381 swap(unique_ptr& __u) noexcept
382 {
383 using std::swap;
384 swap(_M_t, __u._M_t);
385 }
386
387 // Disable copy from lvalue.
388 unique_ptr(const unique_ptr&) = delete;
389 unique_ptr& operator=(const unique_ptr&) = delete;
390 };
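To keep the ownership rules above concrete, a minimal usage sketch; Widget is a stand-in type, not anything from the library or the analyzed code.

#include <memory>
#include <utility>

struct Widget { int Value; };

int main() {
  std::unique_ptr<Widget> A(new Widget{42});  // takes ownership
  std::unique_ptr<Widget> B = std::move(A);   // move ctor: A now owns nothing
  B.reset(new Widget{7});                     // deletes the Widget{42} first
  Widget *Raw = B.release();                  // hands ownership back to the caller
  delete Raw;                                 // so the caller must free it
  return (A == nullptr && B == nullptr) ? 0 : 1;
}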
391
392 /// 20.7.1.3 unique_ptr for array objects with a runtime length
393 // [unique.ptr.runtime]
394 // _GLIBCXX_RESOLVE_LIB_DEFECTS
395 // DR 740 - omit specialization for array objects with a compile time length
396 template<typename _Tp, typename _Dp>
397 class unique_ptr<_Tp[], _Dp>
398 {
399 template <typename _Up>
400 using _DeleterConstraint =
401 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
402
403 __uniq_ptr_impl<_Tp, _Dp> _M_t;
404
405 template<typename _Up>
406 using __remove_cv = typename remove_cv<_Up>::type;
407
408 // like is_base_of<_Tp, _Up> but false if unqualified types are the same
409 template<typename _Up>
410 using __is_derived_Tp
411 = __and_< is_base_of<_Tp, _Up>,
412 __not_<is_same<__remove_cv<_Tp>, __remove_cv<_Up>>> >;
413
414 public:
415 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
416 using element_type = _Tp;
417 using deleter_type = _Dp;
418
419 // helper template for detecting a safe conversion from another
420 // unique_ptr
421 template<typename _Up, typename _Ep,
422 typename _Up_up = unique_ptr<_Up, _Ep>,
423 typename _Up_element_type = typename _Up_up::element_type>
424 using __safe_conversion_up = __and_<
425 is_array<_Up>,
426 is_same<pointer, element_type*>,
427 is_same<typename _Up_up::pointer, _Up_element_type*>,
428 is_convertible<_Up_element_type(*)[], element_type(*)[]>,
429 __or_<__and_<is_reference<deleter_type>, is_same<deleter_type, _Ep>>,
430 __and_<__not_<is_reference<deleter_type>>,
431 is_convertible<_Ep, deleter_type>>>
432 >;
433
434 // helper template for detecting a safe conversion from a raw pointer
435 template<typename _Up>
436 using __safe_conversion_raw = __and_<
437 __or_<__or_<is_same<_Up, pointer>,
438 is_same<_Up, nullptr_t>>,
439 __and_<is_pointer<_Up>,
440 is_same<pointer, element_type*>,
441 is_convertible<
442 typename remove_pointer<_Up>::type(*)[],
443 element_type(*)[]>
444 >
445 >
446 >;
447
448 // Constructors.
449
450 /// Default constructor, creates a unique_ptr that owns nothing.
451 template <typename _Up = _Dp,
452 typename = _DeleterConstraint<_Up>>
453 constexpr unique_ptr() noexcept
454 : _M_t()
455 { }
456
457 /** Takes ownership of a pointer.
458 *
459 * @param __p A pointer to an array of a type safely convertible
460 * to an array of @c element_type
461 *
462 * The deleter will be value-initialized.
463 */
464 template<typename _Up,
465 typename _Vp = _Dp,
466 typename = _DeleterConstraint<_Vp>,
467 typename = typename enable_if<
468 __safe_conversion_raw<_Up>::value, bool>::type>
469 explicit
470 unique_ptr(_Up __p) noexcept
471 : _M_t(__p)
472 { }
473
474 /** Takes ownership of a pointer.
475 *
476 * @param __p A pointer to an array of a type safely convertible
477 * to an array of @c element_type
478 * @param __d A reference to a deleter.
479 *
480 * The deleter will be initialized with @p __d
481 */
482 template<typename _Up,
483 typename = typename enable_if<
484 __safe_conversion_raw<_Up>::value, bool>::type>
485 unique_ptr(_Up __p,
486 typename conditional<is_reference<deleter_type>::value,
487 deleter_type, const deleter_type&>::type __d) noexcept
488 : _M_t(__p, __d) { }
489
490 /** Takes ownership of a pointer.
491 *
492 * @param __p A pointer to an array of a type safely convertible
493 * to an array of @c element_type
494 * @param __d A reference to a deleter.
495 *
496 * The deleter will be initialized with @p std::move(__d)
497 */
498 template<typename _Up,
499 typename = typename enable_if<
500 __safe_conversion_raw<_Up>::value, bool>::type>
501 unique_ptr(_Up __p, typename
502 remove_reference<deleter_type>::type&& __d) noexcept
503 : _M_t(std::move(__p), std::move(__d))
504 { static_assert(!is_reference<deleter_type>::value,
505 "rvalue deleter bound to reference"); }
506
507 /// Move constructor.
508 unique_ptr(unique_ptr&& __u) noexcept
509 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
510
511 /// Creates a unique_ptr that owns nothing.
512 template <typename _Up = _Dp,
513 typename = _DeleterConstraint<_Up>>
514 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
515
516 template<typename _Up, typename _Ep,
517 typename = _Require<__safe_conversion_up<_Up, _Ep>>>
518 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
519 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
520 { }
521
522 /// Destructor, invokes the deleter if the stored pointer is not null.
523 ~unique_ptr()
524 {
525 auto& __ptr = _M_t._M_ptr();
526 if (__ptr != nullptr)
527 get_deleter()(__ptr);
528 __ptr = pointer();
529 }
530
531 // Assignment.
532
533 /** @brief Move assignment operator.
534 *
535 * @param __u The object to transfer ownership from.
536 *
537 * Invokes the deleter first if this object owns a pointer.
538 */
539 unique_ptr&
540 operator=(unique_ptr&& __u) noexcept
541 {
542 reset(__u.release());
543 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
544 return *this;
545 }
546
547 /** @brief Assignment from another type.
548 *
549 * @param __u The object to transfer ownership from, which owns a
550 * convertible pointer to an array object.
551 *
552 * Invokes the deleter first if this object owns a pointer.
553 */
554 template<typename _Up, typename _Ep>
555 typename
556 enable_if<__and_<__safe_conversion_up<_Up, _Ep>,
557 is_assignable<deleter_type&, _Ep&&>
558 >::value,
559 unique_ptr&>::type
560 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
561 {
562 reset(__u.release());
563 get_deleter() = std::forward<_Ep>(__u.get_deleter());
564 return *this;
565 }
566
567 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
568 unique_ptr&
569 operator=(nullptr_t) noexcept
570 {
571 reset();
572 return *this;
573 }
574
575 // Observers.
576
577 /// Access an element of owned array.
578 typename std::add_lvalue_reference<element_type>::type
579 operator[](size_t __i) const
580 {
581 __glibcxx_assert(get() != pointer());
582 return get()[__i];
583 }
584
585 /// Return the stored pointer.
586 pointer
587 get() const noexcept
588 { return _M_t._M_ptr(); }
589
590 /// Return a reference to the stored deleter.
591 deleter_type&
592 get_deleter() noexcept
593 { return _M_t._M_deleter(); }
594
595 /// Return a reference to the stored deleter.
596 const deleter_type&
597 get_deleter() const noexcept
598 { return _M_t._M_deleter(); }
599
600 /// Return @c true if the stored pointer is not null.
601 explicit operator bool() const noexcept
602 { return get() == pointer() ? false : true; }
603
604 // Modifiers.
605
606 /// Release ownership of any stored pointer.
607 pointer
608 release() noexcept
609 {
610 pointer __p = get();
611 _M_t._M_ptr() = pointer();
612 return __p;
613 }
614
615 /** @brief Replace the stored pointer.
616 *
617 * @param __p The new pointer to store.
618 *
619 * The deleter will be invoked if a pointer is already owned.
620 */
621 template <typename _Up,
622 typename = _Require<
623 __or_<is_same<_Up, pointer>,
624 __and_<is_same<pointer, element_type*>,
625 is_pointer<_Up>,
626 is_convertible<
627 typename remove_pointer<_Up>::type(*)[],
628 element_type(*)[]
629 >
630 >
631 >
632 >>
633 void
634 reset(_Up __p) noexcept
635 {
636 pointer __ptr = __p;
637 using std::swap;
638 swap(_M_t._M_ptr(), __ptr);
639 if (__ptr != nullptr)
640 get_deleter()(__ptr);
641 }
642
643 void reset(nullptr_t = nullptr) noexcept
644 {
645 reset(pointer());
646 }
647
648 /// Exchange the pointer and deleter with another object.
649 void
650 swap(unique_ptr& __u) noexcept
651 {
652 using std::swap;
653 swap(_M_t, __u._M_t);
654 }
655
656 // Disable copy from lvalue.
657 unique_ptr(const unique_ptr&) = delete;
658 unique_ptr& operator=(const unique_ptr&) = delete;
659 };
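The array specialization differs from the single-object form mainly in calling delete[] and exposing operator[]; a short sketch:

#include <memory>

int main() {
  std::unique_ptr<int[]> Buf(new int[8]());  // value-initialized array, freed with delete[]
  Buf[3] = 42;                               // operator[] instead of operator*/operator->
  Buf.reset(new int[4]());                   // delete[]s the old array before storing the new one
  return Buf ? 0 : 1;
}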
660
661 template<typename _Tp, typename _Dp>
662 inline
663#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
664 // Constrained free swap overload, see p0185r1
665 typename enable_if<__is_swappable<_Dp>::value>::type
666#else
667 void
668#endif
669 swap(unique_ptr<_Tp, _Dp>& __x,
670 unique_ptr<_Tp, _Dp>& __y) noexcept
671 { __x.swap(__y); }
672
673#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
674 template<typename _Tp, typename _Dp>
675 typename enable_if<!__is_swappable<_Dp>::value>::type
676 swap(unique_ptr<_Tp, _Dp>&,
677 unique_ptr<_Tp, _Dp>&) = delete;
678#endif
679
680 template<typename _Tp, typename _Dp,
681 typename _Up, typename _Ep>
682 inline bool
683 operator==(const unique_ptr<_Tp, _Dp>& __x,
684 const unique_ptr<_Up, _Ep>& __y)
685 { return __x.get() == __y.get(); }
686
687 template<typename _Tp, typename _Dp>
688 inline bool
689 operator==(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
690 { return !__x; }
691
692 template<typename _Tp, typename _Dp>
693 inline bool
694 operator==(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
695 { return !__x; }
696
697 template<typename _Tp, typename _Dp,
698 typename _Up, typename _Ep>
699 inline bool
700 operator!=(const unique_ptr<_Tp, _Dp>& __x,
701 const unique_ptr<_Up, _Ep>& __y)
702 { return __x.get() != __y.get(); }
703
704 template<typename _Tp, typename _Dp>
705 inline bool
706 operator!=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
707 { return (bool)__x; }
708
709 template<typename _Tp, typename _Dp>
710 inline bool
711 operator!=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
712 { return (bool)__x; }
713
714 template<typename _Tp, typename _Dp,
715 typename _Up, typename _Ep>
716 inline bool
717 operator<(const unique_ptr<_Tp, _Dp>& __x,
718 const unique_ptr<_Up, _Ep>& __y)
719 {
720 typedef typename
721 std::common_type<typename unique_ptr<_Tp, _Dp>::pointer,
722 typename unique_ptr<_Up, _Ep>::pointer>::type _CT;
723 return std::less<_CT>()(__x.get(), __y.get());
724 }
725
726 template<typename _Tp, typename _Dp>
727 inline bool
728 operator<(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
729 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
730 nullptr); }
731
732 template<typename _Tp, typename _Dp>
733 inline bool
734 operator<(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
735 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
736 __x.get()); }
737
738 template<typename _Tp, typename _Dp,
739 typename _Up, typename _Ep>
740 inline bool
741 operator<=(const unique_ptr<_Tp, _Dp>& __x,
742 const unique_ptr<_Up, _Ep>& __y)
743 { return !(__y < __x); }
744
745 template<typename _Tp, typename _Dp>
746 inline bool
747 operator<=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
748 { return !(nullptr < __x); }
749
750 template<typename _Tp, typename _Dp>
751 inline bool
752 operator<=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
753 { return !(__x < nullptr); }
754
755 template<typename _Tp, typename _Dp,
756 typename _Up, typename _Ep>
757 inline bool
758 operator>(const unique_ptr<_Tp, _Dp>& __x,
759 const unique_ptr<_Up, _Ep>& __y)
760 { return (__y < __x); }
761
762 template<typename _Tp, typename _Dp>
763 inline bool
764 operator>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
765 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
766 __x.get()); }
767
768 template<typename _Tp, typename _Dp>
769 inline bool
770 operator>(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
771 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
772 nullptr); }
773
774 template<typename _Tp, typename _Dp,
775 typename _Up, typename _Ep>
776 inline bool
777 operator>=(const unique_ptr<_Tp, _Dp>& __x,
778 const unique_ptr<_Up, _Ep>& __y)
779 { return !(__x < __y); }
780
781 template<typename _Tp, typename _Dp>
782 inline bool
783 operator>=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
784 { return !(__x < nullptr); }
785
786 template<typename _Tp, typename _Dp>
787 inline bool
788 operator>=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
789 { return !(nullptr < __x); }
790
791 /// std::hash specialization for unique_ptr.
792 template<typename _Tp, typename _Dp>
793 struct hash<unique_ptr<_Tp, _Dp>>
794 : public __hash_base<size_t, unique_ptr<_Tp, _Dp>>,
795 private __poison_hash<typename unique_ptr<_Tp, _Dp>::pointer>
796 {
797 size_t
798 operator()(const unique_ptr<_Tp, _Dp>& __u) const noexcept
799 {
800 typedef unique_ptr<_Tp, _Dp> _UP;
801 return std::hash<typename _UP::pointer>()(__u.get());
802 }
803 };
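Since the specialization just hashes the stored pointer, unique_ptr works directly as a key in unordered containers; a brief sketch:

#include <memory>
#include <unordered_set>

int main() {
  std::unordered_set<std::unique_ptr<int>> Owned;
  Owned.insert(std::unique_ptr<int>(new int(1)));  // hashed via hash<int*> on get()
  return Owned.size() == 1 ? 0 : 1;
}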
804
805#if __cplusplus > 201103L
806
807#define __cpp_lib_make_unique 201304
808
809 template<typename _Tp>
810 struct _MakeUniq
811 { typedef unique_ptr<_Tp> __single_object; };
812
813 template<typename _Tp>
814 struct _MakeUniq<_Tp[]>
815 { typedef unique_ptr<_Tp[]> __array; };
816
817 template<typename _Tp, size_t _Bound>
818 struct _MakeUniq<_Tp[_Bound]>
819 { struct __invalid_type { }; };
820
821 /// std::make_unique for single objects
822 template<typename _Tp, typename... _Args>
823 inline typename _MakeUniq<_Tp>::__single_object
824 make_unique(_Args&&... __args)
825 { return unique_ptr<_Tp>(new _Tp(std::forward<_Args>(__args)...)); }
826
827 /// std::make_unique for arrays of unknown bound
828 template<typename _Tp>
829 inline typename _MakeUniq<_Tp>::__array
830 make_unique(size_t __num)
831 { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__num]()); }
832
833 /// Disable std::make_unique for arrays of known bound
834 template<typename _Tp, typename... _Args>
835 inline typename _MakeUniq<_Tp>::__invalid_type
836 make_unique(_Args&&...) = delete;
837#endif
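Note the guard above: make_unique only exists from C++14 onward, so this block is compiled out of the -std=c++11 build being analyzed. A usage sketch for a C++14 translation unit:

// Requires -std=c++14 or newer.
#include <memory>

int main() {
  auto P   = std::make_unique<int>(42);    // single object: new int(42)
  auto Arr = std::make_unique<int[]>(16);  // array of unknown bound, value-initialized
  // std::make_unique<int[4]>() is deleted: arrays of known bound are rejected.
  return (*P == 42 && Arr[0] == 0) ? 0 : 1;
}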
838
839 // @} group pointer_abstractions
840
841_GLIBCXX_END_NAMESPACE_VERSION
842} // namespace
843
844#endif /* _UNIQUE_PTR_H */

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/tuple

1// <tuple> -*- C++ -*-
2
3// Copyright (C) 2007-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/tuple
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_TUPLE
30#define _GLIBCXX_TUPLE 1
31
32#pragma GCC system_header
33
34#if __cplusplus < 201103L
35# include <bits/c++0x_warning.h>
36#else
37
38#include <utility>
39#include <array>
40#include <bits/uses_allocator.h>
41#include <bits/invoke.h>
42
43namespace std _GLIBCXX_VISIBILITY(default)
44{
45_GLIBCXX_BEGIN_NAMESPACE_VERSION
46
47 /**
48 * @addtogroup utilities
49 * @{
50 */
51
52 template<typename... _Elements>
53 class tuple;
54
55 template<typename _Tp>
56 struct __is_empty_non_tuple : is_empty<_Tp> { };
57
58 // Using EBO for elements that are tuples causes ambiguous base errors.
59 template<typename _El0, typename... _El>
60 struct __is_empty_non_tuple<tuple<_El0, _El...>> : false_type { };
61
62 // Use the Empty Base-class Optimization for empty, non-final types.
63 template<typename _Tp>
64 using __empty_not_final
65 = typename conditional<__is_final(_Tp), false_type,
66 __is_empty_non_tuple<_Tp>>::type;
67
68 template<std::size_t _Idx, typename _Head,
69 bool = __empty_not_final<_Head>::value>
70 struct _Head_base;
71
72 template<std::size_t _Idx, typename _Head>
73 struct _Head_base<_Idx, _Head, true>
74 : public _Head
75 {
76 constexpr _Head_base()
77 : _Head() { }
78
79 constexpr _Head_base(const _Head& __h)
80 : _Head(__h) { }
81
82 constexpr _Head_base(const _Head_base&) = default;
83 constexpr _Head_base(_Head_base&&) = default;
84
85 template<typename _UHead>
86 constexpr _Head_base(_UHead&& __h)
87 : _Head(std::forward<_UHead>(__h)) { }
88
89 _Head_base(allocator_arg_t, __uses_alloc0)
90 : _Head() { }
91
92 template<typename _Alloc>
93 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
94 : _Head(allocator_arg, *__a._M_a) { }
95
96 template<typename _Alloc>
97 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
98 : _Head(*__a._M_a) { }
99
100 template<typename _UHead>
101 _Head_base(__uses_alloc0, _UHead&& __uhead)
102 : _Head(std::forward<_UHead>(__uhead)) { }
103
104 template<typename _Alloc, typename _UHead>
105 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
106 : _Head(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead)) { }
107
108 template<typename _Alloc, typename _UHead>
109 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
110 : _Head(std::forward<_UHead>(__uhead), *__a._M_a) { }
111
112 static constexpr _Head&
113 _M_head(_Head_base& __b) noexcept { return __b; }
114
115 static constexpr const _Head&
116 _M_head(const _Head_base& __b) noexcept { return __b; }
117 };
118
119 template<std::size_t _Idx, typename _Head>
120 struct _Head_base<_Idx, _Head, false>
121 {
122 constexpr _Head_base()
123 : _M_head_impl() { }
124
125 constexpr _Head_base(const _Head& __h)
126 : _M_head_impl(__h) { }
127
128 constexpr _Head_base(const _Head_base&) = default;
129 constexpr _Head_base(_Head_base&&) = default;
130
131 template<typename _UHead>
132 constexpr _Head_base(_UHead&& __h)
133 : _M_head_impl(std::forward<_UHead>(__h)) { }
134
135 _Head_base(allocator_arg_t, __uses_alloc0)
136 : _M_head_impl() { }
137
138 template<typename _Alloc>
139 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
140 : _M_head_impl(allocator_arg, *__a._M_a) { }
141
142 template<typename _Alloc>
143 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
144 : _M_head_impl(*__a._M_a) { }
145
146 template<typename _UHead>
147 _Head_base(__uses_alloc0, _UHead&& __uhead)
148 : _M_head_impl(std::forward<_UHead>(__uhead)) { }
149
150 template<typename _Alloc, typename _UHead>
151 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
152 : _M_head_impl(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead))
153 { }
154
155 template<typename _Alloc, typename _UHead>
156 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
157 : _M_head_impl(std::forward<_UHead>(__uhead), *__a._M_a) { }
158
159 static constexpr _Head&
160 _M_head(_Head_base& __b) noexcept { return __b._M_head_impl; }
161
162 static constexpr const _Head&
163 _M_head(const _Head_base& __b) noexcept { return __b._M_head_impl; }
164
165 _Head _M_head_impl;
166 };
167
168 /**
169 * Contains the actual implementation of the @c tuple template, stored
170 * as a recursive inheritance hierarchy from the first element (most
171 * derived class) to the last (least derived class). The @c Idx
172 * parameter gives the 0-based index of the element stored at this
173 * point in the hierarchy; we use it to implement a constant-time
174 * get() operation.
175 */
176 template<std::size_t _Idx, typename... _Elements>
177 struct _Tuple_impl;
178
179 /**
180 * Recursive tuple implementation. Here we store the @c Head element
181 * and derive from a @c Tuple_impl containing the remaining elements
182 * (which contains the @c Tail).
183 */
184 template<std::size_t _Idx, typename _Head, typename... _Tail>
185 struct _Tuple_impl<_Idx, _Head, _Tail...>
186 : public _Tuple_impl<_Idx + 1, _Tail...>,
187 private _Head_base<_Idx, _Head>
188 {
189 template<std::size_t, typename...> friend class _Tuple_impl;
190
191 typedef _Tuple_impl<_Idx + 1, _Tail...> _Inherited;
192 typedef _Head_base<_Idx, _Head> _Base;
193
194 static constexpr _Head&
195 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
196
197 static constexpr const _Head&
198 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
12. Calling '_Head_base::_M_head'
13. Returning from '_Head_base::_M_head'
35. Calling '_Head_base::_M_head'
36. Returning from '_Head_base::_M_head'
199
200 static constexpr _Inherited&
201 _M_tail(_Tuple_impl& __t) noexcept { return __t; }
202
203 static constexpr const _Inherited&
204 _M_tail(const _Tuple_impl& __t) noexcept { return __t; }
205
206 constexpr _Tuple_impl()
207 : _Inherited(), _Base() { }
208
209 explicit
210 constexpr _Tuple_impl(const _Head& __head, const _Tail&... __tail)
211 : _Inherited(__tail...), _Base(__head) { }
212
213 template<typename _UHead, typename... _UTail, typename = typename
214 enable_if<sizeof...(_Tail) == sizeof...(_UTail)>::type>
215 explicit
216 constexpr _Tuple_impl(_UHead&& __head, _UTail&&... __tail)
217 : _Inherited(std::forward<_UTail>(__tail)...),
218 _Base(std::forward<_UHead>(__head)) { }
219
220 constexpr _Tuple_impl(const _Tuple_impl&) = default;
221
222 constexpr
223 _Tuple_impl(_Tuple_impl&& __in)
224 noexcept(__and_<is_nothrow_move_constructible<_Head>,
225 is_nothrow_move_constructible<_Inherited>>::value)
226 : _Inherited(std::move(_M_tail(__in))),
227 _Base(std::forward<_Head>(_M_head(__in))) { }
228
229 template<typename... _UElements>
230 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UElements...>& __in)
231 : _Inherited(_Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
232 _Base(_Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
233
234 template<typename _UHead, typename... _UTails>
235 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
236 : _Inherited(std::move
237 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
238 _Base(std::forward<_UHead>
239 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
240
241 template<typename _Alloc>
242 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
243 : _Inherited(__tag, __a),
244 _Base(__tag, __use_alloc<_Head>(__a)) { }
245
246 template<typename _Alloc>
247 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
248 const _Head& __head, const _Tail&... __tail)
249 : _Inherited(__tag, __a, __tail...),
250 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
251
252 template<typename _Alloc, typename _UHead, typename... _UTail,
253 typename = typename enable_if<sizeof...(_Tail)
254 == sizeof...(_UTail)>::type>
255 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
256 _UHead&& __head, _UTail&&... __tail)
257 : _Inherited(__tag, __a, std::forward<_UTail>(__tail)...),
258 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
259 std::forward<_UHead>(__head)) { }
260
261 template<typename _Alloc>
262 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
263 const _Tuple_impl& __in)
264 : _Inherited(__tag, __a, _M_tail(__in)),
265 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
266
267 template<typename _Alloc>
268 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
269 _Tuple_impl&& __in)
270 : _Inherited(__tag, __a, std::move(_M_tail(__in))),
271 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
272 std::forward<_Head>(_M_head(__in))) { }
273
274 template<typename _Alloc, typename... _UElements>
275 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
276 const _Tuple_impl<_Idx, _UElements...>& __in)
277 : _Inherited(__tag, __a,
278 _Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
279 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
280 _Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
281
282 template<typename _Alloc, typename _UHead, typename... _UTails>
283 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
284 _Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
285 : _Inherited(__tag, __a, std::move
286 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
287 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
288 std::forward<_UHead>
289 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
290
291 _Tuple_impl&
292 operator=(const _Tuple_impl& __in)
293 {
294 _M_head(*this) = _M_head(__in);
295 _M_tail(*this) = _M_tail(__in);
296 return *this;
297 }
298
299 _Tuple_impl&
300 operator=(_Tuple_impl&& __in)
301 noexcept(__and_<is_nothrow_move_assignable<_Head>,
302 is_nothrow_move_assignable<_Inherited>>::value)
303 {
304 _M_head(*this) = std::forward<_Head>(_M_head(__in));
305 _M_tail(*this) = std::move(_M_tail(__in));
306 return *this;
307 }
308
309 template<typename... _UElements>
310 _Tuple_impl&
311 operator=(const _Tuple_impl<_Idx, _UElements...>& __in)
312 {
313 _M_head(*this) = _Tuple_impl<_Idx, _UElements...>::_M_head(__in);
314 _M_tail(*this) = _Tuple_impl<_Idx, _UElements...>::_M_tail(__in);
315 return *this;
316 }
317
318 template<typename _UHead, typename... _UTails>
319 _Tuple_impl&
320 operator=(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
321 {
322 _M_head(*this) = std::forward<_UHead>
323 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in));
324 _M_tail(*this) = std::move
325 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in));
326 return *this;
327 }
328
329 protected:
330 void
331 _M_swap(_Tuple_impl& __in)
332 noexcept(__is_nothrow_swappable<_Head>::value
333 && noexcept(_M_tail(__in)._M_swap(_M_tail(__in))))
334 {
335 using std::swap;
336 swap(_M_head(*this), _M_head(__in));
337 _Inherited::_M_swap(_M_tail(__in));
338 }
339 };
340
341 // Basis case of inheritance recursion.
342 template<std::size_t _Idx, typename _Head>
343 struct _Tuple_impl<_Idx, _Head>
344 : private _Head_base<_Idx, _Head>
345 {
346 template<std::size_t, typename...> friend class _Tuple_impl;
347
348 typedef _Head_base<_Idx, _Head> _Base;
349
350 static constexpr _Head&
351 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
352
353 static constexpr const _Head&
354 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
355
356 constexpr _Tuple_impl()
357 : _Base() { }
358
359 explicit
360 constexpr _Tuple_impl(const _Head& __head)
361 : _Base(__head) { }
362
363 template<typename _UHead>
364 explicit
365 constexpr _Tuple_impl(_UHead&& __head)
366 : _Base(std::forward<_UHead>(__head)) { }
367
368 constexpr _Tuple_impl(const _Tuple_impl&) = default;
369
370 constexpr
371 _Tuple_impl(_Tuple_impl&& __in)
372 noexcept(is_nothrow_move_constructible<_Head>::value)
373 : _Base(std::forward<_Head>(_M_head(__in))) { }
374
375 template<typename _UHead>
376 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UHead>& __in)
377 : _Base(_Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
378
379 template<typename _UHead>
380 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead>&& __in)
381 : _Base(std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
382 { }
383
384 template<typename _Alloc>
385 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
386 : _Base(__tag, __use_alloc<_Head>(__a)) { }
387
388 template<typename _Alloc>
389 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
390 const _Head& __head)
391 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
392
393 template<typename _Alloc, typename _UHead>
394 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
395 _UHead&& __head)
396 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
397 std::forward<_UHead>(__head)) { }
398
399 template<typename _Alloc>
400 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
401 const _Tuple_impl& __in)
402 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
403
404 template<typename _Alloc>
405 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
406 _Tuple_impl&& __in)
407 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
408 std::forward<_Head>(_M_head(__in))) { }
409
410 template<typename _Alloc, typename _UHead>
411 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
412 const _Tuple_impl<_Idx, _UHead>& __in)
413 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
414 _Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
415
416 template<typename _Alloc, typename _UHead>
417 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
418 _Tuple_impl<_Idx, _UHead>&& __in)
419 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
420 std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
421 { }
422
423 _Tuple_impl&
424 operator=(const _Tuple_impl& __in)
425 {
426 _M_head(*this) = _M_head(__in);
427 return *this;
428 }
429
430 _Tuple_impl&
431 operator=(_Tuple_impl&& __in)
432 noexcept(is_nothrow_move_assignable<_Head>::value)
433 {
434 _M_head(*this) = std::forward<_Head>(_M_head(__in));
435 return *this;
436 }
437
438 template<typename _UHead>
439 _Tuple_impl&
440 operator=(const _Tuple_impl<_Idx, _UHead>& __in)
441 {
442 _M_head(*this) = _Tuple_impl<_Idx, _UHead>::_M_head(__in);
443 return *this;
444 }
445
446 template<typename _UHead>
447 _Tuple_impl&
448 operator=(_Tuple_impl<_Idx, _UHead>&& __in)
449 {
450 _M_head(*this)
451 = std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in));
452 return *this;
453 }
454
455 protected:
456 void
457 _M_swap(_Tuple_impl& __in)
458 noexcept(__is_nothrow_swappable<_Head>::value)
459 {
460 using std::swap;
461 swap(_M_head(*this), _M_head(__in));
462 }
463 };
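The recursive layout described above (each _Tuple_impl<Idx, Head, Tail...> stores Head and inherits the rest) is also what constant-time get<N> relies on. A stripped-down sketch of the same idea with made-up names (TupleImpl, getNth), not the real libstdc++ code:

#include <cstddef>

// Element Idx lives in the TupleImpl<Idx, ...> level of the inheritance chain.
template <std::size_t Idx, typename... Ts> struct TupleImpl {};

template <std::size_t Idx, typename Head, typename... Tail>
struct TupleImpl<Idx, Head, Tail...> : TupleImpl<Idx + 1, Tail...> {
  Head Value;
  TupleImpl(Head H, Tail... Rest)
      : TupleImpl<Idx + 1, Tail...>(Rest...), Value(H) {}
};

// getNth<I> uses derived-to-base deduction to pick the level storing element I.
template <std::size_t I, typename Head, typename... Tail>
Head &getNth(TupleImpl<I, Head, Tail...> &Tup) { return Tup.Value; }

int main() {
  TupleImpl<0, int, double, char> T(1, 2.5, 'x');
  return getNth<2>(T) == 'x' ? 0 : 1;  // reads the char stored two levels up
}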
464
465 // Concept utility functions, reused in conditionally-explicit
466 // constructors.
467 template<bool, typename... _Elements>
468 struct _TC
469 {
470 template<typename... _UElements>
471 static constexpr bool _ConstructibleTuple()
472 {
473 return __and_<is_constructible<_Elements, const _UElements&>...>::value;
474 }
475
476 template<typename... _UElements>
477 static constexpr bool _ImplicitlyConvertibleTuple()
478 {
479 return __and_<is_convertible<const _UElements&, _Elements>...>::value;
480 }
481
482 template<typename... _UElements>
483 static constexpr bool _MoveConstructibleTuple()
484 {
485 return __and_<is_constructible<_Elements, _UElements&&>...>::value;
486 }
487
488 template<typename... _UElements>
489 static constexpr bool _ImplicitlyMoveConvertibleTuple()
490 {
491 return __and_<is_convertible<_UElements&&, _Elements>...>::value;
492 }
493
494 template<typename _SrcTuple>
495 static constexpr bool _NonNestedTuple()
496 {
497 return __and_<__not_<is_same<tuple<_Elements...>,
498 typename remove_cv<
499 typename remove_reference<_SrcTuple>::type
500 >::type>>,
501 __not_<is_convertible<_SrcTuple, _Elements...>>,
502 __not_<is_constructible<_Elements..., _SrcTuple>>
503 >::value;
504 }
505 template<typename... _UElements>
506 static constexpr bool _NotSameTuple()
507 {
508 return __not_<is_same<tuple<_Elements...>,
509 typename remove_const<
510 typename remove_reference<_UElements...>::type
511 >::type>>::value;
512 }
513 };
514
515 template<typename... _Elements>
516 struct _TC<false, _Elements...>
517 {
518 template<typename... _UElements>
519 static constexpr bool _ConstructibleTuple()
520 {
521 return false;
522 }
523
524 template<typename... _UElements>
525 static constexpr bool _ImplicitlyConvertibleTuple()
526 {
527 return false;
528 }
529
530 template<typename... _UElements>
531 static constexpr bool _MoveConstructibleTuple()
532 {
533 return false;
534 }
535
536 template<typename... _UElements>
537 static constexpr bool _ImplicitlyMoveConvertibleTuple()
538 {
539 return false;
540 }
541
542 template<typename... _UElements>
543 static constexpr bool _NonNestedTuple()
544 {
545 return true;
546 }
547 template<typename... _UElements>
548 static constexpr bool _NotSameTuple()
549 {
550 return true;
551 }
552 };
553
554 /// Primary class template, tuple
555 template<typename... _Elements>
556 class tuple : public _Tuple_impl<0, _Elements...>
557 {
558 typedef _Tuple_impl<0, _Elements...> _Inherited;
559
560 // Used for constraining the default constructor so
561 // that it becomes dependent on the constraints.
562 template<typename _Dummy>
563 struct _TC2
564 {
565 static constexpr bool _DefaultConstructibleTuple()
566 {
567 return __and_<is_default_constructible<_Elements>...>::value;
568 }
569 static constexpr bool _ImplicitlyDefaultConstructibleTuple()
570 {
571 return __and_<__is_implicitly_default_constructible<_Elements>...>
572 ::value;
573 }
574 };
575
576 public:
577 template<typename _Dummy = void,
578 typename enable_if<_TC2<_Dummy>::
579 _ImplicitlyDefaultConstructibleTuple(),
580 bool>::type = true>
581 constexpr tuple()
582 : _Inherited() { }
583
584 template<typename _Dummy = void,
585 typename enable_if<_TC2<_Dummy>::
586 _DefaultConstructibleTuple()
587 &&
588 !_TC2<_Dummy>::
589 _ImplicitlyDefaultConstructibleTuple(),
590 bool>::type = false>
591 explicit constexpr tuple()
592 : _Inherited() { }
593
594 // Shortcut for the cases where constructors taking _Elements...
595 // need to be constrained.
596 template<typename _Dummy> using _TCC =
597 _TC<is_same<_Dummy, void>::value,
598 _Elements...>;
599
600 template<typename _Dummy = void,
601 typename enable_if<
602 _TCC<_Dummy>::template
603 _ConstructibleTuple<_Elements...>()
604 && _TCC<_Dummy>::template
605 _ImplicitlyConvertibleTuple<_Elements...>()
606 && (sizeof...(_Elements) >= 1),
607 bool>::type=true>
608 constexpr tuple(const _Elements&... __elements)
609 : _Inherited(__elements...) { }
610
611 template<typename _Dummy = void,
612 typename enable_if<
613 _TCC<_Dummy>::template
614 _ConstructibleTuple<_Elements...>()
615 && !_TCC<_Dummy>::template
616 _ImplicitlyConvertibleTuple<_Elements...>()
617 && (sizeof...(_Elements) >= 1),
618 bool>::type=false>
619 explicit constexpr tuple(const _Elements&... __elements)
620 : _Inherited(__elements...) { }
621
622 // Shortcut for the cases where constructors taking _UElements...
623 // need to be constrained.
624 template<typename... _UElements> using _TMC =
625 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
626 && (_TC<(sizeof...(_UElements)==1), _Elements...>::
627 template _NotSameTuple<_UElements...>()),
628 _Elements...>;
629
630 // Shortcut for the cases where constructors taking tuple<_UElements...>
631 // need to be constrained.
632 template<typename... _UElements> using _TMCT =
633 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
634 && !is_same<tuple<_Elements...>,
635 tuple<_UElements...>>::value,
636 _Elements...>;
637
638 template<typename... _UElements, typename
639 enable_if<
640 _TMC<_UElements...>::template
641 _MoveConstructibleTuple<_UElements...>()
642 && _TMC<_UElements...>::template
643 _ImplicitlyMoveConvertibleTuple<_UElements...>()
644 && (sizeof...(_Elements) >= 1),
645 bool>::type=true>
646 constexpr tuple(_UElements&&... __elements)
647 : _Inherited(std::forward<_UElements>(__elements)...) { }
648
649 template<typename... _UElements, typename
650 enable_if<
651 _TMC<_UElements...>::template
652 _MoveConstructibleTuple<_UElements...>()
653 && !_TMC<_UElements...>::template
654 _ImplicitlyMoveConvertibleTuple<_UElements...>()
655 && (sizeof...(_Elements) >= 1),
656 bool>::type=false>
657 explicit constexpr tuple(_UElements&&... __elements)
658 : _Inherited(std::forward<_UElements>(__elements)...) { }
659
660 constexpr tuple(const tuple&) = default;
661
662 constexpr tuple(tuple&&) = default;
663
664 // Shortcut for the cases where constructors taking tuples
665 // must avoid creating temporaries.
666 template<typename _Dummy> using _TNTC =
667 _TC<is_same<_Dummy, void>::value && sizeof...(_Elements) == 1,
668 _Elements...>;
669
670 template<typename... _UElements, typename _Dummy = void, typename
671 enable_if<_TMCT<_UElements...>::template
672 _ConstructibleTuple<_UElements...>()
673 && _TMCT<_UElements...>::template
674 _ImplicitlyConvertibleTuple<_UElements...>()
675 && _TNTC<_Dummy>::template
676 _NonNestedTuple<const tuple<_UElements...>&>(),
677 bool>::type=true>
678 constexpr tuple(const tuple<_UElements...>& __in)
679 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
680 { }
681
682 template<typename... _UElements, typename _Dummy = void, typename
683 enable_if<_TMCT<_UElements...>::template
684 _ConstructibleTuple<_UElements...>()
685 && !_TMCT<_UElements...>::template
686 _ImplicitlyConvertibleTuple<_UElements...>()
687 && _TNTC<_Dummy>::template
688 _NonNestedTuple<const tuple<_UElements...>&>(),
689 bool>::type=false>
690 explicit constexpr tuple(const tuple<_UElements...>& __in)
691 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
692 { }
693
694 template<typename... _UElements, typename _Dummy = void, typename
695 enable_if<_TMCT<_UElements...>::template
696 _MoveConstructibleTuple<_UElements...>()
697 && _TMCT<_UElements...>::template
698 _ImplicitlyMoveConvertibleTuple<_UElements...>()
699 && _TNTC<_Dummy>::template
700 _NonNestedTuple<tuple<_UElements...>&&>(),
701 bool>::type=true>
702 constexpr tuple(tuple<_UElements...>&& __in)
703 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
704
705 template<typename... _UElements, typename _Dummy = void, typename
706 enable_if<_TMCT<_UElements...>::template
707 _MoveConstructibleTuple<_UElements...>()
708 && !_TMCT<_UElements...>::template
709 _ImplicitlyMoveConvertibleTuple<_UElements...>()
710 && _TNTC<_Dummy>::template
711 _NonNestedTuple<tuple<_UElements...>&&>(),
712 bool>::type=false>
713 explicit constexpr tuple(tuple<_UElements...>&& __in)
714 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
715
716 // Allocator-extended constructors.
717
718 template<typename _Alloc>
719 tuple(allocator_arg_t __tag, const _Alloc& __a)
720 : _Inherited(__tag, __a) { }
721
722 template<typename _Alloc, typename _Dummy = void,
723 typename enable_if<
724 _TCC<_Dummy>::template
725 _ConstructibleTuple<_Elements...>()
726 && _TCC<_Dummy>::template
727 _ImplicitlyConvertibleTuple<_Elements...>(),
728 bool>::type=true>
729 tuple(allocator_arg_t __tag, const _Alloc& __a,
730 const _Elements&... __elements)
731 : _Inherited(__tag, __a, __elements...) { }
732
733 template<typename _Alloc, typename _Dummy = void,
734 typename enable_if<
735 _TCC<_Dummy>::template
736 _ConstructibleTuple<_Elements...>()
737 && !_TCC<_Dummy>::template
738 _ImplicitlyConvertibleTuple<_Elements...>(),
739 bool>::type=false>
740 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
741 const _Elements&... __elements)
742 : _Inherited(__tag, __a, __elements...) { }
743
744 template<typename _Alloc, typename... _UElements, typename
745 enable_if<_TMC<_UElements...>::template
746 _MoveConstructibleTuple<_UElements...>()
747 && _TMC<_UElements...>::template
748 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
749 bool>::type=true>
750 tuple(allocator_arg_t __tag, const _Alloc& __a,
751 _UElements&&... __elements)
752 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
753 { }
754
755 template<typename _Alloc, typename... _UElements, typename
756 enable_if<_TMC<_UElements...>::template
757 _MoveConstructibleTuple<_UElements...>()
758 && !_TMC<_UElements...>::template
759 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
760 bool>::type=false>
761 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
762 _UElements&&... __elements)
763 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
764 { }
765
766 template<typename _Alloc>
767 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
768 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
769
770 template<typename _Alloc>
771 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
772 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
773
774 template<typename _Alloc, typename _Dummy = void,
775 typename... _UElements, typename
776 enable_if<_TMCT<_UElements...>::template
777 _ConstructibleTuple<_UElements...>()
778 && _TMCT<_UElements...>::template
779 _ImplicitlyConvertibleTuple<_UElements...>()
780 && _TNTC<_Dummy>::template
781 _NonNestedTuple<tuple<_UElements...>&&>(),
782 bool>::type=true>
783 tuple(allocator_arg_t __tag, const _Alloc& __a,
784 const tuple<_UElements...>& __in)
785 : _Inherited(__tag, __a,
786 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
787 { }
788
789 template<typename _Alloc, typename _Dummy = void,
790 typename... _UElements, typename
791 enable_if<_TMCT<_UElements...>::template
792 _ConstructibleTuple<_UElements...>()
793 && !_TMCT<_UElements...>::template
794 _ImplicitlyConvertibleTuple<_UElements...>()
795 && _TNTC<_Dummy>::template
796 _NonNestedTuple<tuple<_UElements...>&&>(),
797 bool>::type=false>
798 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
799 const tuple<_UElements...>& __in)
800 : _Inherited(__tag, __a,
801 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
802 { }
803
804 template<typename _Alloc, typename _Dummy = void,
805 typename... _UElements, typename
806 enable_if<_TMCT<_UElements...>::template
807 _MoveConstructibleTuple<_UElements...>()
808 && _TMCT<_UElements...>::template
809 _ImplicitlyMoveConvertibleTuple<_UElements...>()
810 && _TNTC<_Dummy>::template
811 _NonNestedTuple<tuple<_UElements...>&&>(),
812 bool>::type=true>
813 tuple(allocator_arg_t __tag, const _Alloc& __a,
814 tuple<_UElements...>&& __in)
815 : _Inherited(__tag, __a,
816 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
817 { }
818
819 template<typename _Alloc, typename _Dummy = void,
820 typename... _UElements, typename
821 enable_if<_TMCT<_UElements...>::template
822 _MoveConstructibleTuple<_UElements...>()
823 && !_TMCT<_UElements...>::template
824 _ImplicitlyMoveConvertibleTuple<_UElements...>()
825 && _TNTC<_Dummy>::template
826 _NonNestedTuple<tuple<_UElements...>&&>(),
827 bool>::type=false>
828 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
829 tuple<_UElements...>&& __in)
830 : _Inherited(__tag, __a,
831 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
832 { }
833
834 tuple&
835 operator=(const tuple& __in)
836 {
837 static_cast<_Inherited&>(*this) = __in;
838 return *this;
839 }
840
841 tuple&
842 operator=(tuple&& __in)
843 noexcept(is_nothrow_move_assignable<_Inherited>::value)
844 {
845 static_cast<_Inherited&>(*this) = std::move(__in);
846 return *this;
847 }
848
849 template<typename... _UElements>
850 typename
851 enable_if<sizeof...(_UElements)
852 == sizeof...(_Elements), tuple&>::type
853 operator=(const tuple<_UElements...>& __in)
854 {
855 static_cast<_Inherited&>(*this) = __in;
856 return *this;
857 }
858
859 template<typename... _UElements>
860 typename
861 enable_if<sizeof...(_UElements)
862 == sizeof...(_Elements), tuple&>::type
863 operator=(tuple<_UElements...>&& __in)
864 {
865 static_cast<_Inherited&>(*this) = std::move(__in);
866 return *this;
867 }
868
869 void
870 swap(tuple& __in)
871 noexcept(noexcept(__in._M_swap(__in)))
872 { _Inherited::_M_swap(__in); }
873 };
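The allocator-extended constructors above all take std::allocator_arg first and forward the allocator to _Tuple_impl, which passes it on only to elements that are allocator-aware. A minimal usage sketch (the function name, allocator, and element types are illustrative, not part of the header above):

#include <memory>
#include <string>
#include <tuple>

// Illustrative only; not part of the header above.
void allocator_extended_example() {
  std::allocator<char> a;
  // std::string is allocator-aware and is constructed with 'a'; int ignores it.
  std::tuple<int, std::string> t(std::allocator_arg, a, 42, "hello");
  (void)t;
}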
874
875#if __cpp_deduction_guides >= 201606
876 template<typename... _UTypes>
877 tuple(_UTypes...) -> tuple<_UTypes...>;
878 template<typename _T1, typename _T2>
879 tuple(pair<_T1, _T2>) -> tuple<_T1, _T2>;
880 template<typename _Alloc, typename... _UTypes>
881 tuple(allocator_arg_t, _Alloc, _UTypes...) -> tuple<_UTypes...>;
882 template<typename _Alloc, typename _T1, typename _T2>
883 tuple(allocator_arg_t, _Alloc, pair<_T1, _T2>) -> tuple<_T1, _T2>;
884 template<typename _Alloc, typename... _UTypes>
885 tuple(allocator_arg_t, _Alloc, tuple<_UTypes...>) -> tuple<_UTypes...>;
886#endif
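When the guarded deduction guides above are available (C++17), the element types can be deduced from the initializer. A small sketch, assuming a C++17 compiler (names are illustrative):

#include <memory>
#include <tuple>
#include <utility>

// Illustrative only; requires the deduction guides guarded above.
#if __cpp_deduction_guides >= 201606
void ctad_example() {
  std::tuple t1(1, 2.5, 'c');   // deduced as std::tuple<int, double, char>
  std::pair  p(1, 2.5);
  std::tuple t2(p);             // pair guide: std::tuple<int, double>
  std::tuple t3(std::allocator_arg, std::allocator<int>(), 1, 2.5);
                                // allocator-extended guide: std::tuple<int, double>
}
#endif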
887
888 // Explicit specialization, zero-element tuple.
889 template<>
890 class tuple<>
891 {
892 public:
893 void swap(tuple&) noexcept { /* no-op */ }
894 // We need the default since we're going to define no-op
895 // allocator constructors.
896 tuple() = default;
897 // No-op allocator constructors.
898 template<typename _Alloc>
899 tuple(allocator_arg_t, const _Alloc&) { }
900 template<typename _Alloc>
901 tuple(allocator_arg_t, const _Alloc&, const tuple&) { }
902 };
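The no-op allocator constructors above exist only so the allocator-extended interface stays uniform for the empty tuple; e.g. (names illustrative):

#include <memory>
#include <tuple>

// Illustrative only; not part of the header above.
void empty_tuple_example() {
  std::allocator<int> a;
  std::tuple<> e1;                          // defaulted default constructor
  std::tuple<> e2(std::allocator_arg, a);   // allocator is accepted and ignored
  e1.swap(e2);                              // noexcept no-op
}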
903
904 /// Partial specialization, 2-element tuple.
905 /// Includes construction and assignment from a pair.
906 template<typename _T1, typename _T2>
907 class tuple<_T1, _T2> : public _Tuple_impl<0, _T1, _T2>
908 {
909 typedef _Tuple_impl<0, _T1, _T2> _Inherited;
910
911 public:
912 template <typename _U1 = _T1,
913 typename _U2 = _T2,
914 typename enable_if<__and_<
915 __is_implicitly_default_constructible<_U1>,
916 __is_implicitly_default_constructible<_U2>>
917 ::value, bool>::type = true>
918
919 constexpr tuple()
920 : _Inherited() { }
921
922 template <typename _U1 = _T1,
923 typename _U2 = _T2,
924 typename enable_if<
925 __and_<
926 is_default_constructible<_U1>,
927 is_default_constructible<_U2>,
928 __not_<
929 __and_<__is_implicitly_default_constructible<_U1>,
930 __is_implicitly_default_constructible<_U2>>>>
931 ::value, bool>::type = false>
932
933 explicit constexpr tuple()
934 : _Inherited() { }
935
936 // Shortcut for the cases where constructors taking _T1, _T2
937 // need to be constrained.
938 template<typename _Dummy> using _TCC =
939 _TC<is_same<_Dummy, void>::value, _T1, _T2>;
940
941 template<typename _Dummy = void, typename
942 enable_if<_TCC<_Dummy>::template
943 _ConstructibleTuple<_T1, _T2>()
944 && _TCC<_Dummy>::template
945 _ImplicitlyConvertibleTuple<_T1, _T2>(),
946 bool>::type = true>
947 constexpr tuple(const _T1& __a1, const _T2& __a2)
948 : _Inherited(__a1, __a2) { }
949
950 template<typename _Dummy = void, typename
951 enable_if<_TCC<_Dummy>::template
952 _ConstructibleTuple<_T1, _T2>()
953 && !_TCC<_Dummy>::template
954 _ImplicitlyConvertibleTuple<_T1, _T2>(),
955 bool>::type = false>
956 explicit constexpr tuple(const _T1& __a1, const _T2& __a2)
957 : _Inherited(__a1, __a2) { }
958
959 // Shortcut for the cases where constructors taking _U1, _U2
960 // need to be constrained.
961 using _TMC = _TC<true, _T1, _T2>;
962
963 template<typename _U1, typename _U2, typename
964 enable_if<_TMC::template
965 _MoveConstructibleTuple<_U1, _U2>()
966 && _TMC::template
967 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
968 && !is_same<typename decay<_U1>::type,
969 allocator_arg_t>::value,
970 bool>::type = true>
971 constexpr tuple(_U1&& __a1, _U2&& __a2)
972 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
973
974 template<typename _U1, typename _U2, typename
975 enable_if<_TMC::template
976 _MoveConstructibleTuple<_U1, _U2>()
977 && !_TMC::template
978 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
979 && !is_same<typename decay<_U1>::type,
980 allocator_arg_t>::value,
981 bool>::type = false>
982 explicit constexpr tuple(_U1&& __a1, _U2&& __a2)
983 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
984
985 constexpr tuple(const tuple&) = default;
986
987 constexpr tuple(tuple&&) = default;
988
989 template<typename _U1, typename _U2, typename
990 enable_if<_TMC::template
991 _ConstructibleTuple<_U1, _U2>()
992 && _TMC::template
993 _ImplicitlyConvertibleTuple<_U1, _U2>(),
994 bool>::type = true>
995 constexpr tuple(const tuple<_U1, _U2>& __in)
996 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
997
998 template<typename _U1, typename _U2, typename
999 enable_if<_TMC::template
1000 _ConstructibleTuple<_U1, _U2>()
1001 && !_TMC::template
1002 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1003 bool>::type = false>
1004 explicit constexpr tuple(const tuple<_U1, _U2>& __in)
1005 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
1006
1007 template<typename _U1, typename _U2, typename
1008 enable_if<_TMC::template
1009 _MoveConstructibleTuple<_U1, _U2>()
1010 && _TMC::template
1011 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1012 bool>::type = true>
1013 constexpr tuple(tuple<_U1, _U2>&& __in)
1014 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1015
1016 template<typename _U1, typename _U2, typename
1017 enable_if<_TMC::template
1018 _MoveConstructibleTuple<_U1, _U2>()
1019 && !_TMC::template
1020 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1021 bool>::type = false>
1022 explicit constexpr tuple(tuple<_U1, _U2>&& __in)
1023 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1024
1025 template<typename _U1, typename _U2, typename
1026 enable_if<_TMC::template
1027 _ConstructibleTuple<_U1, _U2>()
1028 && _TMC::template
1029 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1030 bool>::type = true>
1031 constexpr tuple(const pair<_U1, _U2>& __in)
1032 : _Inherited(__in.first, __in.second) { }
1033
1034 template<typename _U1, typename _U2, typename
1035 enable_if<_TMC::template
1036 _ConstructibleTuple<_U1, _U2>()
1037 && !_TMC::template
1038 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1039 bool>::type = false>
1040 explicit constexpr tuple(const pair<_U1, _U2>& __in)
1041 : _Inherited(__in.first, __in.second) { }
1042
1043 template<typename _U1, typename _U2, typename
1044 enable_if<_TMC::template
1045 _MoveConstructibleTuple<_U1, _U2>()
1046 && _TMC::template
1047 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1048 bool>::type = true>
1049 constexpr tuple(pair<_U1, _U2>&& __in)
1050 : _Inherited(std::forward<_U1>(__in.first),
1051 std::forward<_U2>(__in.second)) { }
1052
1053 template<typename _U1, typename _U2, typename
1054 enable_if<_TMC::template
1055 _MoveConstructibleTuple<_U1, _U2>()
1056 && !_TMC::template
1057 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1058 bool>::type = false>
1059 explicit constexpr tuple(pair<_U1, _U2>&& __in)
1060 : _Inherited(std::forward<_U1>(__in.first),
1061 std::forward<_U2>(__in.second)) { }
1062
1063 // Allocator-extended constructors.
1064
1065 template<typename _Alloc>
1066 tuple(allocator_arg_t __tag, const _Alloc& __a)
1067 : _Inherited(__tag, __a) { }
1068
1069 template<typename _Alloc, typename _Dummy = void,
1070 typename enable_if<
1071 _TCC<_Dummy>::template
1072 _ConstructibleTuple<_T1, _T2>()
1073 && _TCC<_Dummy>::template
1074 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1075 bool>::type=true>
1076
1077 tuple(allocator_arg_t __tag, const _Alloc& __a,
1078 const _T1& __a1, const _T2& __a2)
1079 : _Inherited(__tag, __a, __a1, __a2) { }
1080
1081 template<typename _Alloc, typename _Dummy = void,
1082 typename enable_if<
1083 _TCC<_Dummy>::template
1084 _ConstructibleTuple<_T1, _T2>()
1085 && !_TCC<_Dummy>::template
1086 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1087 bool>::type=false>
1088
1089 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1090 const _T1& __a1, const _T2& __a2)
1091 : _Inherited(__tag, __a, __a1, __a2) { }
1092
1093 template<typename _Alloc, typename _U1, typename _U2, typename
1094 enable_if<_TMC::template
1095 _MoveConstructibleTuple<_U1, _U2>()
1096 && _TMC::template
1097 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1098 bool>::type = true>
1099 tuple(allocator_arg_t __tag, const _Alloc& __a, _U1&& __a1, _U2&& __a2)
1100 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1101 std::forward<_U2>(__a2)) { }
1102
1103 template<typename _Alloc, typename _U1, typename _U2, typename
1104 enable_if<_TMC::template
1105 _MoveConstructibleTuple<_U1, _U2>()
1106 && !_TMC::template
1107 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1108 bool>::type = false>
1109 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1110 _U1&& __a1, _U2&& __a2)
1111 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1112 std::forward<_U2>(__a2)) { }
1113
1114 template<typename _Alloc>
1115 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
1116 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
1117
1118 template<typename _Alloc>
1119 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
1120 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
1121
1122 template<typename _Alloc, typename _U1, typename _U2, typename
1123 enable_if<_TMC::template
1124 _ConstructibleTuple<_U1, _U2>()
1125 && _TMC::template
1126 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1127 bool>::type = true>
1128 tuple(allocator_arg_t __tag, const _Alloc& __a,
1129 const tuple<_U1, _U2>& __in)
1130 : _Inherited(__tag, __a,
1131 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1132 { }
1133
1134 template<typename _Alloc, typename _U1, typename _U2, typename
1135 enable_if<_TMC::template
1136 _ConstructibleTuple<_U1, _U2>()
1137 && !_TMC::template
1138 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1139 bool>::type = false>
1140 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1141 const tuple<_U1, _U2>& __in)
1142 : _Inherited(__tag, __a,
1143 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1144 { }
1145
1146 template<typename _Alloc, typename _U1, typename _U2, typename
1147 enable_if<_TMC::template
1148 _MoveConstructibleTuple<_U1, _U2>()
1149 && _TMC::template
1150 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1151 bool>::type = true>
1152 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple<_U1, _U2>&& __in)
1153 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1154 { }
1155
1156 template<typename _Alloc, typename _U1, typename _U2, typename
1157 enable_if<_TMC::template
1158 _MoveConstructibleTuple<_U1, _U2>()
1159 && !_TMC::template
1160 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1161 bool>::type = false>
1162 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1163 tuple<_U1, _U2>&& __in)
1164 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1165 { }
1166
1167 template<typename _Alloc, typename _U1, typename _U2, typename
1168 enable_if<_TMC::template
1169 _ConstructibleTuple<_U1, _U2>()
1170 && _TMC::template
1171 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1172 bool>::type = true>
1173 tuple(allocator_arg_t __tag, const _Alloc& __a,
1174 const pair<_U1, _U2>& __in)
1175 : _Inherited(__tag, __a, __in.first, __in.second) { }
1176
1177 template<typename _Alloc, typename _U1, typename _U2, typename
1178 enable_if<_TMC::template
1179 _ConstructibleTuple<_U1, _U2>()
1180 && !_TMC::template
1181 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1182 bool>::type = false>
1183 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1184 const pair<_U1, _U2>& __in)
1185 : _Inherited(__tag, __a, __in.first, __in.second) { }
1186
1187 template<typename _Alloc, typename _U1, typename _U2, typename
1188 enable_if<_TMC::template
1189 _MoveConstructibleTuple<_U1, _U2>()
1190 && _TMC::template
1191 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1192 bool>::type = true>
1193 tuple(allocator_arg_t __tag, const _Alloc& __a, pair<_U1, _U2>&& __in)
1194 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1195 std::forward<_U2>(__in.second)) { }
1196
1197 template<typename _Alloc, typename _U1, typename _U2, typename
1198 enable_if<_TMC::template
1199 _MoveConstructibleTuple<_U1, _U2>()
1200 && !_TMC::template
1201 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1202 bool>::type = false>
1203 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1204 pair<_U1, _U2>&& __in)
1205 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1206 std::forward<_U2>(__in.second)) { }
1207
1208 tuple&
1209 operator=(const tuple& __in)
1210 {
1211 static_cast<_Inherited&>(*this) = __in;
1212 return *this;
1213 }
1214
1215 tuple&
1216 operator=(tuple&& __in)
1217 noexcept(is_nothrow_move_assignable<_Inherited>::value)
1218 {
1219 static_cast<_Inherited&>(*this) = std::move(__in);
1220 return *this;
1221 }
1222
1223 template<typename _U1, typename _U2>
1224 tuple&
1225 operator=(const tuple<_U1, _U2>& __in)
1226 {
1227 static_cast<_Inherited&>(*this) = __in;
1228 return *this;
1229 }
1230
1231 template<typename _U1, typename _U2>
1232 tuple&
1233 operator=(tuple<_U1, _U2>&& __in)
1234 {
1235 static_cast<_Inherited&>(*this) = std::move(__in);
1236 return *this;
1237 }
1238
1239 template<typename _U1, typename _U2>
1240 tuple&
1241 operator=(const pair<_U1, _U2>& __in)
1242 {
1243 this->_M_head(*this) = __in.first;
1244 this->_M_tail(*this)._M_head(*this) = __in.second;
1245 return *this;
1246 }
1247
1248 template<typename _U1, typename _U2>
1249 tuple&
1250 operator=(pair<_U1, _U2>&& __in)
1251 {
1252 this->_M_head(*this) = std::forward<_U1>(__in.first);
1253 this->_M_tail(*this)._M_head(*this) = std::forward<_U2>(__in.second);
1254 return *this;
1255 }
1256
1257 void
1258 swap(tuple& __in)
1259 noexcept(noexcept(__in._M_swap(__in)))
1260 { _Inherited::_M_swap(__in); }
1261 };
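The 2-element specialization exists mainly for the pair conversions and assignments shown above; a brief sketch of both paths (names are illustrative):

#include <string>
#include <tuple>
#include <utility>

// Illustrative only; not part of the header above.
void pair_interop_example() {
  std::pair<int, std::string>  p(1, "one");
  std::tuple<int, std::string> t(p);             // converting constructor from pair
  t = std::make_pair(2, std::string("two"));     // operator=(pair&&): assigns .first/.second
}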
1262
1263
1264 /// class tuple_size
1265 template<typename... _Elements>
1266 struct tuple_size<tuple<_Elements...>>
1267 : public integral_constant<std::size_t, sizeof...(_Elements)> { };
1268
1269#if __cplusplus > 201402L
1270 template <typename _Tp>
1271 inline constexpr size_t tuple_size_v = tuple_size<_Tp>::value;
1272#endif
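tuple_size (and tuple_size_v past C++14, per the guard above) is a compile-time constant; a minimal check (the alias is illustrative):

#include <tuple>

// Illustrative only; not part of the header above.
using ExampleSizeTuple = std::tuple<int, char, double>;
static_assert(std::tuple_size<ExampleSizeTuple>::value == 3,
              "a three-element tuple reports size 3");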
1273
1274 /**
1275 * Recursive case for tuple_element: strip off the first element in
1276 * the tuple and retrieve the (i-1)th element of the remaining tuple.
1277 */
1278 template<std::size_t __i, typename _Head, typename... _Tail>
1279 struct tuple_element<__i, tuple<_Head, _Tail...> >
1280 : tuple_element<__i - 1, tuple<_Tail...> > { };
1281
1282 /**
1283 * Basis case for tuple_element: The first element is the one we're seeking.
1284 */
1285 template<typename _Head, typename... _Tail>
1286 struct tuple_element<0, tuple<_Head, _Tail...> >
1287 {
1288 typedef _Head type;
1289 };
1290
1291 /**
1292 * Error case for tuple_element: invalid index.
1293 */
1294 template<size_t __i>
1295 struct tuple_element<__i, tuple<>>
1296 {
1297 static_assert(__i < tuple_size<tuple<>>::value,
1298 "tuple index is in range");
1299 };
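The recursion above peels one _Head per step until the basis case at index 0 is reached; a small sketch (the alias is illustrative):

#include <tuple>
#include <type_traits>

// Illustrative only; not part of the header above.
using ExampleTuple = std::tuple<char, int, double>;
// Index 2 strips char, then int; the basis case then yields double.
static_assert(std::is_same<std::tuple_element<2, ExampleTuple>::type,
                           double>::value,
              "third element is double");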
1300
1301 template<std::size_t __i, typename _Head, typename... _Tail>
1302 constexpr _Head&
1303 __get_helper(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1304 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1305
1306 template<std::size_t __i, typename _Head, typename... _Tail>
1307 constexpr const _Head&
1308 __get_helper(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1309 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
Step 11: Calling '_Tuple_impl::_M_head'
Step 14: Returning from '_Tuple_impl::_M_head'
Step 34: Calling '_Tuple_impl::_M_head'
Step 37: Returning from '_Tuple_impl::_M_head'
1310
1311 /// Return a reference to the ith element of a tuple.
1312 template<std::size_t __i, typename... _Elements>
1313 constexpr __tuple_element_t<__i, tuple<_Elements...>>&
1314 get(tuple<_Elements...>& __t) noexcept
1315 { return std::__get_helper<__i>(__t); }
1316
1317 /// Return a const reference to the ith element of a const tuple.
1318 template<std::size_t __i, typename... _Elements>
1319 constexpr const __tuple_element_t<__i, tuple<_Elements...>>&
1320 get(const tuple<_Elements...>& __t) noexcept
1321 { return std::__get_helper<__i>(__t); }
Step 10: Calling '__get_helper'
Step 15: Returning from '__get_helper'
Step 33: Calling '__get_helper'
Step 38: Returning from '__get_helper'
1322
1323 /// Return an rvalue reference to the ith element of a tuple rvalue.
1324 template<std::size_t __i, typename... _Elements>
1325 constexpr __tuple_element_t<__i, tuple<_Elements...>>&&
1326 get(tuple<_Elements...>&& __t) noexcept
1327 {
1328 typedef __tuple_element_t<__i, tuple<_Elements...>> __element_type;
1329 return std::forward<__element_type&&>(std::get<__i>(__t));
1330 }
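__get_helper resolves the index through the matching _Tuple_impl base, so std::get<i> is a pure compile-time lookup; a sketch of the three reference flavours above (names illustrative):

#include <string>
#include <tuple>
#include <utility>

// Illustrative only; not part of the header above.
void get_by_index_example() {
  std::tuple<int, std::string> t(1, "x");
  std::get<0>(t) = 2;                           // lvalue overload: int&
  const auto& ct = t;
  int copy = std::get<0>(ct);                   // const overload: const int&
  std::string s = std::get<1>(std::move(t));    // rvalue overload: std::string&&
  (void)copy; (void)s;
}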
1331
1332#if __cplusplus > 201103L
1333
1334#define __cpp_lib_tuples_by_type 201304
1335
1336 template<typename _Head, size_t __i, typename... _Tail>
1337 constexpr _Head&
1338 __get_helper2(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1339 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1340
1341 template<typename _Head, size_t __i, typename... _Tail>
1342 constexpr const _Head&
1343 __get_helper2(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1344 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1345
1346 /// Return a reference to the unique element of type _Tp of a tuple.
1347 template <typename _Tp, typename... _Types>
1348 constexpr _Tp&
1349 get(tuple<_Types...>& __t) noexcept
1350 { return std::__get_helper2<_Tp>(__t); }
1351
1352 /// Return a reference to the unique element of type _Tp of a tuple rvalue.
1353 template <typename _Tp, typename... _Types>
1354 constexpr _Tp&&
1355 get(tuple<_Types...>&& __t) noexcept
1356 { return std::forward<_Tp&&>(std::__get_helper2<_Tp>(__t)); }
1357
1358 /// Return a const reference to the unique element of type _Tp of a tuple.
1359 template <typename _Tp, typename... _Types>
1360 constexpr const _Tp&
1361 get(const tuple<_Types...>& __t) noexcept
1362 { return std::__get_helper2<_Tp>(__t); }
1363#endif
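The guarded block above is compiled only after C++11 and adds get-by-type, which is ill-formed unless the requested type occurs exactly once in the tuple. A sketch, assuming at least C++14 (names illustrative):

#include <string>
#include <tuple>

// Illustrative only; requires C++14 (__cpp_lib_tuples_by_type).
#if __cplusplus > 201103L
void get_by_type_example() {
  std::tuple<int, std::string> t(1, "x");
  std::get<int>(t)         = 2;      // unique int element
  std::get<std::string>(t) = "y";    // unique std::string element
}
#endif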
1364
1365 // This class performs the comparison operations on tuples
1366 template<typename _Tp, typename _Up, size_t __i, size_t __size>
1367 struct __tuple_compare
1368 {
1369 static constexpr bool
1370 __eq(const _Tp& __t, const _Up& __u)
1371 {
1372 return bool(std::get<__i>(__t) == std::get<__i>(__u))
1373 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__eq(__t, __u);
1374 }
1375
1376 static constexpr bool
1377 __less(const _Tp& __t, const _Up& __u)
1378 {
1379 return bool(std::get<__i>(__t) < std::get<__i>(__u))
1380 || (!bool(std::get<__i>(__u) < std::get<__i>(__t))
1381 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__less(__t, __u));
1382 }
1383 };
1384
1385 template<typename _Tp, typename _Up, size_t __size>
1386 struct __tuple_compare<_Tp, _Up, __size, __size>
1387 {
1388 static constexpr bool
1389 __eq(const _Tp&, const _Up&) { return true; }
1390
1391 static constexpr bool
1392 __less(const _Tp&, const _Up&) { return false; }
1393 };
1394
1395 template<typename... _TElements, typename... _UElements>
1396 constexpr bool
1397 operator==(const tuple<_TElements...>& __t,
1398 const tuple<_UElements...>& __u)
1399 {
1400 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1401 "tuple objects can only be compared if they have equal sizes.");
1402 using __compare = __tuple_compare<tuple<_TElements...>,
1403 tuple<_UElements...>,
1404 0, sizeof...(_TElements)>;
1405 return __compare::__eq(__t, __u);
1406 }
1407
1408 template<typename... _TElements, typename... _UElements>
1409 constexpr bool
1410 operator<(const tuple<_TElements...>& __t,
1411 const tuple<_UElements...>& __u)
1412 {
1413 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1414 "tuple objects can only be compared if they have equal sizes.");
1415 using __compare = __tuple_compare<tuple<_TElements...>,
1416 tuple<_UElements...>,
1417 0, sizeof...(_TElements)>;
1418 return __compare::__less(__t, __u);
1419 }
1420
1421 template<typename... _TElements, typename... _UElements>
1422 constexpr bool
1423 operator!=(const tuple<_TElements...>& __t,
1424 const tuple<_UElements...>& __u)
1425 { return !(__t == __u); }
1426
1427 template<typename... _TElements, typename... _UElements>
1428 constexpr bool
1429 operator>(const tuple<_TElements...>& __t,
1430 const tuple<_UElements...>& __u)
1431 { return __u < __t; }
1432
1433 template<typename... _TElements, typename... _UElements>
1434 constexpr bool
1435 operator<=(const tuple<_TElements...>& __t,
1436 const tuple<_UElements...>& __u)
1437 { return !(__u < __t); }
1438
1439 template<typename... _TElements, typename... _UElements>
1440 constexpr bool
1441 operator>=(const tuple<_TElements...>& __t,
1442 const tuple<_UElements...>& __u)
1443 { return !(__t < __u); }
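__tuple_compare recurses element by element, so the operators above give lexicographic ordering and element-wise equality for equal-sized tuples; a small sketch (names illustrative):

#include <cassert>
#include <tuple>

// Illustrative only; not part of the header above.
void tuple_compare_example() {
  assert(std::make_tuple(1, 2) == std::make_tuple(1, 2));  // element-wise equality
  assert(std::make_tuple(1, 2) <  std::make_tuple(1, 3));  // tie on 1, second element decides
  assert(std::make_tuple(2, 0) >  std::make_tuple(1, 9));  // first element already decides
}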
1444
1445 // NB: DR 705.
1446 template<typename... _Elements>
1447 constexpr tuple<typename __decay_and_strip<_Elements>::__type...>
1448 make_tuple(_Elements&&... __args)
1449 {
1450 typedef tuple<typename __decay_and_strip<_Elements>::__type...>
1451 __result_type;
1452 return __result_type(std::forward<_Elements>(__args)...);
1453 }
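__decay_and_strip decays each argument and unwraps reference_wrapper (the DR 705 note above), so std::ref arguments become reference elements; a sketch (names illustrative):

#include <functional>
#include <tuple>

// Illustrative only; not part of the header above.
void make_tuple_example() {
  int n = 0;
  const char* msg = "hi";
  auto t = std::make_tuple(n, std::ref(n), msg);
  // decltype(t) is std::tuple<int, int&, const char*>: n is copied once,
  // std::ref(n) is stripped back to int&, and msg stays a plain pointer.
  std::get<1>(t) = 7;   // writes through the stored reference, so n == 7
}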
1454
1455 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1456 // 2275. Why is forward_as_tuple not constexpr?
1457 template<typename... _Elements>
1458 constexpr tuple<_Elements&&...>
1459 forward_as_tuple(_Elements&&... __args) noexcept
1460 { return tuple<_Elements&&...>(std::forward<_Elements>(__args)...); }
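forward_as_tuple builds a tuple of (rvalue) references without copying its arguments, which is what piecewise construction of a pair relies on; a sketch (the container, key, and value are illustrative):

#include <map>
#include <string>
#include <tuple>
#include <utility>

// Illustrative only; not part of the header above.
void forward_as_tuple_example() {
  std::map<std::string, std::pair<int, std::string>> m;
  // The two reference tuples are unpacked to construct the key and the
  // mapped pair in place, with no intermediate copies of the arguments.
  m.emplace(std::piecewise_construct,
            std::forward_as_tuple("answer"),
            std::forward_as_tuple(42, "forty-two"));
}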
1461
1462 template<size_t, typename, typename, size_t>
1463 struct __make_tuple_impl;
1464
1465 template<size_t _Idx, typename _Tuple, typename... _Tp, size_t _Nm>
1466 struct __make_tuple_impl<_Idx, tuple<_Tp...>, _Tuple, _Nm>
1467 : __make_tuple_impl<_Idx + 1,
1468 tuple<_Tp..., __tuple_element_t<_Idx, _Tuple>>,
1469 _Tuple, _Nm>
1470 { };
1471
1472 template<std::size_t _Nm, typename _Tuple, typename... _Tp>
1473 struct __make_tuple_impl<_Nm, tuple<_Tp...>, _Tuple, _Nm>
1474 {
1475 typedef tuple<_Tp...> __type;
1476 };
1477
1478 template<typename _Tuple>
1479 struct __do_make_tuple
1480 : __make_tuple_impl<0, tuple<>, _Tuple, std::tuple_size<_Tuple>::value>
1481 { };
1482
1483 // Returns the std::tuple equivalent of a tuple-like type.
1484 template<typename _Tuple>
1485 struct __make_tuple
1486 : public __do_make_tuple<typename std::remove_cv
1487 <typename std::remove_reference<_Tuple>::type>::type>
1488 { };
1489
1490 // Combines several std::tuple's into a single one.
1491 template<typename...>
1492 struct __combine_tuples;
1493
1494 template<>
1495 struct _