Bug Summary

File: lib/Analysis/MemorySSA.cpp
Warning: line 1670, column 5
Called C++ object pointer is null
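The checker fires when a member function is invoked through a pointer that is provably null on at least one feasible path. A minimal, hypothetical sketch of the pattern (not taken from MemorySSA.cpp; the names are illustrative):

struct Node {
  void visit() {}
};

// On the path where N is null and Skip is false, the second call invokes
// visit() on a null object pointer -- the situation reported above.
void example(Node *N, bool Skip) {
  if (N)
    N->visit(); // guarded, fine
  if (Skip)
    return;
  N->visit();   // warning: called C++ object pointer is null
}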

Annotated Source Code

/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp

1//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the MemorySSA class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/MemorySSA.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/DenseMapInfo.h"
17#include "llvm/ADT/DenseSet.h"
18#include "llvm/ADT/DepthFirstIterator.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/Optional.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/Analysis/IteratedDominanceFrontier.h"
29#include "llvm/Analysis/MemoryLocation.h"
30#include "llvm/IR/AssemblyAnnotationWriter.h"
31#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/CallSite.h"
33#include "llvm/IR/Dominators.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/IntrinsicInst.h"
38#include "llvm/IR/Intrinsics.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/PassManager.h"
41#include "llvm/IR/Use.h"
42#include "llvm/Pass.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/CommandLine.h"
46#include "llvm/Support/Compiler.h"
47#include "llvm/Support/Debug.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/FormattedStream.h"
50#include "llvm/Support/raw_ostream.h"
51#include <algorithm>
52#include <cassert>
53#include <iterator>
54#include <memory>
55#include <utility>
56
57using namespace llvm;
58
59#define DEBUG_TYPE "memoryssa"
60
61INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
62 true)
63INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
64INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
65INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
66 true)
67
68INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
69 "Memory SSA Printer", false, false)
70INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
71INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
72 "Memory SSA Printer", false, false)
73
74static cl::opt<unsigned> MaxCheckLimit(
75 "memssa-check-limit", cl::Hidden, cl::init(100),
76 cl::desc("The maximum number of stores/phis MemorySSA"
77 "will consider trying to walk past (default = 100)"));
78
79static cl::opt<bool>
80 VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
81 cl::desc("Verify MemorySSA in legacy printer pass."));
82
83namespace llvm {
84
85/// \brief An assembly annotator class to print Memory SSA information in
86/// comments.
87class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
88 friend class MemorySSA;
89
90 const MemorySSA *MSSA;
91
92public:
93 MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
94
95 void emitBasicBlockStartAnnot(const BasicBlock *BB,
96 formatted_raw_ostream &OS) override {
97 if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
98 OS << "; " << *MA << "\n";
99 }
100
101 void emitInstructionAnnot(const Instruction *I,
102 formatted_raw_ostream &OS) override {
103 if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
104 OS << "; " << *MA << "\n";
105 }
106};
107
108} // end namespace llvm
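A hedged sketch of how this annotator is typically wired into printing (the helper name is hypothetical; MemorySSA's own printing support presumably follows the same pattern): construct the writer over a MemorySSA instance and hand it to Function::print.

static void printWithMemorySSA(const Function &F, const MemorySSA &MSSA,
                               raw_ostream &OS) {
  MemorySSAAnnotatedWriter Writer(&MSSA);
  // emitBasicBlockStartAnnot/emitInstructionAnnot add the "; ..." comments.
  F.print(OS, &Writer);
}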
109
110namespace {
111
112/// Our current alias analysis API differentiates heavily between calls and
113/// non-calls, and functions called on one usually assert on the other.
114/// This class encapsulates the distinction to simplify other code that wants
115/// "Memory affecting instructions and related data" to use as a key.
116/// For example, this class is used as a densemap key in the use optimizer.
117class MemoryLocOrCall {
118public:
119 bool IsCall = false;
120
121 MemoryLocOrCall() = default;
122 MemoryLocOrCall(MemoryUseOrDef *MUD)
123 : MemoryLocOrCall(MUD->getMemoryInst()) {}
124 MemoryLocOrCall(const MemoryUseOrDef *MUD)
125 : MemoryLocOrCall(MUD->getMemoryInst()) {}
126
127 MemoryLocOrCall(Instruction *Inst) {
128 if (ImmutableCallSite(Inst)) {
129 IsCall = true;
130 CS = ImmutableCallSite(Inst);
131 } else {
132 IsCall = false;
133 // There is no such thing as a memorylocation for a fence inst, and it is
134 // unique in that regard.
135 if (!isa<FenceInst>(Inst))
136 Loc = MemoryLocation::get(Inst);
137 }
138 }
139
140 explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
141
142 ImmutableCallSite getCS() const {
143 assert(IsCall);
144 return CS;
145 }
146
147 MemoryLocation getLoc() const {
148 assert(!IsCall);
149 return Loc;
150 }
151
152 bool operator==(const MemoryLocOrCall &Other) const {
153 if (IsCall != Other.IsCall)
154 return false;
155
156 if (IsCall)
157 return CS.getCalledValue() == Other.CS.getCalledValue();
158 return Loc == Other.Loc;
159 }
160
161private:
162 union {
163 ImmutableCallSite CS;
164 MemoryLocation Loc;
165 };
166};
167
168} // end anonymous namespace
169
170namespace llvm {
171
172template <> struct DenseMapInfo<MemoryLocOrCall> {
173 static inline MemoryLocOrCall getEmptyKey() {
174 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
175 }
176
177 static inline MemoryLocOrCall getTombstoneKey() {
178 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
179 }
180
181 static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
182 if (MLOC.IsCall)
183 return hash_combine(MLOC.IsCall,
184 DenseMapInfo<const Value *>::getHashValue(
185 MLOC.getCS().getCalledValue()));
186 return hash_combine(
187 MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
188 }
189
190 static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
191 return LHS == RHS;
192 }
193};
194
195enum class Reorderability { Always, IfNoAlias, Never };
196
197} // end namespace llvm
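As the comment above notes, MemoryLocOrCall exists so the use optimizer can key a DenseMap on "call or memory location". A hedged sketch of that usage, relying on the DenseMapInfo specialization above (the map and helper here are hypothetical, not part of this file):

// Hypothetical cache keyed the same way the use optimizer keys its state:
// calls hash by callee, everything else hashes by its MemoryLocation.
static void noteChecked(llvm::DenseMap<MemoryLocOrCall, unsigned long> &LastChecked,
                        const MemoryUseOrDef *MUD, unsigned long StackPos) {
  LastChecked[MemoryLocOrCall(MUD)] = StackPos; // uses getHashValue/isEqual above
}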
198
199/// This does one-way checks to see if Use could theoretically be hoisted above
200/// MayClobber. This will not check the other way around.
201///
202/// This assumes that, for the purposes of MemorySSA, Use comes directly after
203/// MayClobber, with no potentially clobbering operations in between them.
204/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
205static Reorderability getLoadReorderability(const LoadInst *Use,
206 const LoadInst *MayClobber) {
207 bool VolatileUse = Use->isVolatile();
208 bool VolatileClobber = MayClobber->isVolatile();
209 // Volatile operations may never be reordered with other volatile operations.
210 if (VolatileUse && VolatileClobber)
211 return Reorderability::Never;
212
213 // The lang ref allows reordering of volatile and non-volatile operations.
214 // Whether an aliasing nonvolatile load and volatile load can be reordered,
215 // though, is ambiguous. Because it may not be best to exploit this ambiguity,
216 // we only allow volatile/non-volatile reordering if the volatile and
217 // non-volatile operations don't alias.
218 Reorderability Result = VolatileUse || VolatileClobber
219 ? Reorderability::IfNoAlias
220 : Reorderability::Always;
221
222 // If a load is seq_cst, it cannot be moved above other loads. If its ordering
223 // is weaker, it can be moved above other loads. We just need to be sure that
224 // MayClobber isn't an acquire load, because loads can't be moved above
225 // acquire loads.
226 //
227 // Note that this explicitly *does* allow the free reordering of monotonic (or
228 // weaker) loads of the same address.
229 bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
230 bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
231 AtomicOrdering::Acquire);
232 if (SeqCstUse || MayClobberIsAcquire)
233 return Reorderability::Never;
234 return Result;
235}
236
237static bool instructionClobbersQuery(MemoryDef *MD,
238 const MemoryLocation &UseLoc,
239 const Instruction *UseInst,
240 AliasAnalysis &AA) {
241 Instruction *DefInst = MD->getMemoryInst();
242 assert(DefInst && "Defining instruction not actually an instruction");
243 ImmutableCallSite UseCS(UseInst);
244
245 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
246 // These intrinsics will show up as affecting memory, but they are just
247 // markers.
248 switch (II->getIntrinsicID()) {
249 case Intrinsic::lifetime_start:
250 if (UseCS)
251 return false;
252 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), UseLoc);
253 case Intrinsic::lifetime_end:
254 case Intrinsic::invariant_start:
255 case Intrinsic::invariant_end:
256 case Intrinsic::assume:
257 return false;
258 default:
259 break;
260 }
261 }
262
263 if (UseCS) {
264 ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
265 return I != MRI_NoModRef;
266 }
267
268 if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
269 if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) {
270 switch (getLoadReorderability(UseLoad, DefLoad)) {
271 case Reorderability::Always:
272 return false;
273 case Reorderability::Never:
274 return true;
275 case Reorderability::IfNoAlias:
276 return !AA.isNoAlias(UseLoc, MemoryLocation::get(DefLoad));
277 }
278 }
279 }
280
281 return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
282}
283
284static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
285 const MemoryLocOrCall &UseMLOC,
286 AliasAnalysis &AA) {
287 // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
288 // to exist while MemoryLocOrCall is pushed through places.
289 if (UseMLOC.IsCall)
290 return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
291 AA);
292 return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
293 AA);
294}
295
296// Return true when MD may alias MU, return false otherwise.
297bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
298 AliasAnalysis &AA) {
299 return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
300}
301
302namespace {
303
304struct UpwardsMemoryQuery {
305 // True if our original query started off as a call
306 bool IsCall = false;
307 // The pointer location we started the query with. This will be empty if
308 // IsCall is true.
309 MemoryLocation StartingLoc;
310 // This is the instruction we were querying about.
311 const Instruction *Inst = nullptr;
312 // The MemoryAccess we actually got called with, used to test local domination
313 const MemoryAccess *OriginalAccess = nullptr;
314
315 UpwardsMemoryQuery() = default;
316
317 UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
318 : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
319 if (!IsCall)
320 StartingLoc = MemoryLocation::get(Inst);
321 }
322};
323
324} // end anonymous namespace
325
326static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
327 AliasAnalysis &AA) {
328 Instruction *Inst = MD->getMemoryInst();
329 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
330 switch (II->getIntrinsicID()) {
331 case Intrinsic::lifetime_end:
332 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
333 default:
334 return false;
335 }
336 }
337 return false;
338}
339
340static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
341 const Instruction *I) {
342 // If the memory can't be changed, then loads of the memory can't be
343 // clobbered.
344 //
345 // FIXME: We should handle invariant groups, as well. It's a bit harder,
346 // because we need to pay close attention to invariant group barriers.
347 return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
348 AA.pointsToConstantMemory(cast<LoadInst>(I)->
349 getPointerOperand()));
350}
351
352/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
353/// in between `Start` and `ClobberAt` can clobber `Start`.
354///
355/// This is meant to be as simple and self-contained as possible. Because it
356/// uses no cache, etc., it can be relatively expensive.
357///
358/// \param Start The MemoryAccess that we want to walk from.
359/// \param ClobberAt A clobber for Start.
360/// \param StartLoc The MemoryLocation for Start.
361/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
362/// \param Query The UpwardsMemoryQuery we used for our search.
363/// \param AA The AliasAnalysis we used for our search.
364static void LLVM_ATTRIBUTE_UNUSED
365checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
366 const MemoryLocation &StartLoc, const MemorySSA &MSSA,
367 const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
368 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
369
370 if (MSSA.isLiveOnEntryDef(Start)) {
371 assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
372 "liveOnEntry must clobber itself");
373 return;
374 }
375
376 bool FoundClobber = false;
377 DenseSet<MemoryAccessPair> VisitedPhis;
378 SmallVector<MemoryAccessPair, 8> Worklist;
379 Worklist.emplace_back(Start, StartLoc);
380 // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
381 // is found, complain.
382 while (!Worklist.empty()) {
383 MemoryAccessPair MAP = Worklist.pop_back_val();
384 // All we care about is that nothing from Start to ClobberAt clobbers Start.
385 // We learn nothing from revisiting nodes.
386 if (!VisitedPhis.insert(MAP).second)
387 continue;
388
389 for (MemoryAccess *MA : def_chain(MAP.first)) {
390 if (MA == ClobberAt) {
391 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
392 // instructionClobbersQuery isn't essentially free, so don't use `|=`,
393 // since it won't let us short-circuit.
394 //
395 // Also, note that this can't be hoisted out of the `Worklist` loop,
396 // since MD may only act as a clobber for 1 of N MemoryLocations.
397 FoundClobber =
398 FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
399 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
400 }
401 break;
402 }
403
404 // We should never hit liveOnEntry, unless it's the clobber.
405 assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
406
407 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
408 (void)MD;
409 assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
410 "Found clobber before reaching ClobberAt!");
411 continue;
412 }
413
414 assert(isa<MemoryPhi>(MA));
415 Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
416 }
417 }
418
419 // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
420 // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
421 assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
422 "ClobberAt never acted as a clobber");
423}
424
425namespace {
426
427/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
428/// in one class.
429class ClobberWalker {
430 /// Save a few bytes by using unsigned instead of size_t.
431 using ListIndex = unsigned;
432
433 /// Represents a span of contiguous MemoryDefs, potentially ending in a
434 /// MemoryPhi.
435 struct DefPath {
436 MemoryLocation Loc;
437 // Note that, because we always walk in reverse, Last will always dominate
438 // First. Also note that First and Last are inclusive.
439 MemoryAccess *First;
440 MemoryAccess *Last;
441 Optional<ListIndex> Previous;
442
443 DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
444 Optional<ListIndex> Previous)
445 : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
446
447 DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
448 Optional<ListIndex> Previous)
449 : DefPath(Loc, Init, Init, Previous) {}
450 };
451
452 const MemorySSA &MSSA;
453 AliasAnalysis &AA;
454 DominatorTree &DT;
455 UpwardsMemoryQuery *Query;
456
457 // Phi optimization bookkeeping
458 SmallVector<DefPath, 32> Paths;
459 DenseSet<ConstMemoryAccessPair> VisitedPhis;
460
461 /// Find the nearest def or phi that `From` can legally be optimized to.
462 const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
463 assert(From->getNumOperands() && "Phi with no operands?");
464
465 BasicBlock *BB = From->getBlock();
466 MemoryAccess *Result = MSSA.getLiveOnEntryDef();
467 DomTreeNode *Node = DT.getNode(BB);
468 while ((Node = Node->getIDom())) {
469 auto *Defs = MSSA.getBlockDefs(Node->getBlock());
470 if (Defs)
471 return &*Defs->rbegin();
472 }
473 return Result;
474 }
475
476 /// Result of calling walkToPhiOrClobber.
477 struct UpwardsWalkResult {
478 /// The "Result" of the walk. Either a clobber, the last thing we walked, or
479 /// both.
480 MemoryAccess *Result;
481 bool IsKnownClobber;
482 };
483
484 /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
485 /// This will update Desc.Last as it walks. It will (optionally) also stop at
486 /// StopAt.
487 ///
488 /// This does not test for whether StopAt is a clobber
489 UpwardsWalkResult
490 walkToPhiOrClobber(DefPath &Desc,
491 const MemoryAccess *StopAt = nullptr) const {
492 assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
493
494 for (MemoryAccess *Current : def_chain(Desc.Last)) {
495 Desc.Last = Current;
496 if (Current == StopAt)
497 return {Current, false};
498
499 if (auto *MD = dyn_cast<MemoryDef>(Current))
500 if (MSSA.isLiveOnEntryDef(MD) ||
501 instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
502 return {MD, true};
503 }
504
505 assert(isa<MemoryPhi>(Desc.Last) &&
506 "Ended at a non-clobber that's not a phi?");
507 return {Desc.Last, false};
508 }
509
510 void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
511 ListIndex PriorNode) {
512 auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
513 upward_defs_end());
514 for (const MemoryAccessPair &P : UpwardDefs) {
515 PausedSearches.push_back(Paths.size());
516 Paths.emplace_back(P.second, P.first, PriorNode);
517 }
518 }
519
520 /// Represents a search that terminated after finding a clobber. This clobber
521 /// may or may not be present in the path of defs from LastNode..SearchStart,
522 /// since it may have been retrieved from cache.
523 struct TerminatedPath {
524 MemoryAccess *Clobber;
525 ListIndex LastNode;
526 };
527
528 /// Get an access that keeps us from optimizing to the given phi.
529 ///
530 /// PausedSearches is an array of indices into the Paths array. Its incoming
531 /// value is the indices of searches that stopped at the last phi optimization
532 /// target. It's left in an unspecified state.
533 ///
534 /// If this returns None, NewPaused is a vector of searches that terminated
535 /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
536 Optional<TerminatedPath>
537 getBlockingAccess(const MemoryAccess *StopWhere,
538 SmallVectorImpl<ListIndex> &PausedSearches,
539 SmallVectorImpl<ListIndex> &NewPaused,
540 SmallVectorImpl<TerminatedPath> &Terminated) {
541 assert(!PausedSearches.empty() && "No searches to continue?");
542
543 // BFS vs DFS really doesn't make a difference here, so just do a DFS with
544 // PausedSearches as our stack.
545 while (!PausedSearches.empty()) {
546 ListIndex PathIndex = PausedSearches.pop_back_val();
547 DefPath &Node = Paths[PathIndex];
548
549 // If we've already visited this path with this MemoryLocation, we don't
550 // need to do so again.
551 //
552 // NOTE: That we just drop these paths on the ground makes caching
553 // behavior sporadic. e.g. given a diamond:
554 // A
555 // B C
556 // D
557 //
558 // ...If we walk D, B, A, C, we'll only cache the result of phi
559 // optimization for A, B, and D; C will be skipped because it dies here.
560 // This arguably isn't the worst thing ever, since:
561 // - We generally query things in a top-down order, so if we got below D
562 // without needing cache entries for {C, MemLoc}, then chances are
563 // that those cache entries would end up ultimately unused.
564 // - We still cache things for A, so C only needs to walk up a bit.
565 // If this behavior becomes problematic, we can fix without a ton of extra
566 // work.
567 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
568 continue;
569
570 UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
571 if (Res.IsKnownClobber) {
572 assert(Res.Result != StopWhere);
573 // If this wasn't a cache hit, we hit a clobber when walking. That's a
574 // failure.
575 TerminatedPath Term{Res.Result, PathIndex};
576 if (!MSSA.dominates(Res.Result, StopWhere))
577 return Term;
578
579 // Otherwise, it's a valid thing to potentially optimize to.
580 Terminated.push_back(Term);
581 continue;
582 }
583
584 if (Res.Result == StopWhere) {
585 // We've hit our target. Save this path off for if we want to continue
586 // walking.
587 NewPaused.push_back(PathIndex);
588 continue;
589 }
590
591 assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
592 addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
593 }
594
595 return None;
596 }
597
598 template <typename T, typename Walker>
599 struct generic_def_path_iterator
600 : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
601 std::forward_iterator_tag, T *> {
602 generic_def_path_iterator() = default;
603 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
604
605 T &operator*() const { return curNode(); }
606
607 generic_def_path_iterator &operator++() {
608 N = curNode().Previous;
609 return *this;
610 }
611
612 bool operator==(const generic_def_path_iterator &O) const {
613 if (N.hasValue() != O.N.hasValue())
614 return false;
615 return !N.hasValue() || *N == *O.N;
616 }
617
618 private:
619 T &curNode() const { return W->Paths[*N]; }
620
621 Walker *W = nullptr;
622 Optional<ListIndex> N = None;
623 };
624
625 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
626 using const_def_path_iterator =
627 generic_def_path_iterator<const DefPath, const ClobberWalker>;
628
629 iterator_range<def_path_iterator> def_path(ListIndex From) {
630 return make_range(def_path_iterator(this, From), def_path_iterator());
631 }
632
633 iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
634 return make_range(const_def_path_iterator(this, From),
635 const_def_path_iterator());
636 }
637
638 struct OptznResult {
639 /// The path that contains our result.
640 TerminatedPath PrimaryClobber;
641 /// The paths that we can legally cache back from, but that aren't
642 /// necessarily the result of the Phi optimization.
643 SmallVector<TerminatedPath, 4> OtherClobbers;
644 };
645
646 ListIndex defPathIndex(const DefPath &N) const {
647 // The assert looks nicer if we don't need to do &N
648 const DefPath *NP = &N;
649 assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
650 "Out of bounds DefPath!");
651 return NP - &Paths.front();
652 }
653
654 /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
655 /// that act as legal clobbers. Note that this won't return *all* clobbers.
656 ///
657 /// Phi optimization algorithm tl;dr:
658 /// - Find the earliest def/phi, A, we can optimize to
659 /// - Find if all paths from the starting memory access ultimately reach A
660 /// - If not, optimization isn't possible.
661 /// - Otherwise, walk from A to another clobber or phi, A'.
662 /// - If A' is a def, we're done.
663 /// - If A' is a phi, try to optimize it.
664 ///
665 /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
666 /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
667 OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
668 const MemoryLocation &Loc) {
669 assert(Paths.empty() && VisitedPhis.empty() &&
670 "Reset the optimization state.");
671
672 Paths.emplace_back(Loc, Start, Phi, None);
673 // Stores how many "valid" optimization nodes we had prior to calling
674 // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
675 auto PriorPathsSize = Paths.size();
676
677 SmallVector<ListIndex, 16> PausedSearches;
678 SmallVector<ListIndex, 8> NewPaused;
679 SmallVector<TerminatedPath, 4> TerminatedPaths;
680
681 addSearches(Phi, PausedSearches, 0);
682
683 // Moves the TerminatedPath with the "most dominated" Clobber to the end of
684 // Paths.
685 auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
686 assert(!Paths.empty() && "Need a path to move")(static_cast <bool> (!Paths.empty() && "Need a path to move"
) ? void (0) : __assert_fail ("!Paths.empty() && \"Need a path to move\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 686, __extension__ __PRETTY_FUNCTION__))
;
687 auto Dom = Paths.begin();
688 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
689 if (!MSSA.dominates(I->Clobber, Dom->Clobber))
690 Dom = I;
691 auto Last = Paths.end() - 1;
692 if (Last != Dom)
693 std::iter_swap(Last, Dom);
694 };
695
696 MemoryPhi *Current = Phi;
697 while (true) {
698 assert(!MSSA.isLiveOnEntryDef(Current) &&
699 "liveOnEntry wasn't treated as a clobber?");
700
701 const auto *Target = getWalkTarget(Current);
702 // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
703 // optimization for the prior phi.
704 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
705 return MSSA.dominates(P.Clobber, Target);
706 }));
707
708 // FIXME: This is broken, because the Blocker may be reported to be
709 // liveOnEntry, and we'll happily wait for that to disappear (read: never)
710 // For the moment, this is fine, since we do nothing with blocker info.
711 if (Optional<TerminatedPath> Blocker = getBlockingAccess(
712 Target, PausedSearches, NewPaused, TerminatedPaths)) {
713
714 // Find the node we started at. We can't search based on N->Last, since
715 // we may have gone around a loop with a different MemoryLocation.
716 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
717 return defPathIndex(N) < PriorPathsSize;
718 });
719 assert(Iter != def_path_iterator());
720
721 DefPath &CurNode = *Iter;
722 assert(CurNode.Last == Current);
723
724 // Two things:
725 // A. We can't reliably cache all of NewPaused back. Consider a case
726 // where we have two paths in NewPaused; one of which can't optimize
727 // above this phi, whereas the other can. If we cache the second path
728 // back, we'll end up with suboptimal cache entries. We can handle
729 // cases like this a bit better when we either try to find all
730 // clobbers that block phi optimization, or when our cache starts
731 // supporting unfinished searches.
732 // B. We can't reliably cache TerminatedPaths back here without doing
733 // extra checks; consider a case like:
734 // T
735 // / \
736 // D C
737 // \ /
738 // S
739 // Where T is our target, C is a node with a clobber on it, D is a
740 // diamond (with a clobber *only* on the left or right node, N), and
741 // S is our start. Say we walk to D, through the node opposite N
742 // (read: ignoring the clobber), and see a cache entry in the top
743 // node of D. That cache entry gets put into TerminatedPaths. We then
744 // walk up to C (N is later in our worklist), find the clobber, and
745 // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
746 // the bottom part of D to the cached clobber, ignoring the clobber
747 // in N. Again, this problem goes away if we start tracking all
748 // blockers for a given phi optimization.
749 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
750 return {Result, {}};
751 }
752
753 // If there's nothing left to search, then all paths led to valid clobbers
754 // that we got from our cache; pick the nearest to the start, and allow
755 // the rest to be cached back.
756 if (NewPaused.empty()) {
757 MoveDominatedPathToEnd(TerminatedPaths);
758 TerminatedPath Result = TerminatedPaths.pop_back_val();
759 return {Result, std::move(TerminatedPaths)};
760 }
761
762 MemoryAccess *DefChainEnd = nullptr;
763 SmallVector<TerminatedPath, 4> Clobbers;
764 for (ListIndex Paused : NewPaused) {
765 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
766 if (WR.IsKnownClobber)
767 Clobbers.push_back({WR.Result, Paused});
768 else
769 // Micro-opt: If we hit the end of the chain, save it.
770 DefChainEnd = WR.Result;
771 }
772
773 if (!TerminatedPaths.empty()) {
774 // If we couldn't find the dominating phi/liveOnEntry in the above loop,
775 // do it now.
776 if (!DefChainEnd)
777 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
778 DefChainEnd = MA;
779
780 // If any of the terminated paths don't dominate the phi we'll try to
781 // optimize, we need to figure out what they are and quit.
782 const BasicBlock *ChainBB = DefChainEnd->getBlock();
783 for (const TerminatedPath &TP : TerminatedPaths) {
784 // Because we know that DefChainEnd is as "high" as we can go, we
785 // don't need local dominance checks; BB dominance is sufficient.
786 if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
787 Clobbers.push_back(TP);
788 }
789 }
790
791 // If we have clobbers in the def chain, find the one closest to Current
792 // and quit.
793 if (!Clobbers.empty()) {
794 MoveDominatedPathToEnd(Clobbers);
795 TerminatedPath Result = Clobbers.pop_back_val();
796 return {Result, std::move(Clobbers)};
797 }
798
799 assert(all_of(NewPaused,
800 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
801
802 // Because liveOnEntry is a clobber, this must be a phi.
803 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
804
805 PriorPathsSize = Paths.size();
806 PausedSearches.clear();
807 for (ListIndex I : NewPaused)
808 addSearches(DefChainPhi, PausedSearches, I);
809 NewPaused.clear();
810
811 Current = DefChainPhi;
812 }
813 }
814
815 void verifyOptResult(const OptznResult &R) const {
816 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
817 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
818 }));
819 }
820
821 void resetPhiOptznState() {
822 Paths.clear();
823 VisitedPhis.clear();
824 }
825
826public:
827 ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
828 : MSSA(MSSA), AA(AA), DT(DT) {}
829
830 void reset() {}
831
832 /// Finds the nearest clobber for the given query, optimizing phis if
833 /// possible.
834 MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
835 Query = &Q;
836
837 MemoryAccess *Current = Start;
838 // This walker pretends uses don't exist. If we're handed one, silently grab
839 // its def. (This has the nice side-effect of ensuring we never cache uses)
840 if (auto *MU = dyn_cast<MemoryUse>(Start))
841 Current = MU->getDefiningAccess();
842
843 DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
844 // Fast path for the overly-common case (no crazy phi optimization
845 // necessary)
846 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
847 MemoryAccess *Result;
848 if (WalkResult.IsKnownClobber) {
849 Result = WalkResult.Result;
850 } else {
851 OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
852 Current, Q.StartingLoc);
853 verifyOptResult(OptRes);
854 resetPhiOptznState();
855 Result = OptRes.PrimaryClobber.Clobber;
856 }
857
858#ifdef EXPENSIVE_CHECKS
859 checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
860#endif
861 return Result;
862 }
863
864 void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
865};
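A hedged sketch of how findClobber is typically driven (the real driver is the CachingWalker defined below; this free function is hypothetical): build an UpwardsMemoryQuery for the access's instruction and start the walk at its defining access.

static MemoryAccess *nearestClobber(ClobberWalker &W, MemoryUseOrDef *MUD) {
  UpwardsMemoryQuery Q(MUD->getMemoryInst(), MUD);
  // Start above the access itself; the walker would also accept a MemoryUse
  // directly and silently step to its defining access, as noted above.
  return W.findClobber(MUD->getDefiningAccess(), Q);
}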
866
867struct RenamePassData {
868 DomTreeNode *DTN;
869 DomTreeNode::const_iterator ChildIt;
870 MemoryAccess *IncomingVal;
871
872 RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
873 MemoryAccess *M)
874 : DTN(D), ChildIt(It), IncomingVal(M) {}
875
876 void swap(RenamePassData &RHS) {
877 std::swap(DTN, RHS.DTN);
878 std::swap(ChildIt, RHS.ChildIt);
879 std::swap(IncomingVal, RHS.IncomingVal);
880 }
881};
882
883} // end anonymous namespace
884
885namespace llvm {
886
887/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
888/// longer does caching on its own,
889/// but the name has been retained for the moment.
890class MemorySSA::CachingWalker final : public MemorySSAWalker {
891 ClobberWalker Walker;
892 bool AutoResetWalker = true;
893
894 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
895 void verifyRemoved(MemoryAccess *);
896
897public:
898 CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
899 ~CachingWalker() override = default;
900
901 using MemorySSAWalker::getClobberingMemoryAccess;
902
903 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
904 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
905 const MemoryLocation &) override;
906 void invalidateInfo(MemoryAccess *) override;
907
908 /// Whether we call resetClobberWalker() after each time we *actually* walk to
909 /// answer a clobber query.
910 void setAutoResetWalker(bool AutoReset) { AutoResetWalker = AutoReset; }
911
912 /// Drop the walker's persistent data structures.
913 void resetClobberWalker() { Walker.reset(); }
914
915 void verify(const MemorySSA *MSSA) override {
916 MemorySSAWalker::verify(MSSA);
917 Walker.verify(MSSA);
918 }
919};
920
921} // end namespace llvm
922
923void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
924 bool RenameAllUses) {
925 // Pass through values to our successors
926 for (const BasicBlock *S : successors(BB)) {
927 auto It = PerBlockAccesses.find(S);
928 // Rename the phi nodes in our successor block
929 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
930 continue;
931 AccessList *Accesses = It->second.get();
932 auto *Phi = cast<MemoryPhi>(&Accesses->front());
933 if (RenameAllUses) {
934 int PhiIndex = Phi->getBasicBlockIndex(BB);
935 assert(PhiIndex != -1 && "Incomplete phi during partial rename");
936 Phi->setIncomingValue(PhiIndex, IncomingVal);
937 } else
938 Phi->addIncoming(IncomingVal, BB);
939 }
940}
941
942/// \brief Rename a single basic block into MemorySSA form.
943/// Uses the standard SSA renaming algorithm.
944/// \returns The new incoming value.
945MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
946 bool RenameAllUses) {
947 auto It = PerBlockAccesses.find(BB);
948 // Skip most processing if the list is empty.
949 if (It != PerBlockAccesses.end()) {
950 AccessList *Accesses = It->second.get();
951 for (MemoryAccess &L : *Accesses) {
952 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
953 if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
954 MUD->setDefiningAccess(IncomingVal);
955 if (isa<MemoryDef>(&L))
956 IncomingVal = &L;
957 } else {
958 IncomingVal = &L;
959 }
960 }
961 }
962 return IncomingVal;
963}
964
965/// \brief This is the standard SSA renaming algorithm.
966///
967/// We walk the dominator tree in preorder, renaming accesses, and then filling
968/// in phi nodes in our successors.
969void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
970 SmallPtrSetImpl<BasicBlock *> &Visited,
971 bool SkipVisited, bool RenameAllUses) {
972 SmallVector<RenamePassData, 32> WorkStack;
973 // Skip everything if we already renamed this block and we are skipping.
974 // Note: You can't sink this into the if, because we need it to occur
975 // regardless of whether we skip blocks or not.
976 bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
977 if (SkipVisited && AlreadyVisited)
978 return;
979
980 IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
981 renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
982 WorkStack.push_back({Root, Root->begin(), IncomingVal});
983
984 while (!WorkStack.empty()) {
985 DomTreeNode *Node = WorkStack.back().DTN;
986 DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
987 IncomingVal = WorkStack.back().IncomingVal;
988
989 if (ChildIt == Node->end()) {
990 WorkStack.pop_back();
991 } else {
992 DomTreeNode *Child = *ChildIt;
993 ++WorkStack.back().ChildIt;
994 BasicBlock *BB = Child->getBlock();
995 // Note: You can't sink this into the if, because we need it to occur
996 // regardless of whether we skip blocks or not.
997 AlreadyVisited = !Visited.insert(BB).second;
998 if (SkipVisited && AlreadyVisited) {
999 // We already visited this during our renaming, which can happen when
1000 // being asked to rename multiple blocks. Figure out the incoming val,
1001 // which is the last def.
1002 // Incoming value can only change if there is a block def, and in that
1003 // case, it's the last block def in the list.
1004 if (auto *BlockDefs = getWritableBlockDefs(BB))
1005 IncomingVal = &*BlockDefs->rbegin();
1006 } else
1007 IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1008 renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1009 WorkStack.push_back({Child, Child->begin(), IncomingVal});
1010 }
1011 }
1012}
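The traversal skeleton of renamePass in isolation, as a hedged, self-contained sketch (ToyNode and preorderWalk are hypothetical; the real code interleaves renameBlock and renameSuccessorPhis as shown above): an explicit stack of (node, next-child position, incoming value) frames yields the same preorder dominator-tree walk without recursion.

#include <cstddef>
#include <vector>

struct ToyNode {
  std::vector<ToyNode *> Children;
};

// Visit every node in preorder, carrying an incoming value that each node
// may update for its own subtree, mirroring RenamePassData above.
template <typename VisitFn>
void preorderWalk(ToyNode *Root, int IncomingVal, VisitFn Visit) {
  struct Frame { ToyNode *Node; std::size_t NextChild; int IncomingVal; };
  IncomingVal = Visit(Root, IncomingVal);
  std::vector<Frame> Stack{{Root, 0, IncomingVal}};
  while (!Stack.empty()) {
    Frame &F = Stack.back();
    if (F.NextChild == F.Node->Children.size()) {
      Stack.pop_back();
      continue;
    }
    ToyNode *Child = F.Node->Children[F.NextChild++];
    int ChildVal = Visit(Child, F.IncomingVal);
    Stack.push_back({Child, 0, ChildVal}); // F is not used after this push
  }
}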
1013
1014/// \brief This handles unreachable block accesses by deleting phi nodes in
1015/// unreachable blocks, and marking all other unreachable MemoryAccess's as
1016/// being uses of the live on entry definition.
1017void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1018 assert(!DT->isReachableFromEntry(BB) &&
1019 "Reachable block found while handling unreachable blocks");
1020
1021 // Make sure phi nodes in our reachable successors end up with a
1022 // LiveOnEntryDef for our incoming edge, even though our block is forward
1023 // unreachable. We could just disconnect these blocks from the CFG fully,
1024 // but we do not right now.
1025 for (const BasicBlock *S : successors(BB)) {
1026 if (!DT->isReachableFromEntry(S))
1027 continue;
1028 auto It = PerBlockAccesses.find(S);
1029 // Rename the phi nodes in our successor block
1030 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1031 continue;
1032 AccessList *Accesses = It->second.get();
1033 auto *Phi = cast<MemoryPhi>(&Accesses->front());
1034 Phi->addIncoming(LiveOnEntryDef.get(), BB);
1035 }
1036
1037 auto It = PerBlockAccesses.find(BB);
1038 if (It == PerBlockAccesses.end())
1039 return;
1040
1041 auto &Accesses = It->second;
1042 for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1043 auto Next = std::next(AI);
1044 // If we have a phi, just remove it. We are going to replace all
1045 // users with live on entry.
1046 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1047 UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1048 else
1049 Accesses->erase(AI);
1050 AI = Next;
1051 }
1052}
1053
1054MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1055 : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1056 NextID(INVALID_MEMORYACCESS_ID) {
1057 buildMemorySSA();
1058}
1059
1060MemorySSA::~MemorySSA() {
1061 // Drop all our references
1062 for (const auto &Pair : PerBlockAccesses)
1063 for (MemoryAccess &MA : *Pair.second)
1064 MA.dropAllReferences();
1065}
1066
1067MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1068 auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1069
1070 if (Res.second)
1071 Res.first->second = llvm::make_unique<AccessList>();
1072 return Res.first->second.get();
1073}
1074
1075MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1076 auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1077
1078 if (Res.second)
1079 Res.first->second = llvm::make_unique<DefsList>();
1080 return Res.first->second.get();
1081}
1082
1083namespace llvm {
1084
1085/// This class is a batch walker of all MemoryUse's in the program, and points
1086/// their defining access at the thing that actually clobbers them. Because it
1087/// is a batch walker that touches everything, it does not operate like the
1088/// other walkers. This walker is basically performing a top-down SSA renaming
1089/// pass, where the version stack is used as the cache. This enables it to be
1090/// significantly more time and memory efficient than using the regular walker,
1091/// which is walking bottom-up.
1092class MemorySSA::OptimizeUses {
1093public:
1094 OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
1095 DominatorTree *DT)
1096 : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
1097 Walker = MSSA->getWalker();
1098 }
1099
1100 void optimizeUses();
1101
1102private:
1103 /// This represents where a given memorylocation is in the stack.
1104 struct MemlocStackInfo {
1105 // This essentially is keeping track of versions of the stack. Whenever
1106 // the stack changes due to pushes or pops, these versions increase.
1107 unsigned long StackEpoch;
1108 unsigned long PopEpoch;
1109 // This is the lower bound of places on the stack to check. It is equal to
1110 // the place the last stack walk ended.
1111 // Note: Correctness depends on this being initialized to 0, which densemap
1112 // does
1113 unsigned long LowerBound;
1114 const BasicBlock *LowerBoundBlock;
1115 // This is where the last walk for this memory location ended.
1116 unsigned long LastKill;
1117 bool LastKillValid;
1118 };
1119
1120 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1121 SmallVectorImpl<MemoryAccess *> &,
1122 DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1123
1124 MemorySSA *MSSA;
1125 MemorySSAWalker *Walker;
1126 AliasAnalysis *AA;
1127 DominatorTree *DT;
1128};
1129
1130} // end namespace llvm
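A toy, self-contained illustration of the version-stack caching idea described in the class comment above (every name here is hypothetical; the real code additionally tracks StackEpoch/PopEpoch so stale lower bounds are invalidated when the stack is popped while changing blocks):

#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct ToyLocInfo {
  std::size_t LowerBound = 0; // stack height when this location was last walked
  int LastKill = 0;           // clobber found by that walk (0 = liveOnEntry)
};

// Defs are pushed onto one shared stack in dominator-tree preorder. A use
// only re-examines entries pushed since its location's last walk; if none
// of them clobber it, the previously cached answer still stands.
inline int findClobberToy(const std::vector<int> &VersionStack,
                          std::map<std::string, ToyLocInfo> &Cache,
                          const std::string &Loc,
                          bool (*Clobbers)(int Def, const std::string &Loc)) {
  ToyLocInfo &LI = Cache[Loc];
  for (std::size_t I = VersionStack.size(); I > LI.LowerBound; --I)
    if (Clobbers(VersionStack[I - 1], Loc)) {
      LI.LastKill = VersionStack[I - 1];
      break;
    }
  LI.LowerBound = VersionStack.size();
  return LI.LastKill;
}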
1131
1132/// Optimize the uses in a given block. This is basically the SSA renaming
1133/// algorithm, with one caveat: We are able to use a single stack for all
1134/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1135/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1136/// going to be some position in that stack of possible ones.
1137///
1138/// We track the stack positions that each MemoryLocation needs
1139/// to check, and where its last walk ended. This is because we only want to
1140/// check the things that changed since last time. The same MemoryLocation
1141/// should get clobbered by the same store (getModRefInfo does not currently use
1142/// invariantness or similar properties; if it starts to, we can extend
1143/// MemoryLocOrCall to include the relevant data).
1144void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1145 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1146 SmallVectorImpl<MemoryAccess *> &VersionStack,
1147 DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1148
1149 /// If no accesses, nothing to do.
1150 MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1151 if (Accesses == nullptr)
1152 return;
1153
1154 // Pop everything that doesn't dominate the current block off the stack,
1155 // increment the PopEpoch to account for this.
1156 while (true) {
1157 assert((static_cast <bool> (!VersionStack.empty() && "Version stack should have liveOnEntry sentinel dominating everything"
) ? void (0) : __assert_fail ("!VersionStack.empty() && \"Version stack should have liveOnEntry sentinel dominating everything\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1159, __extension__ __PRETTY_FUNCTION__))
1158 !VersionStack.empty() &&(static_cast <bool> (!VersionStack.empty() && "Version stack should have liveOnEntry sentinel dominating everything"
) ? void (0) : __assert_fail ("!VersionStack.empty() && \"Version stack should have liveOnEntry sentinel dominating everything\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1159, __extension__ __PRETTY_FUNCTION__))
1159 "Version stack should have liveOnEntry sentinel dominating everything")(static_cast <bool> (!VersionStack.empty() && "Version stack should have liveOnEntry sentinel dominating everything"
) ? void (0) : __assert_fail ("!VersionStack.empty() && \"Version stack should have liveOnEntry sentinel dominating everything\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1159, __extension__ __PRETTY_FUNCTION__))
;
1160 BasicBlock *BackBlock = VersionStack.back()->getBlock();
1161 if (DT->dominates(BackBlock, BB))
1162 break;
1163 while (VersionStack.back()->getBlock() == BackBlock)
1164 VersionStack.pop_back();
1165 ++PopEpoch;
1166 }
1167
1168 for (MemoryAccess &MA : *Accesses) {
1169 auto *MU = dyn_cast<MemoryUse>(&MA);
1170 if (!MU) {
1171 VersionStack.push_back(&MA);
1172 ++StackEpoch;
1173 continue;
1174 }
1175
1176 if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1177 MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
1178 continue;
1179 }
1180
1181 MemoryLocOrCall UseMLOC(MU);
1182 auto &LocInfo = LocStackInfo[UseMLOC];
1183 // If the pop epoch changed, it means we've removed stuff from the top of
1184 // the stack due to changing blocks. We may have to reset the lower bound or
1185 // last kill info.
1186 if (LocInfo.PopEpoch != PopEpoch) {
1187 LocInfo.PopEpoch = PopEpoch;
1188 LocInfo.StackEpoch = StackEpoch;
1189 // If the lower bound was in something that no longer dominates us, we
1190 // have to reset it.
1191 // We can't simply track stack size, because the stack may have had
1192 // pushes/pops in the meantime.
1193 // XXX: This is non-optimal, but is only slower in cases with heavily
1194 // branching dominator trees. To get the optimal number of queries, we would
1195 // need to make LowerBound and LastKill a per-loc stack, and pop it until
1196 // the top of that stack dominates us. This does not seem worth it ATM.
1197 // A much cheaper optimization would be to always explore the deepest
1198 // branch of the dominator tree first. This would guarantee that the reset
1199 // happens on the smallest set of blocks.
1200 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1201 !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1202 // Reset the lower bound of things to check.
1203 // TODO: Some day we should be able to reset to last kill, rather than
1204 // 0.
1205 LocInfo.LowerBound = 0;
1206 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1207 LocInfo.LastKillValid = false;
1208 }
1209 } else if (LocInfo.StackEpoch != StackEpoch) {
1210 // If all that has changed is the StackEpoch, we only have to check the
1211 // new things on the stack, because we've checked everything before. In
1212 // this case, the lower bound of things to check remains the same.
1213 LocInfo.PopEpoch = PopEpoch;
1214 LocInfo.StackEpoch = StackEpoch;
1215 }
1216 if (!LocInfo.LastKillValid) {
1217 LocInfo.LastKill = VersionStack.size() - 1;
1218 LocInfo.LastKillValid = true;
1219 }
1220
1221 // At this point, we should have corrected last kill and LowerBound to be
1222 // in bounds.
1223 assert(LocInfo.LowerBound < VersionStack.size() &&(static_cast <bool> (LocInfo.LowerBound < VersionStack
.size() && "Lower bound out of range") ? void (0) : __assert_fail
("LocInfo.LowerBound < VersionStack.size() && \"Lower bound out of range\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1224, __extension__ __PRETTY_FUNCTION__))
1224 "Lower bound out of range")(static_cast <bool> (LocInfo.LowerBound < VersionStack
.size() && "Lower bound out of range") ? void (0) : __assert_fail
("LocInfo.LowerBound < VersionStack.size() && \"Lower bound out of range\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1224, __extension__ __PRETTY_FUNCTION__))
;
1225 assert(LocInfo.LastKill < VersionStack.size() &&(static_cast <bool> (LocInfo.LastKill < VersionStack
.size() && "Last kill info out of range") ? void (0) :
__assert_fail ("LocInfo.LastKill < VersionStack.size() && \"Last kill info out of range\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1226, __extension__ __PRETTY_FUNCTION__))
1226 "Last kill info out of range")(static_cast <bool> (LocInfo.LastKill < VersionStack
.size() && "Last kill info out of range") ? void (0) :
__assert_fail ("LocInfo.LastKill < VersionStack.size() && \"Last kill info out of range\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1226, __extension__ __PRETTY_FUNCTION__))
;
1227 // In any case, the new upper bound is the top of the stack.
1228 unsigned long UpperBound = VersionStack.size() - 1;
1229
1230 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1231 DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << "MemorySSA skipping optimization of "
<< *MU << " (" << *(MU->getMemoryInst()
) << ")" << " because there are " << UpperBound
- LocInfo.LowerBound << " stores to disambiguate\n"; }
} while (false)
1232 << *(MU->getMemoryInst()) << ")"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << "MemorySSA skipping optimization of "
<< *MU << " (" << *(MU->getMemoryInst()
) << ")" << " because there are " << UpperBound
- LocInfo.LowerBound << " stores to disambiguate\n"; }
} while (false)
1233 << " because there are " << UpperBound - LocInfo.LowerBounddo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << "MemorySSA skipping optimization of "
<< *MU << " (" << *(MU->getMemoryInst()
) << ")" << " because there are " << UpperBound
- LocInfo.LowerBound << " stores to disambiguate\n"; }
} while (false)
1234 << " stores to disambiguate\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << "MemorySSA skipping optimization of "
<< *MU << " (" << *(MU->getMemoryInst()
) << ")" << " because there are " << UpperBound
- LocInfo.LowerBound << " stores to disambiguate\n"; }
} while (false)
;
1235 // Because we did not walk, LastKill is no longer valid, as this may
1236 // have been a kill.
1237 LocInfo.LastKillValid = false;
1238 continue;
1239 }
1240 bool FoundClobberResult = false;
1241 while (UpperBound > LocInfo.LowerBound) {
1242 if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1243 // For phis, use the walker, see where we ended up, go there
1244 Instruction *UseInst = MU->getMemoryInst();
1245 MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
1246 // We are guaranteed to find it or something is wrong
1247 while (VersionStack[UpperBound] != Result) {
1248 assert(UpperBound != 0)(static_cast <bool> (UpperBound != 0) ? void (0) : __assert_fail
("UpperBound != 0", "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1248, __extension__ __PRETTY_FUNCTION__))
;
1249 --UpperBound;
1250 }
1251 FoundClobberResult = true;
1252 break;
1253 }
1254
1255 MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1256 // If the lifetime of the pointer ends at this instruction, it's live on
1257 // entry.
1258 if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1259 // Reset UpperBound to liveOnEntryDef's place in the stack
1260 UpperBound = 0;
1261 FoundClobberResult = true;
1262 break;
1263 }
1264 if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
1265 FoundClobberResult = true;
1266 break;
1267 }
1268 --UpperBound;
1269 }
1270 // At the end of this loop, UpperBound is either a clobber or the lower bound.
1271 // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
1272 if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1273 MU->setDefiningAccess(VersionStack[UpperBound], true);
1274 // The last kill is now wherever we ended up.
1275 LocInfo.LastKill = UpperBound;
1276 } else {
1277 // Otherwise, we checked all the new ones, and now we know we can get to
1278 // LastKill.
1279 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
1280 }
1281 LocInfo.LowerBound = VersionStack.size() - 1;
1282 LocInfo.LowerBoundBlock = BB;
1283 }
1284}
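To make the LowerBound/LastKill bookkeeping concrete, here is a small self-contained toy (plain C++, not LLVM code, written for this report): an exact-name match stands in for instructionClobbersQuery, and phis, epochs and dominator-based popping are left out. It only shows the one-shared-stack idea: each use re-scans only the defs pushed since that location's last visit.

#include <cstddef>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct LocInfo {
  std::size_t LowerBound = 0, LastKill = 0;
  bool LastKillValid = false;
};

int main() {
  // Slot 0 plays the role of liveOnEntry; defs are pushed as they appear.
  std::vector<std::string> VersionStack{"liveOnEntry"};
  std::map<std::string, LocInfo> Info;

  auto Def = [&](const std::string &Loc) { VersionStack.push_back(Loc); };
  auto Use = [&](const std::string &Loc) {
    LocInfo &LI = Info[Loc];
    if (!LI.LastKillValid) {
      LI.LastKill = 0;
      LI.LastKillValid = true;
    }
    // Scan only the part of the stack pushed since this location's last visit.
    std::size_t Upper = VersionStack.size() - 1;
    bool Found = false;
    while (Upper > LI.LowerBound) {
      if (VersionStack[Upper] == Loc) { // stand-in for the real clobber query
        Found = true;
        break;
      }
      --Upper;
    }
    std::size_t Clobber = Found ? Upper : LI.LastKill;
    if (Found)
      LI.LastKill = Upper;
    LI.LowerBound = VersionStack.size() - 1;
    std::printf("use of %s -> clobbered by slot %zu (%s)\n", Loc.c_str(),
                Clobber, VersionStack[Clobber].c_str());
  };

  Def("A");
  Def("B");
  Use("A"); // scans slots 2 then 1, finds A at slot 1
  Def("C");
  Use("A"); // scans only slot 3 (new since last time), falls back to LastKill
  Use("B"); // first visit for B: scans down from slot 3, finds B at slot 2
  return 0;
}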
1285
1286/// Optimize uses to point to their actual clobbering definitions.
1287void MemorySSA::OptimizeUses::optimizeUses() {
1288 SmallVector<MemoryAccess *, 16> VersionStack;
1289 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1290 VersionStack.push_back(MSSA->getLiveOnEntryDef());
1291
1292 unsigned long StackEpoch = 1;
1293 unsigned long PopEpoch = 1;
1294 // We perform a non-recursive top-down dominator tree walk.
1295 for (const auto *DomNode : depth_first(DT->getRootNode()))
1296 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1297 LocStackInfo);
1298}
1299
1300void MemorySSA::placePHINodes(
1301 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks,
1302 const DenseMap<const BasicBlock *, unsigned int> &BBNumbers) {
1303 // Determine where our MemoryPhis should go
1304 ForwardIDFCalculator IDFs(*DT);
1305 IDFs.setDefiningBlocks(DefiningBlocks);
1306 SmallVector<BasicBlock *, 32> IDFBlocks;
1307 IDFs.calculate(IDFBlocks);
1308
1309 std::sort(IDFBlocks.begin(), IDFBlocks.end(),
1310 [&BBNumbers](const BasicBlock *A, const BasicBlock *B) {
1311 return BBNumbers.lookup(A) < BBNumbers.lookup(B);
1312 });
1313
1314 // Now place MemoryPhi nodes.
1315 for (auto &BB : IDFBlocks)
1316 createMemoryPhi(BB);
1317}
1318
1319void MemorySSA::buildMemorySSA() {
1320 // We create an access to represent "live on entry", for things like
1321 // arguments or users of globals, where the memory they use is defined before
1322 // the beginning of the function. We do not actually insert it into the IR.
1323 // We do not define a live on exit for the immediate uses, and thus our
1324 // semantics do *not* imply that something with no immediate uses can simply
1325 // be removed.
1326 BasicBlock &StartingPoint = F.getEntryBlock();
1327 LiveOnEntryDef =
1328 llvm::make_unique<MemoryDef>(F.getContext(), nullptr, nullptr,
1329 &StartingPoint, NextID++);
1330 DenseMap<const BasicBlock *, unsigned int> BBNumbers;
1331 unsigned NextBBNum = 0;
1332
1333 // We maintain lists of memory accesses per-block, trading memory for time. We
1334 // could just look up the memory access for every possible instruction in the
1335 // stream.
1336 SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1337 // Go through each block, figure out where defs occur, and chain together all
1338 // the accesses.
1339 for (BasicBlock &B : F) {
1340 BBNumbers[&B] = NextBBNum++;
1341 bool InsertIntoDef = false;
1342 AccessList *Accesses = nullptr;
1343 DefsList *Defs = nullptr;
1344 for (Instruction &I : B) {
1345 MemoryUseOrDef *MUD = createNewAccess(&I);
1346 if (!MUD)
1347 continue;
1348
1349 if (!Accesses)
1350 Accesses = getOrCreateAccessList(&B);
1351 Accesses->push_back(MUD);
1352 if (isa<MemoryDef>(MUD)) {
1353 InsertIntoDef = true;
1354 if (!Defs)
1355 Defs = getOrCreateDefsList(&B);
1356 Defs->push_back(*MUD);
1357 }
1358 }
1359 if (InsertIntoDef)
1360 DefiningBlocks.insert(&B);
1361 }
1362 placePHINodes(DefiningBlocks, BBNumbers);
1363
1364 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1365 // filled in with all blocks.
1366 SmallPtrSet<BasicBlock *, 16> Visited;
1367 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1368
1369 CachingWalker *Walker = getWalkerImpl();
1370
1371 // We're doing a batch of updates; don't drop useful caches between them.
1372 Walker->setAutoResetWalker(false);
1373 OptimizeUses(this, Walker, AA, DT).optimizeUses();
1374 Walker->setAutoResetWalker(true);
1375 Walker->resetClobberWalker();
1376
1377 // Mark the uses in unreachable blocks as live on entry, so that they go
1378 // somewhere.
1379 for (auto &BB : F)
1380 if (!Visited.count(&BB))
1381 markUnreachableAsLiveOnEntry(&BB);
1382}
1383
1384MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1385
1386MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1387 if (Walker)
1388 return Walker.get();
1389
1390 Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
1391 return Walker.get();
1392}
1393
1394// This is a helper function used by the creation routines. It places NewAccess
1395// into the access and defs lists for a given basic block, at the given
1396// insertion point.
1397void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1398 const BasicBlock *BB,
1399 InsertionPlace Point) {
1400 auto *Accesses = getOrCreateAccessList(BB);
1401 if (Point == Beginning) {
1402 // If it's a phi node, it goes first; otherwise, it goes after any phi
1403 // nodes.
1404 if (isa<MemoryPhi>(NewAccess)) {
1405 Accesses->push_front(NewAccess);
1406 auto *Defs = getOrCreateDefsList(BB);
1407 Defs->push_front(*NewAccess);
1408 } else {
1409 auto AI = find_if_not(
1410 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1411 Accesses->insert(AI, NewAccess);
1412 if (!isa<MemoryUse>(NewAccess)) {
1413 auto *Defs = getOrCreateDefsList(BB);
1414 auto DI = find_if_not(
1415 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1416 Defs->insert(DI, *NewAccess);
1417 }
1418 }
1419 } else {
1420 Accesses->push_back(NewAccess);
1421 if (!isa<MemoryUse>(NewAccess)) {
1422 auto *Defs = getOrCreateDefsList(BB);
1423 Defs->push_back(*NewAccess);
1424 }
1425 }
1426 BlockNumberingValid.erase(BB);
1427}
1428
1429void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1430 AccessList::iterator InsertPt) {
1431 auto *Accesses = getWritableBlockAccesses(BB);
1432 bool WasEnd = InsertPt == Accesses->end();
1433 Accesses->insert(AccessList::iterator(InsertPt), What);
1434 if (!isa<MemoryUse>(What)) {
1435 auto *Defs = getOrCreateDefsList(BB);
1436 // If we got asked to insert at the end, we have an easy job: just shove it
1437 // at the end. If we got asked to insert before an existing def, we also get
1438 // an iterator. If we got asked to insert before a use, we have to hunt for
1439 // the next def.
1440 if (WasEnd) {
1441 Defs->push_back(*What);
1442 } else if (isa<MemoryDef>(InsertPt)) {
1443 Defs->insert(InsertPt->getDefsIterator(), *What);
1444 } else {
1445 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1446 ++InsertPt;
1447 // Either we found a def, or we are inserting at the end
1448 if (InsertPt == Accesses->end())
1449 Defs->push_back(*What);
1450 else
1451 Defs->insert(InsertPt->getDefsIterator(), *What);
1452 }
1453 }
1454 BlockNumberingValid.erase(BB);
1455}
1456
1457 // Move What before Where in the IR. The end result is that What will belong to
1458// the right lists and have the right Block set, but will not otherwise be
1459// correct. It will not have the right defining access, and if it is a def,
1460// things below it will not properly be updated.
1461void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1462 AccessList::iterator Where) {
1463 // Keep it in the lookup tables, remove from the lists
1464 removeFromLists(What, false);
1465 What->setBlock(BB);
1466 insertIntoListsBefore(What, BB, Where);
1467}
1468
1469void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1470 InsertionPlace Point) {
1471 removeFromLists(What, false);
1472 What->setBlock(BB);
1473 insertIntoListsForBlock(What, BB, Point);
1474}
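The caveat in the comment above is easy to get wrong, so here is a minimal sketch of what a caller still owes after moveTo(). It assumes a context with the same access rights as this file's own code; the names moveAndRepair, TargetBB and NewDefiningAccess are placeholders, not part of the analyzed source.

// Hypothetical helper; assumes the enclosing code is allowed to call moveTo()
// and setDefiningAccess(), which are not open to arbitrary clients.
static void moveAndRepair(llvm::MemorySSA &MSSA, llvm::MemoryUseOrDef *What,
                          llvm::BasicBlock *TargetBB,
                          llvm::MemoryAccess *NewDefiningAccess) {
  MSSA.moveTo(What, TargetBB, llvm::MemorySSA::Beginning); // lists + block only
  What->setDefiningAccess(NewDefiningAccess); // moveTo() does not touch this
  // If What is a MemoryDef, accesses that now read from the wrong def still
  // need to be re-pointed by the caller as well.
}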
1475
1476MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1477 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB")(static_cast <bool> (!getMemoryAccess(BB) && "MemoryPhi already exists for this BB"
) ? void (0) : __assert_fail ("!getMemoryAccess(BB) && \"MemoryPhi already exists for this BB\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1477, __extension__ __PRETTY_FUNCTION__))
;
1478 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1479 // Phis are always placed at the front of the block.
1480 insertIntoListsForBlock(Phi, BB, Beginning);
1481 ValueToMemoryAccess[BB] = Phi;
1482 return Phi;
1483}
1484
1485MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1486 MemoryAccess *Definition) {
1487 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI")(static_cast <bool> (!isa<PHINode>(I) && "Cannot create a defined access for a PHI"
) ? void (0) : __assert_fail ("!isa<PHINode>(I) && \"Cannot create a defined access for a PHI\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1487, __extension__ __PRETTY_FUNCTION__))
;
1488 MemoryUseOrDef *NewAccess = createNewAccess(I);
1489 assert((static_cast <bool> (NewAccess != nullptr && "Tried to create a memory access for a non-memory touching instruction"
) ? void (0) : __assert_fail ("NewAccess != nullptr && \"Tried to create a memory access for a non-memory touching instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1491, __extension__ __PRETTY_FUNCTION__))
1490 NewAccess != nullptr &&(static_cast <bool> (NewAccess != nullptr && "Tried to create a memory access for a non-memory touching instruction"
) ? void (0) : __assert_fail ("NewAccess != nullptr && \"Tried to create a memory access for a non-memory touching instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1491, __extension__ __PRETTY_FUNCTION__))
1491 "Tried to create a memory access for a non-memory touching instruction")(static_cast <bool> (NewAccess != nullptr && "Tried to create a memory access for a non-memory touching instruction"
) ? void (0) : __assert_fail ("NewAccess != nullptr && \"Tried to create a memory access for a non-memory touching instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1491, __extension__ __PRETTY_FUNCTION__))
;
1492 NewAccess->setDefiningAccess(Definition);
1493 return NewAccess;
1494}
1495
1496// Return true if the instruction has ordering constraints.
1497// Note specifically that this only considers stores and loads
1498// because others are still considered ModRef by getModRefInfo.
1499static inline bool isOrdered(const Instruction *I) {
1500 if (auto *SI = dyn_cast<StoreInst>(I)) {
1501 if (!SI->isUnordered())
1502 return true;
1503 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1504 if (!LI->isUnordered())
1505 return true;
1506 }
1507 return false;
1508}
1509
1510/// \brief Helper function to create new memory accesses
1511MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
1512 // The assume intrinsic has a control dependency which we model by claiming
1513 // that it writes arbitrarily. Ignore that fake memory dependency here.
1514 // FIXME: Replace this special casing with a more accurate modelling of
1515 // assume's control dependency.
1516 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1517 if (II->getIntrinsicID() == Intrinsic::assume)
1518 return nullptr;
1519
1520 // Find out what effect this instruction has on memory.
1521 ModRefInfo ModRef = AA->getModRefInfo(I, None);
1522 // The isOrdered check is used to ensure that volatiles end up as defs
1523 // (atomics end up as ModRef right now anyway). Until we separate the
1524 // ordering chain from the memory chain, this enables people to see at least
1525 // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1526 // will still give an answer that bypasses other volatile loads. TODO:
1527 // Separate memory aliasing and ordering into two different chains so that we
1528 // can precisely represent both "what memory will this read/write/is clobbered
1529 // by" and "what instructions can I move this past".
1530 bool Def = bool(ModRef & MRI_Mod) || isOrdered(I);
1531 bool Use = bool(ModRef & MRI_Ref);
1532
1533 // It's possible for an instruction to not access memory at all. During
1534 // construction, we ignore such instructions.
1535 if (!Def && !Use)
1536 return nullptr;
1537
1538 assert((Def || Use) &&(static_cast <bool> ((Def || Use) && "Trying to create a memory access with a non-memory instruction"
) ? void (0) : __assert_fail ("(Def || Use) && \"Trying to create a memory access with a non-memory instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1539, __extension__ __PRETTY_FUNCTION__))
1539 "Trying to create a memory access with a non-memory instruction")(static_cast <bool> ((Def || Use) && "Trying to create a memory access with a non-memory instruction"
) ? void (0) : __assert_fail ("(Def || Use) && \"Trying to create a memory access with a non-memory instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1539, __extension__ __PRETTY_FUNCTION__))
;
1540
1541 MemoryUseOrDef *MUD;
1542 if (Def)
1543 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1544 else
1545 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1546 ValueToMemoryAccess[I] = MUD;
1547 return MUD;
1548}
1549
1550/// \brief Returns true if \p Replacer dominates \p Replacee .
1551bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1552 const MemoryAccess *Replacee) const {
1553 if (isa<MemoryUseOrDef>(Replacee))
1554 return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1555 const auto *MP = cast<MemoryPhi>(Replacee);
1556 // For a phi node, the use occurs in the predecessor block of the phi node.
1557 // Since Replacee may occur multiple times in the phi node, we have to check
1558 // each operand to ensure Replacer dominates each operand where Replacee occurs.
1559 for (const Use &Arg : MP->operands()) {
1560 if (Arg.get() != Replacee &&
1561 !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1562 return false;
1563 }
1564 return true;
1565}
1566
1567/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
1568void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1569 assert(MA->use_empty() &&(static_cast <bool> (MA->use_empty() && "Trying to remove memory access that still has uses"
) ? void (0) : __assert_fail ("MA->use_empty() && \"Trying to remove memory access that still has uses\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1570, __extension__ __PRETTY_FUNCTION__))
1570 "Trying to remove memory access that still has uses")(static_cast <bool> (MA->use_empty() && "Trying to remove memory access that still has uses"
) ? void (0) : __assert_fail ("MA->use_empty() && \"Trying to remove memory access that still has uses\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1570, __extension__ __PRETTY_FUNCTION__))
;
1571 BlockNumbering.erase(MA);
1572 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
1573 MUD->setDefiningAccess(nullptr);
1574 // Invalidate our walker's cache if necessary
1575 if (!isa<MemoryUse>(MA))
1576 Walker->invalidateInfo(MA);
1577 // The call below to erase will destroy MA, so we can't change the order in
1578 // which we do things here.
1579 Value *MemoryInst;
1580 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1581 MemoryInst = MUD->getMemoryInst();
1582 } else {
1583 MemoryInst = MA->getBlock();
1584 }
1585 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1586 if (VMA->second == MA)
1587 ValueToMemoryAccess.erase(VMA);
1588}
1589
1590/// \brief Properly remove \p MA from all of MemorySSA's lists.
1591///
1592/// Because of the way the intrusive list and use lists work, it is important to
1593/// do removal in the right order.
1594/// ShouldDelete defaults to true, and will cause the memory access to also be
1595/// deleted, not just removed.
1596void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1597 // The access list owns the reference, so we erase it from the non-owning list
1598 // first.
1599 if (!isa<MemoryUse>(MA)) {
1600 auto DefsIt = PerBlockDefs.find(MA->getBlock());
1601 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1602 Defs->remove(*MA);
1603 if (Defs->empty())
1604 PerBlockDefs.erase(DefsIt);
1605 }
1606
1607 // The erase call here will delete it. If we don't want it deleted, we call
1608 // remove instead.
1609 auto AccessIt = PerBlockAccesses.find(MA->getBlock());
1610 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1611 if (ShouldDelete)
1612 Accesses->erase(MA);
1613 else
1614 Accesses->remove(MA);
1615
1616 if (Accesses->empty())
1617 PerBlockAccesses.erase(AccessIt);
1618}
1619
1620void MemorySSA::print(raw_ostream &OS) const {
1621 MemorySSAAnnotatedWriter Writer(this);
1622 F.print(OS, &Writer);
1623}
1624
1625#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1626LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void MemorySSA::dump() const { print(dbgs()); }
1627#endif
1628
1629void MemorySSA::verifyMemorySSA() const {
1630 verifyDefUses(F);
1631 verifyDomination(F);
1632 verifyOrdering(F);
2
Calling 'MemorySSA::verifyOrdering'
1633 Walker->verify(this);
1634}
1635
1636/// \brief Verify that the order and existence of MemoryAccesses matches the
1637/// order and existence of memory affecting instructions.
1638void MemorySSA::verifyOrdering(Function &F) const {
1639 // Walk all the blocks, comparing what the lookups think and what the access
1640 // lists think, as well as the order in the blocks vs the order in the access
1641 // lists.
1642 SmallVector<MemoryAccess *, 32> ActualAccesses;
1643 SmallVector<MemoryAccess *, 32> ActualDefs;
1644 for (BasicBlock &B : F) {
1645 const AccessList *AL = getBlockAccesses(&B);
3
Calling 'MemorySSA::getBlockAccesses'
20
Returning from 'MemorySSA::getBlockAccesses'
26
Calling 'MemorySSA::getBlockAccesses'
43
Returning from 'MemorySSA::getBlockAccesses'
44
'AL' initialized here
1646 const auto *DL = getBlockDefs(&B);
1647 MemoryAccess *Phi = getMemoryAccess(&B);
1648 if (Phi) {
21
Assuming 'Phi' is null
22
Taking false branch
45
Taking false branch
1649 ActualAccesses.push_back(Phi);
1650 ActualDefs.push_back(Phi);
1651 }
1652
1653 for (Instruction &I : B) {
1654 MemoryAccess *MA = getMemoryAccess(&I);
1655 assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&(static_cast <bool> ((!MA || (AL && (isa<MemoryUse
>(MA) || DL))) && "We have memory affecting instructions "
"in this block but they are not in the " "access list or defs list"
) ? void (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1658, __extension__ __PRETTY_FUNCTION__))
1656 "We have memory affecting instructions "(static_cast <bool> ((!MA || (AL && (isa<MemoryUse
>(MA) || DL))) && "We have memory affecting instructions "
"in this block but they are not in the " "access list or defs list"
) ? void (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1658, __extension__ __PRETTY_FUNCTION__))
1657 "in this block but they are not in the "(static_cast <bool> ((!MA || (AL && (isa<MemoryUse
>(MA) || DL))) && "We have memory affecting instructions "
"in this block but they are not in the " "access list or defs list"
) ? void (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1658, __extension__ __PRETTY_FUNCTION__))
1658 "access list or defs list")(static_cast <bool> ((!MA || (AL && (isa<MemoryUse
>(MA) || DL))) && "We have memory affecting instructions "
"in this block but they are not in the " "access list or defs list"
) ? void (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1658, __extension__ __PRETTY_FUNCTION__))
;
1659 if (MA) {
1660 ActualAccesses.push_back(MA);
1661 if (isa<MemoryDef>(MA))
1662 ActualDefs.push_back(MA);
1663 }
1664 }
1665 // Either we hit the assert, really have no accesses, or we have both
1666 // accesses and an access list.
1667 // Same with defs.
1668 if (!AL && !DL)
23
Assuming 'AL' is non-null
46
Assuming 'AL' is null
47
Assuming pointer value is null
48
Assuming 'DL' is non-null
49
Taking false branch
1669 continue;
1670 assert(AL->size() == ActualAccesses.size() &&(static_cast <bool> (AL->size() == ActualAccesses.size
() && "We don't have the same number of accesses in the block as on the "
"access list") ? void (0) : __assert_fail ("AL->size() == ActualAccesses.size() && \"We don't have the same number of accesses in the block as on the \" \"access list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1672, __extension__ __PRETTY_FUNCTION__))
50
Within the expansion of the macro 'assert':
a
Called C++ object pointer is null
1671 "We don't have the same number of accesses in the block as on the "(static_cast <bool> (AL->size() == ActualAccesses.size
() && "We don't have the same number of accesses in the block as on the "
"access list") ? void (0) : __assert_fail ("AL->size() == ActualAccesses.size() && \"We don't have the same number of accesses in the block as on the \" \"access list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1672, __extension__ __PRETTY_FUNCTION__))
1672 "access list")(static_cast <bool> (AL->size() == ActualAccesses.size
() && "We don't have the same number of accesses in the block as on the "
"access list") ? void (0) : __assert_fail ("AL->size() == ActualAccesses.size() && \"We don't have the same number of accesses in the block as on the \" \"access list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1672, __extension__ __PRETTY_FUNCTION__))
;
1673 assert((DL || ActualDefs.size() == 0) &&(static_cast <bool> ((DL || ActualDefs.size() == 0) &&
"Either we should have a defs list, or we should have no defs"
) ? void (0) : __assert_fail ("(DL || ActualDefs.size() == 0) && \"Either we should have a defs list, or we should have no defs\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1674, __extension__ __PRETTY_FUNCTION__))
1674 "Either we should have a defs list, or we should have no defs")(static_cast <bool> ((DL || ActualDefs.size() == 0) &&
"Either we should have a defs list, or we should have no defs"
) ? void (0) : __assert_fail ("(DL || ActualDefs.size() == 0) && \"Either we should have a defs list, or we should have no defs\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1674, __extension__ __PRETTY_FUNCTION__))
;
1675 assert((!DL || DL->size() == ActualDefs.size()) &&(static_cast <bool> ((!DL || DL->size() == ActualDefs
.size()) && "We don't have the same number of defs in the block as on the "
"def list") ? void (0) : __assert_fail ("(!DL || DL->size() == ActualDefs.size()) && \"We don't have the same number of defs in the block as on the \" \"def list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1677, __extension__ __PRETTY_FUNCTION__))
1676 "We don't have the same number of defs in the block as on the "(static_cast <bool> ((!DL || DL->size() == ActualDefs
.size()) && "We don't have the same number of defs in the block as on the "
"def list") ? void (0) : __assert_fail ("(!DL || DL->size() == ActualDefs.size()) && \"We don't have the same number of defs in the block as on the \" \"def list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1677, __extension__ __PRETTY_FUNCTION__))
1677 "def list")(static_cast <bool> ((!DL || DL->size() == ActualDefs
.size()) && "We don't have the same number of defs in the block as on the "
"def list") ? void (0) : __assert_fail ("(!DL || DL->size() == ActualDefs.size()) && \"We don't have the same number of defs in the block as on the \" \"def list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1677, __extension__ __PRETTY_FUNCTION__))
;
1678 auto ALI = AL->begin();
1679 auto AAI = ActualAccesses.begin();
1680 while (ALI != AL->end() && AAI != ActualAccesses.end()) {
24
Loop condition is false. Execution continues on line 1685
1681 assert(&*ALI == *AAI && "Not the same accesses in the same order")(static_cast <bool> (&*ALI == *AAI && "Not the same accesses in the same order"
) ? void (0) : __assert_fail ("&*ALI == *AAI && \"Not the same accesses in the same order\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1681, __extension__ __PRETTY_FUNCTION__))
;
1682 ++ALI;
1683 ++AAI;
1684 }
1685 ActualAccesses.clear();
1686 if (DL) {
25
Taking false branch
1687 auto DLI = DL->begin();
1688 auto ADI = ActualDefs.begin();
1689 while (DLI != DL->end() && ADI != ActualDefs.end()) {
1690 assert(&*DLI == *ADI && "Not the same defs in the same order")(static_cast <bool> (&*DLI == *ADI && "Not the same defs in the same order"
) ? void (0) : __assert_fail ("&*DLI == *ADI && \"Not the same defs in the same order\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1690, __extension__ __PRETTY_FUNCTION__))
;
1691 ++DLI;
1692 ++ADI;
1693 }
1694 }
1695 ActualDefs.clear();
1696 }
1697}
1698
1699/// \brief Verify the domination properties of MemorySSA by checking that each
1700/// definition dominates all of its uses.
1701void MemorySSA::verifyDomination(Function &F) const {
1702#ifndef NDEBUG
1703 for (BasicBlock &B : F) {
1704 // Phi nodes are attached to basic blocks
1705 if (MemoryPhi *MP = getMemoryAccess(&B))
1706 for (const Use &U : MP->uses())
1707 assert(dominates(MP, U) && "Memory PHI does not dominate it's uses")(static_cast <bool> (dominates(MP, U) && "Memory PHI does not dominate it's uses"
) ? void (0) : __assert_fail ("dominates(MP, U) && \"Memory PHI does not dominate it's uses\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1707, __extension__ __PRETTY_FUNCTION__))
;
1708
1709 for (Instruction &I : B) {
1710 MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1711 if (!MD)
1712 continue;
1713
1714 for (const Use &U : MD->uses())
1715 assert(dominates(MD, U) && "Memory Def does not dominate it's uses")(static_cast <bool> (dominates(MD, U) && "Memory Def does not dominate it's uses"
) ? void (0) : __assert_fail ("dominates(MD, U) && \"Memory Def does not dominate it's uses\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1715, __extension__ __PRETTY_FUNCTION__))
;
1716 }
1717 }
1718#endif
1719}
1720
1721/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
1722/// appears in the use list of \p Def.
1723void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1724#ifndef NDEBUG
1725 // The live on entry use may cause us to get a NULL def here
1726 if (!Def)
1727 assert(isLiveOnEntryDef(Use) &&(static_cast <bool> (isLiveOnEntryDef(Use) && "Null def but use not point to live on entry def"
) ? void (0) : __assert_fail ("isLiveOnEntryDef(Use) && \"Null def but use not point to live on entry def\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1728, __extension__ __PRETTY_FUNCTION__))
1728 "Null def but use not point to live on entry def")(static_cast <bool> (isLiveOnEntryDef(Use) && "Null def but use not point to live on entry def"
) ? void (0) : __assert_fail ("isLiveOnEntryDef(Use) && \"Null def but use not point to live on entry def\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1728, __extension__ __PRETTY_FUNCTION__))
;
1729 else
1730 assert(is_contained(Def->users(), Use) &&(static_cast <bool> (is_contained(Def->users(), Use)
&& "Did not find use in def's use list") ? void (0) :
__assert_fail ("is_contained(Def->users(), Use) && \"Did not find use in def's use list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1731, __extension__ __PRETTY_FUNCTION__))
1731 "Did not find use in def's use list")(static_cast <bool> (is_contained(Def->users(), Use)
&& "Did not find use in def's use list") ? void (0) :
__assert_fail ("is_contained(Def->users(), Use) && \"Did not find use in def's use list\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1731, __extension__ __PRETTY_FUNCTION__))
;
1732#endif
1733}
1734
1735/// \brief Verify the immediate use information, by walking all the memory
1736/// accesses and verifying that, for each use, it appears in the
1737/// appropriate def's use list
1738void MemorySSA::verifyDefUses(Function &F) const {
1739 for (BasicBlock &B : F) {
1740 // Phi nodes are attached to basic blocks
1741 if (MemoryPhi *Phi = getMemoryAccess(&B)) {
1742 assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance((static_cast <bool> (Phi->getNumOperands() == static_cast
<unsigned>(std::distance( pred_begin(&B), pred_end(
&B))) && "Incomplete MemoryPhi Node") ? void (0) :
__assert_fail ("Phi->getNumOperands() == static_cast<unsigned>(std::distance( pred_begin(&B), pred_end(&B))) && \"Incomplete MemoryPhi Node\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1744, __extension__ __PRETTY_FUNCTION__))
1743 pred_begin(&B), pred_end(&B))) &&(static_cast <bool> (Phi->getNumOperands() == static_cast
<unsigned>(std::distance( pred_begin(&B), pred_end(
&B))) && "Incomplete MemoryPhi Node") ? void (0) :
__assert_fail ("Phi->getNumOperands() == static_cast<unsigned>(std::distance( pred_begin(&B), pred_end(&B))) && \"Incomplete MemoryPhi Node\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1744, __extension__ __PRETTY_FUNCTION__))
1744 "Incomplete MemoryPhi Node")(static_cast <bool> (Phi->getNumOperands() == static_cast
<unsigned>(std::distance( pred_begin(&B), pred_end(
&B))) && "Incomplete MemoryPhi Node") ? void (0) :
__assert_fail ("Phi->getNumOperands() == static_cast<unsigned>(std::distance( pred_begin(&B), pred_end(&B))) && \"Incomplete MemoryPhi Node\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1744, __extension__ __PRETTY_FUNCTION__))
;
1745 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1746 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1747 }
1748
1749 for (Instruction &I : B) {
1750 if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
1751 verifyUseInDefs(MA->getDefiningAccess(), MA);
1752 }
1753 }
1754 }
1755}
1756
1757MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
1758 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
1759}
1760
1761MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
1762 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
1763}
1764
1765/// Perform a local numbering on blocks so that instruction ordering can be
1766/// determined in constant time.
1767/// TODO: We currently just number in order. If we numbered by N, we could
1768/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
1769/// log2(N) sequences of mixed before and after) without needing to invalidate
1770/// the numbering.
1771void MemorySSA::renumberBlock(const BasicBlock *B) const {
1772 // The pre-increment ensures the numbers really start at 1.
1773 unsigned long CurrentNumber = 0;
1774 const AccessList *AL = getBlockAccesses(B);
1775 assert(AL != nullptr && "Asking to renumber an empty block")(static_cast <bool> (AL != nullptr && "Asking to renumber an empty block"
) ? void (0) : __assert_fail ("AL != nullptr && \"Asking to renumber an empty block\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1775, __extension__ __PRETTY_FUNCTION__))
;
1776 for (const auto &I : *AL)
1777 BlockNumbering[&I] = ++CurrentNumber;
1778 BlockNumberingValid.insert(B);
1779}
1780
1781/// \brief Determine, for two memory accesses in the same block,
1782/// whether \p Dominator dominates \p Dominatee.
1783/// \returns True if \p Dominator dominates \p Dominatee.
1784bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
1785 const MemoryAccess *Dominatee) const {
1786 const BasicBlock *DominatorBlock = Dominator->getBlock();
1787
1788 assert((DominatorBlock == Dominatee->getBlock()) &&(static_cast <bool> ((DominatorBlock == Dominatee->getBlock
()) && "Asking for local domination when accesses are in different blocks!"
) ? void (0) : __assert_fail ("(DominatorBlock == Dominatee->getBlock()) && \"Asking for local domination when accesses are in different blocks!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1789, __extension__ __PRETTY_FUNCTION__))
1789 "Asking for local domination when accesses are in different blocks!")(static_cast <bool> ((DominatorBlock == Dominatee->getBlock
()) && "Asking for local domination when accesses are in different blocks!"
) ? void (0) : __assert_fail ("(DominatorBlock == Dominatee->getBlock()) && \"Asking for local domination when accesses are in different blocks!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1789, __extension__ __PRETTY_FUNCTION__))
;
1790 // A node dominates itself.
1791 if (Dominatee == Dominator)
1792 return true;
1793
1794 // When Dominatee is defined on function entry, it is not dominated by another
1795 // memory access.
1796 if (isLiveOnEntryDef(Dominatee))
1797 return false;
1798
1799 // When Dominator is defined on function entry, it dominates the other memory
1800 // access.
1801 if (isLiveOnEntryDef(Dominator))
1802 return true;
1803
1804 if (!BlockNumberingValid.count(DominatorBlock))
1805 renumberBlock(DominatorBlock);
1806
1807 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
1808 // All numbers start with 1
1809 assert(DominatorNum != 0 && "Block was not numbered properly")(static_cast <bool> (DominatorNum != 0 && "Block was not numbered properly"
) ? void (0) : __assert_fail ("DominatorNum != 0 && \"Block was not numbered properly\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1809, __extension__ __PRETTY_FUNCTION__))
;
1810 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
1811 assert(DominateeNum != 0 && "Block was not numbered properly")(static_cast <bool> (DominateeNum != 0 && "Block was not numbered properly"
) ? void (0) : __assert_fail ("DominateeNum != 0 && \"Block was not numbered properly\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1811, __extension__ __PRETTY_FUNCTION__))
;
1812 return DominatorNum < DominateeNum;
1813}
1814
1815bool MemorySSA::dominates(const MemoryAccess *Dominator,
1816 const MemoryAccess *Dominatee) const {
1817 if (Dominator == Dominatee)
1818 return true;
1819
1820 if (isLiveOnEntryDef(Dominatee))
1821 return false;
1822
1823 if (Dominator->getBlock() != Dominatee->getBlock())
1824 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
1825 return locallyDominates(Dominator, Dominatee);
1826}
1827
1828bool MemorySSA::dominates(const MemoryAccess *Dominator,
1829 const Use &Dominatee) const {
1830 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
1831 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
1832 // The def must dominate the incoming block of the phi.
1833 if (UseBB != Dominator->getBlock())
1834 return DT->dominates(Dominator->getBlock(), UseBB);
1835 // If the UseBB and the DefBB are the same, compare locally.
1836 return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
1837 }
1838 // If it's not a PHI node use, the normal dominates can already handle it.
1839 return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
1840}
1841
1842const static char LiveOnEntryStr[] = "liveOnEntry";
1843
1844void MemoryAccess::print(raw_ostream &OS) const {
1845 switch (getValueID()) {
1846 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
1847 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
1848 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
1849 }
1850 llvm_unreachable("invalid value id")::llvm::llvm_unreachable_internal("invalid value id", "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1850)
;
1851}
1852
1853void MemoryDef::print(raw_ostream &OS) const {
1854 MemoryAccess *UO = getDefiningAccess();
1855
1856 OS << getID() << " = MemoryDef(";
1857 if (UO && UO->getID())
1858 OS << UO->getID();
1859 else
1860 OS << LiveOnEntryStr;
1861 OS << ')';
1862}
1863
1864void MemoryPhi::print(raw_ostream &OS) const {
1865 bool First = true;
1866 OS << getID() << " = MemoryPhi(";
1867 for (const auto &Op : operands()) {
1868 BasicBlock *BB = getIncomingBlock(Op);
1869 MemoryAccess *MA = cast<MemoryAccess>(Op);
1870 if (!First)
1871 OS << ',';
1872 else
1873 First = false;
1874
1875 OS << '{';
1876 if (BB->hasName())
1877 OS << BB->getName();
1878 else
1879 BB->printAsOperand(OS, false);
1880 OS << ',';
1881 if (unsigned ID = MA->getID())
1882 OS << ID;
1883 else
1884 OS << LiveOnEntryStr;
1885 OS << '}';
1886 }
1887 OS << ')';
1888}
1889
1890void MemoryUse::print(raw_ostream &OS) const {
1891 MemoryAccess *UO = getDefiningAccess();
1892 OS << "MemoryUse(";
1893 if (UO && UO->getID())
1894 OS << UO->getID();
1895 else
1896 OS << LiveOnEntryStr;
1897 OS << ')';
1898}
1899
1900void MemoryAccess::dump() const {
1901// Cannot completely remove virtual function even in release mode.
1902#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1903 print(dbgs());
1904 dbgs() << "\n";
1905#endif
1906}
1907
1908char MemorySSAPrinterLegacyPass::ID = 0;
1909
1910MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
1911 initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
1912}
1913
1914void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
1915 AU.setPreservesAll();
1916 AU.addRequired<MemorySSAWrapperPass>();
1917}
1918
1919bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
1920 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
1921 MSSA.print(dbgs());
1922 if (VerifyMemorySSA)
1923 MSSA.verifyMemorySSA();
1924 return false;
1925}
1926
1927AnalysisKey MemorySSAAnalysis::Key;
1928
1929MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
1930 FunctionAnalysisManager &AM) {
1931 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1932 auto &AA = AM.getResult<AAManager>(F);
1933 return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
1934}
1935
1936PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
1937 FunctionAnalysisManager &AM) {
1938 OS << "MemorySSA for function: " << F.getName() << "\n";
1939 AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
1940
1941 return PreservedAnalyses::all();
1942}
1943
1944PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
1945 FunctionAnalysisManager &AM) {
1946 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
1947
1948 return PreservedAnalyses::all();
1949}
1950
1951char MemorySSAWrapperPass::ID = 0;
1952
1953MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
1954 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
1955}
1956
1957void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
1958
1959void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1960 AU.setPreservesAll();
1961 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1962 AU.addRequiredTransitive<AAResultsWrapperPass>();
1963}
1964
1965bool MemorySSAWrapperPass::runOnFunction(Function &F) {
1966 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1967 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1968 MSSA.reset(new MemorySSA(F, &AA, &DT));
1969 return false;
1970}
1971
1972void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
1
Calling 'MemorySSA::verifyMemorySSA'
1973
1974void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
1975 MSSA->print(OS);
1976}
1977
1978MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
1979
1980MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
1981 DominatorTree *D)
1982 : MemorySSAWalker(M), Walker(*M, *A, *D) {}
1983
1984void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
1985 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1986 MUD->resetOptimized();
1987}
1988
1989/// \brief Walk the use-def chains starting at \p MA and find
1990/// the MemoryAccess that actually clobbers Loc.
1991///
1992/// \returns our clobbering memory access
1993MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
1994 MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
1995 MemoryAccess *New = Walker.findClobber(StartingAccess, Q);
1996#ifdef EXPENSIVE_CHECKS
1997 MemoryAccess *NewNoCache = Walker.findClobber(StartingAccess, Q);
1998 assert(NewNoCache == New && "Cache made us hand back a different result?")(static_cast <bool> (NewNoCache == New && "Cache made us hand back a different result?"
) ? void (0) : __assert_fail ("NewNoCache == New && \"Cache made us hand back a different result?\""
, "/build/llvm-toolchain-snapshot-6.0~svn318693/lib/Analysis/MemorySSA.cpp"
, 1998, __extension__ __PRETTY_FUNCTION__))
;
1999 (void)NewNoCache;
2000#endif
2001 if (AutoResetWalker)
2002 resetClobberWalker();
2003 return New;
2004}
2005
2006MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
2007 MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
2008 if (isa<MemoryPhi>(StartingAccess))
2009 return StartingAccess;
2010
2011 auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2012 if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2013 return StartingUseOrDef;
2014
2015 Instruction *I = StartingUseOrDef->getMemoryInst();
2016
2017 // Conservatively, fences are always clobbers, so don't perform the walk if we
2018 // hit a fence.
2019 if (!ImmutableCallSite(I) && I->isFenceLike())
2020 return StartingUseOrDef;
2021
2022 UpwardsMemoryQuery Q;
2023 Q.OriginalAccess = StartingUseOrDef;
2024 Q.StartingLoc = Loc;
2025 Q.Inst = I;
2026 Q.IsCall = false;
2027
2028 // Unlike the other function, do not walk to the def of a def, because we are
2029 // handed something we already believe is the clobbering access.
2030 MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2031 ? StartingUseOrDef->getDefiningAccess()
2032 : StartingUseOrDef;
2033
2034 MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
2035 DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << "Starting Memory SSA clobber for "
<< *I << " is "; } } while (false)
;
2036 DEBUG(dbgs() << *StartingUseOrDef << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << *StartingUseOrDef << "\n"
; } } while (false)
;
2037 DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << "Final Memory SSA clobber for "
<< *I << " is "; } } while (false)
;
2038 DEBUG(dbgs() << *Clobber << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << *Clobber << "\n"; } } while
(false)
;
2039 return Clobber;
2040}
2041
2042MemoryAccess *
2043MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2044 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2045 // If this is a MemoryPhi, we can't do anything.
2046 if (!StartingAccess)
2047 return MA;
2048
2049 // If this is an already optimized use or def, return the optimized result.
2050 // Note: Currently, we do not store the optimized def result because we'd need
2051 // a separate field, since we can't use it as the defining access.
2052 if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2053 if (MUD->isOptimized())
2054 return MUD->getOptimized();
2055
2056 const Instruction *I = StartingAccess->getMemoryInst();
2057 UpwardsMemoryQuery Q(I, StartingAccess);
2058 // We can't sanely do anything with fences; they conservatively
2059 // clobber all memory, and have no locations to get pointers from to
2060 // try to disambiguate.
2061 if (!Q.IsCall && I->isFenceLike())
2062 return StartingAccess;
2063
2064 if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
2065 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2066 if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2067 MUD->setOptimized(LiveOnEntry);
2068 return LiveOnEntry;
2069 }
2070
2071 // Start with the thing we already think clobbers this location
2072 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2073
2074 // At this point, DefiningAccess may be the live on entry def.
2075 // If it is, we will not get a better result.
2076 if (MSSA->isLiveOnEntryDef(DefiningAccess))
2077 return DefiningAccess;
2078
2079 MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
2080 DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << "Starting Memory SSA clobber for "
<< *I << " is "; } } while (false)
;
2081 DEBUG(dbgs() << *DefiningAccess << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << *DefiningAccess << "\n"
; } } while (false)
;
2082 DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << "Final Memory SSA clobber for "
<< *I << " is "; } } while (false)
;
2083 DEBUG(dbgs() << *Result << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memoryssa")) { dbgs() << *Result << "\n"; } } while
(false)
;
2084 if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2085 MUD->setOptimized(Result);
2086
2087 return Result;
2088}
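A typical client of the walker implemented above looks roughly like this. This is a hypothetical helper written for this report; it only uses API that appears in this file.

const llvm::MemoryAccess *clobberOf(llvm::MemorySSA &MSSA,
                                    llvm::Instruction *I) {
  // Null when I neither reads nor writes memory (see createNewAccess above).
  llvm::MemoryUseOrDef *MA = MSSA.getMemoryAccess(I);
  if (!MA)
    return nullptr;
  llvm::MemoryAccess *Clobber =
      MSSA.getWalker()->getClobberingMemoryAccess(MA);
  // liveOnEntry means nothing inside this function clobbers I's location.
  return MSSA.isLiveOnEntryDef(Clobber) ? nullptr : Clobber;
}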
2089
2090MemoryAccess *
2091DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2092 if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2093 return Use->getDefiningAccess();
2094 return MA;
2095}
2096
2097MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2098 MemoryAccess *StartingAccess, const MemoryLocation &) {
2099 if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2100 return Use->getDefiningAccess();
2101 return StartingAccess;
2102}
2103
2104void MemoryPhi::deleteMe(DerivedUser *Self) {
2105 delete static_cast<MemoryPhi *>(Self);
2106}
2107
2108void MemoryDef::deleteMe(DerivedUser *Self) {
2109 delete static_cast<MemoryDef *>(Self);
2110}
2111
2112void MemoryUse::deleteMe(DerivedUser *Self) {
2113 delete static_cast<MemoryUse *>(Self);
2114}

/build/llvm-toolchain-snapshot-6.0~svn318693/include/llvm/Analysis/MemorySSA.h

1//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// \brief This file exposes an interface to building/using memory SSA to
12/// walk memory instructions using a use/def graph.
13///
14/// Memory SSA class builds an SSA form that links together memory access
15/// instructions such as loads, stores, atomics, and calls. Additionally, it
16/// does a trivial form of "heap versioning": every time the memory state changes
17/// in the program, we generate a new heap version. It generates
18/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
19///
20/// As a trivial example,
21/// define i32 @main() #0 {
22/// entry:
23/// %call = call noalias i8* @_Znwm(i64 4) #2
24/// %0 = bitcast i8* %call to i32*
25/// %call1 = call noalias i8* @_Znwm(i64 4) #2
26/// %1 = bitcast i8* %call1 to i32*
27/// store i32 5, i32* %0, align 4
28/// store i32 7, i32* %1, align 4
29/// %2 = load i32* %0, align 4
30/// %3 = load i32* %1, align 4
31/// %add = add nsw i32 %2, %3
32/// ret i32 %add
33/// }
34///
35/// Will become
36/// define i32 @main() #0 {
37/// entry:
38/// ; 1 = MemoryDef(0)
39/// %call = call noalias i8* @_Znwm(i64 4) #3
40/// %2 = bitcast i8* %call to i32*
41/// ; 2 = MemoryDef(1)
42/// %call1 = call noalias i8* @_Znwm(i64 4) #3
43/// %4 = bitcast i8* %call1 to i32*
44/// ; 3 = MemoryDef(2)
45/// store i32 5, i32* %2, align 4
46/// ; 4 = MemoryDef(3)
47/// store i32 7, i32* %4, align 4
48/// ; MemoryUse(3)
49/// %7 = load i32* %2, align 4
50/// ; MemoryUse(4)
51/// %8 = load i32* %4, align 4
52/// %add = add nsw i32 %7, %8
53/// ret i32 %add
54/// }
55///
56/// Given this form, all the stores that could ever affect the load at %8 can be
57/// gotten by using the MemoryUse associated with it, and walking from use to
58/// def until you hit the top of the function.
59///
60/// Each def also has a list of users associated with it, so you can walk from
61/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
62/// but not the RHS of MemoryDefs. You can see this above at %7, which would
63/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
64/// store, all the MemoryUses on its use lists are may-aliases of that store
65/// (but the MemoryDefs on its use list may not be).
66///
67/// MemoryDefs are not disambiguated because it would require multiple reaching
68/// definitions, which would require multiple phis, and multiple memoryaccesses
69/// per instruction.
70//
71//===----------------------------------------------------------------------===//
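
The header comment above describes reaching every store that could affect a load by starting at its MemoryUse and walking from use to def until the top of the function. A minimal sketch of that walk, assuming a hypothetical MemorySSA &MSSA and an Instruction *TheLoad (both names are assumptions):

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

void walkToEntry(MemorySSA &MSSA, Instruction *TheLoad) {
  MemoryUseOrDef *MU = MSSA.getMemoryAccess(TheLoad);
  MemoryAccess *Def = MU->getDefiningAccess();
  while (!MSSA.isLiveOnEntryDef(Def)) {
    // Def is a MemoryDef or MemoryPhi that may clobber the load's location.
    auto *MUD = dyn_cast<MemoryUseOrDef>(Def);
    if (!MUD)
      break; // reached a MemoryPhi; follow its incoming values to keep going
    Def = MUD->getDefiningAccess();
  }
}
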
72
73#ifndef LLVM_ANALYSIS_MEMORYSSA_H
74#define LLVM_ANALYSIS_MEMORYSSA_H
75
76#include "llvm/ADT/DenseMap.h"
77#include "llvm/ADT/GraphTraits.h"
78#include "llvm/ADT/SmallPtrSet.h"
79#include "llvm/ADT/SmallVector.h"
80#include "llvm/ADT/ilist.h"
81#include "llvm/ADT/ilist_node.h"
82#include "llvm/ADT/iterator.h"
83#include "llvm/ADT/iterator_range.h"
84#include "llvm/ADT/simple_ilist.h"
85#include "llvm/Analysis/AliasAnalysis.h"
86#include "llvm/Analysis/MemoryLocation.h"
87#include "llvm/Analysis/PHITransAddr.h"
88#include "llvm/IR/BasicBlock.h"
89#include "llvm/IR/DerivedUser.h"
90#include "llvm/IR/Dominators.h"
91#include "llvm/IR/Module.h"
92#include "llvm/IR/Type.h"
93#include "llvm/IR/Use.h"
94#include "llvm/IR/User.h"
95#include "llvm/IR/Value.h"
96#include "llvm/Pass.h"
97#include "llvm/Support/Casting.h"
98#include <algorithm>
99#include <cassert>
100#include <cstddef>
101#include <iterator>
102#include <memory>
103#include <utility>
104
105namespace llvm {
106
107class Function;
108class Instruction;
109class MemoryAccess;
110class MemorySSAWalker;
111class LLVMContext;
112class raw_ostream;
113
114namespace MSSAHelpers {
115
116struct AllAccessTag {};
117struct DefsOnlyTag {};
118
119} // end namespace MSSAHelpers
120
121enum {
122 // Used to signify what the default invalid ID is for MemoryAccess's
123 // getID()
124 INVALID_MEMORYACCESS_ID = 0
125};
126
127template <class T> class memoryaccess_def_iterator_base;
128using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
129using const_memoryaccess_def_iterator =
130 memoryaccess_def_iterator_base<const MemoryAccess>;
131
132// \brief The base for all memory accesses. All memory accesses in a block are
133// linked together using an intrusive list.
134class MemoryAccess
135 : public DerivedUser,
136 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
137 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
138public:
139 using AllAccessType =
140 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
141 using DefsOnlyType =
142 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
143
144 MemoryAccess(const MemoryAccess &) = delete;
145 MemoryAccess &operator=(const MemoryAccess &) = delete;
146
147 void *operator new(size_t) = delete;
148
149 // Methods for support type inquiry through isa, cast, and
150 // dyn_cast
151 static bool classof(const Value *V) {
152 unsigned ID = V->getValueID();
153 return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
154 }
155
156 BasicBlock *getBlock() const { return Block; }
157
158 void print(raw_ostream &OS) const;
159 void dump() const;
160
161 /// \brief The user iterators for a memory access
162 using iterator = user_iterator;
163 using const_iterator = const_user_iterator;
164
165 /// \brief This iterator walks over all of the defs in a given
166 /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
167 /// MemoryUse/MemoryDef, this walks the defining access.
168 memoryaccess_def_iterator defs_begin();
169 const_memoryaccess_def_iterator defs_begin() const;
170 memoryaccess_def_iterator defs_end();
171 const_memoryaccess_def_iterator defs_end() const;
172
173 /// \brief Get the iterators for the all access list and the defs only list
174 /// We default to the all access list.
175 AllAccessType::self_iterator getIterator() {
176 return this->AllAccessType::getIterator();
177 }
178 AllAccessType::const_self_iterator getIterator() const {
179 return this->AllAccessType::getIterator();
180 }
181 AllAccessType::reverse_self_iterator getReverseIterator() {
182 return this->AllAccessType::getReverseIterator();
183 }
184 AllAccessType::const_reverse_self_iterator getReverseIterator() const {
185 return this->AllAccessType::getReverseIterator();
186 }
187 DefsOnlyType::self_iterator getDefsIterator() {
188 return this->DefsOnlyType::getIterator();
189 }
190 DefsOnlyType::const_self_iterator getDefsIterator() const {
191 return this->DefsOnlyType::getIterator();
192 }
193 DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
194 return this->DefsOnlyType::getReverseIterator();
195 }
196 DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
197 return this->DefsOnlyType::getReverseIterator();
198 }
199
200protected:
201 friend class MemoryDef;
202 friend class MemoryPhi;
203 friend class MemorySSA;
204 friend class MemoryUse;
205 friend class MemoryUseOrDef;
206
207 /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
208 /// moved.
209 void setBlock(BasicBlock *BB) { Block = BB; }
210
211 /// \brief Used for debugging and tracking things about MemoryAccesses.
212 /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
213 inline unsigned getID() const;
214
215 MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
216 BasicBlock *BB, unsigned NumOperands)
217 : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
218 Block(BB) {}
219
220private:
221 BasicBlock *Block;
222};
223
224inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
225 MA.print(OS);
226 return OS;
227}
228
229/// \brief Class that has the common methods + fields of memory uses/defs. It's
230/// a little awkward to have, but there are many cases where we want either a
231/// use or def, and there are many cases where uses are needed (defs aren't
232/// acceptable), and vice-versa.
233///
234/// This class should never be instantiated directly; make a MemoryUse or
235/// MemoryDef instead.
236class MemoryUseOrDef : public MemoryAccess {
237public:
238 void *operator new(size_t) = delete;
239
240 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
241
242 /// \brief Get the instruction that this MemoryUse represents.
243 Instruction *getMemoryInst() const { return MemoryInst; }
244
245 /// \brief Get the access that produces the memory state used by this Use.
246 MemoryAccess *getDefiningAccess() const { return getOperand(0); }
247
248 static bool classof(const Value *MA) {
249 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
250 }
251
252 // Sadly, these have to be public because they are needed in some of the
253 // iterators.
254 inline bool isOptimized() const;
255 inline MemoryAccess *getOptimized() const;
256 inline void setOptimized(MemoryAccess *);
257
258 /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
259 /// be rewalked by the walker if necessary.
260 /// This really should only be called by tests.
261 inline void resetOptimized();
262
263protected:
264 friend class MemorySSA;
265 friend class MemorySSAUpdater;
266
267 MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
268 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
269 : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInst(MI) {
270 setDefiningAccess(DMA);
271 }
272
273 void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
274 if (!Optimized) {
275 setOperand(0, DMA);
276 return;
277 }
278 setOptimized(DMA);
279 }
280
281private:
282 Instruction *MemoryInst;
283};
284
285template <>
286struct OperandTraits<MemoryUseOrDef>
287 : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
288DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
289
290/// \brief Represents read-only accesses to memory
291///
292/// In particular, the set of Instructions that will be represented by
293/// MemoryUse's is exactly the set of Instructions for which
294/// AliasAnalysis::getModRefInfo returns "Ref".
295class MemoryUse final : public MemoryUseOrDef {
296public:
297 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
298
299 MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
300 : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}
301
302 // allocate space for exactly one operand
303 void *operator new(size_t s) { return User::operator new(s, 1); }
304
305 static bool classof(const Value *MA) {
306 return MA->getValueID() == MemoryUseVal;
307 }
308
309 void print(raw_ostream &OS) const;
310
311 void setOptimized(MemoryAccess *DMA) {
312 OptimizedID = DMA->getID();
313 setOperand(0, DMA);
314 }
315
316 bool isOptimized() const {
317 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
318 }
319
320 MemoryAccess *getOptimized() const {
321 return getDefiningAccess();
322 }
323
324 void resetOptimized() {
325 OptimizedID = INVALID_MEMORYACCESS_ID;
326 }
327
328protected:
329 friend class MemorySSA;
330
331private:
332 static void deleteMe(DerivedUser *Self);
333
334 unsigned int OptimizedID = 0;
335};
336
337template <>
338struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
339DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
340
341/// \brief Represents a read-write access to memory, whether it is a must-alias,
342/// or a may-alias.
343///
344/// In particular, the set of Instructions that will be represented by
345/// MemoryDef's is exactly the set of Instructions for which
346/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
347/// Note that, in order to provide def-def chains, all defs also have a use
348/// associated with them. This use points to the nearest reaching
349/// MemoryDef/MemoryPhi.
350class MemoryDef final : public MemoryUseOrDef {
351public:
352 friend class MemorySSA;
353
354 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
355
356 MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
357 unsigned Ver)
358 : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}
359
360 // allocate space for exactly one operand
361 void *operator new(size_t s) { return User::operator new(s, 1); }
362
363 static bool classof(const Value *MA) {
364 return MA->getValueID() == MemoryDefVal;
365 }
366
367 void setOptimized(MemoryAccess *MA) {
368 Optimized = MA;
369 OptimizedID = getDefiningAccess()->getID();
370 }
371
372 MemoryAccess *getOptimized() const { return Optimized; }
373
374 bool isOptimized() const {
375 return getOptimized() && getDefiningAccess() &&
376 OptimizedID == getDefiningAccess()->getID();
377 }
378
379 void resetOptimized() {
380 OptimizedID = INVALID_MEMORYACCESS_ID;
381 }
382
383 void print(raw_ostream &OS) const;
384
385 unsigned getID() const { return ID; }
386
387private:
388 static void deleteMe(DerivedUser *Self);
389
390 const unsigned ID;
391 MemoryAccess *Optimized = nullptr;
392 unsigned int OptimizedID = INVALID_MEMORYACCESS_ID;
393};
394
395template <>
396struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
397DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
398
399/// \brief Represents phi nodes for memory accesses.
400///
401/// These have the same semantics as regular phi nodes, with the exception that
402/// only one phi will ever exist in a given basic block.
403/// Guaranteeing one phi per block means guaranteeing there is only ever one
404/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
405/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
406/// a MemoryPhi's operands.
407/// That is, given
408/// if (a) {
409/// store %a
410/// store %b
411/// }
412/// it *must* be transformed into
413/// if (a) {
414/// 1 = MemoryDef(liveOnEntry)
415/// store %a
416/// 2 = MemoryDef(1)
417/// store %b
418/// }
419/// and *not*
420/// if (a) {
421/// 1 = MemoryDef(liveOnEntry)
422/// store %a
423/// 2 = MemoryDef(liveOnEntry)
424/// store %b
425/// }
426/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
427/// end of the branch, and if there are not two phi nodes, one will be
428/// disconnected completely from the SSA graph below that point.
429/// Because MemoryUse's do not generate new definitions, they do not have this
430/// issue.
431class MemoryPhi final : public MemoryAccess {
432 // allocate space for exactly zero operands
433 void *operator new(size_t s) { return User::operator new(s); }
434
435public:
436 /// Provide fast operand accessors
437 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
438
439 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
440 : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
441 ReservedSpace(NumPreds) {
442 allocHungoffUses(ReservedSpace);
443 }
444
445 // Block iterator interface. This provides access to the list of incoming
446 // basic blocks, which parallels the list of incoming values.
447 using block_iterator = BasicBlock **;
448 using const_block_iterator = BasicBlock *const *;
449
450 block_iterator block_begin() {
451 auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
452 return reinterpret_cast<block_iterator>(Ref + 1);
453 }
454
455 const_block_iterator block_begin() const {
456 const auto *Ref =
457 reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
458 return reinterpret_cast<const_block_iterator>(Ref + 1);
459 }
460
461 block_iterator block_end() { return block_begin() + getNumOperands(); }
462
463 const_block_iterator block_end() const {
464 return block_begin() + getNumOperands();
465 }
466
467 iterator_range<block_iterator> blocks() {
468 return make_range(block_begin(), block_end());
469 }
470
471 iterator_range<const_block_iterator> blocks() const {
472 return make_range(block_begin(), block_end());
473 }
474
475 op_range incoming_values() { return operands(); }
476
477 const_op_range incoming_values() const { return operands(); }
478
479 /// \brief Return the number of incoming edges
480 unsigned getNumIncomingValues() const { return getNumOperands(); }
481
482 /// \brief Return incoming value number x
483 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
484 void setIncomingValue(unsigned I, MemoryAccess *V) {
485 assert(V && "PHI node got a null value!");
486 setOperand(I, V);
487 }
488
489 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
490 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
491
492 /// \brief Return incoming basic block number @p i.
493 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
494
495 /// \brief Return incoming basic block corresponding
496 /// to an operand of the PHI.
497 BasicBlock *getIncomingBlock(const Use &U) const {
498 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
499 return getIncomingBlock(unsigned(&U - op_begin()));
500 }
501
502 /// \brief Return incoming basic block corresponding
503 /// to value use iterator.
504 BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
505 return getIncomingBlock(I.getUse());
506 }
507
508 void setIncomingBlock(unsigned I, BasicBlock *BB) {
509 assert(BB && "PHI node got a null basic block!");
510 block_begin()[I] = BB;
511 }
512
513 /// \brief Add an incoming value to the end of the PHI list
514 void addIncoming(MemoryAccess *V, BasicBlock *BB) {
515 if (getNumOperands() == ReservedSpace)
516 growOperands(); // Get more space!
517 // Initialize some new operands.
518 setNumHungOffUseOperands(getNumOperands() + 1);
519 setIncomingValue(getNumOperands() - 1, V);
520 setIncomingBlock(getNumOperands() - 1, BB);
521 }
522
523 /// \brief Return the first index of the specified basic
524 /// block in the value list for this PHI. Returns -1 if no instance.
525 int getBasicBlockIndex(const BasicBlock *BB) const {
526 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
527 if (block_begin()[I] == BB)
528 return I;
529 return -1;
530 }
531
532 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
533 int Idx = getBasicBlockIndex(BB);
534 assert(Idx >= 0 && "Invalid basic block argument!");
535 return getIncomingValue(Idx);
536 }
537
538 static bool classof(const Value *V) {
539 return V->getValueID() == MemoryPhiVal;
540 }
541
542 void print(raw_ostream &OS) const;
543
544 unsigned getID() const { return ID; }
545
546protected:
547 friend class MemorySSA;
548
549 /// \brief This is more complicated than the generic
550 /// User::allocHungoffUses, because we have to allocate Uses for the incoming
551 /// values and pointers to the incoming blocks, all in one allocation.
552 void allocHungoffUses(unsigned N) {
553 User::allocHungoffUses(N, /* IsPhi */ true);
554 }
555
556private:
557 // For debugging only
558 const unsigned ID;
559 unsigned ReservedSpace;
560
561 /// \brief This grows the operand list in response to a push_back style of
562 /// operation. This grows the number of ops by 1.5 times.
563 void growOperands() {
564 unsigned E = getNumOperands();
565 // 2 op PHI nodes are VERY common, so reserve at least enough for that.
566 ReservedSpace = std::max(E + E / 2, 2u);
567 growHungoffUses(ReservedSpace, /* IsPhi */ true);
568 }
569
570 static void deleteMe(DerivedUser *Self);
571};
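
As the comment above the class explains, at most one MemoryPhi exists per block, and each incoming edge contributes exactly one reaching access. A small sketch of reading a phi's incoming values, assuming a hypothetical MemorySSA &MSSA and merge block BasicBlock *MergeBB (assumed names):

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

void inspectPhi(MemorySSA &MSSA, BasicBlock *MergeBB) {
  if (MemoryPhi *Phi = MSSA.getMemoryAccess(MergeBB)) {
    for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
      BasicBlock *Pred = Phi->getIncomingBlock(I);
      MemoryAccess *Reaching = Phi->getIncomingValue(I);
      // Reaching is the single MemoryDef/MemoryPhi that reaches the end of Pred.
      (void)Pred;
      (void)Reaching;
    }
  }
}
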
572
573inline unsigned MemoryAccess::getID() const {
574 assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
575 "only memory defs and phis have ids");
576 if (const auto *MD = dyn_cast<MemoryDef>(this))
577 return MD->getID();
578 return cast<MemoryPhi>(this)->getID();
579}
580
581inline bool MemoryUseOrDef::isOptimized() const {
582 if (const auto *MD = dyn_cast<MemoryDef>(this))
583 return MD->isOptimized();
584 return cast<MemoryUse>(this)->isOptimized();
585}
586
587inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
588 if (const auto *MD = dyn_cast<MemoryDef>(this))
589 return MD->getOptimized();
590 return cast<MemoryUse>(this)->getOptimized();
591}
592
593inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
594 if (auto *MD = dyn_cast<MemoryDef>(this))
595 MD->setOptimized(MA);
596 else
597 cast<MemoryUse>(this)->setOptimized(MA);
598}
599
600inline void MemoryUseOrDef::resetOptimized() {
601 if (auto *MD = dyn_cast<MemoryDef>(this))
602 MD->resetOptimized();
603 else
604 cast<MemoryUse>(this)->resetOptimized();
605}
606
607template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
608DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
609
610/// \brief Encapsulates MemorySSA, including all data associated with memory
611/// accesses.
612class MemorySSA {
613public:
614 MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
615 ~MemorySSA();
616
617 MemorySSAWalker *getWalker();
618
619 /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
620 /// access associated with it. If passed a basic block gets the memory phi
621 /// node that exists for that block, if there is one. Otherwise, this will get
622 /// a MemoryUseOrDef.
623 MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
624 MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
625
626 void dump() const;
627 void print(raw_ostream &) const;
628
629 /// \brief Return true if \p MA represents the live on entry value
630 ///
631 /// Loads and stores from pointer arguments and other global values may be
632 /// defined by memory operations that do not occur in the current function, so
633 /// they may be live on entry to the function. MemorySSA represents such
634 /// memory state by the live on entry definition, which is guaranteed to occur
635 /// before any other memory access in the function.
636 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
637 return MA == LiveOnEntryDef.get();
638 }
639
640 inline MemoryAccess *getLiveOnEntryDef() const {
641 return LiveOnEntryDef.get();
642 }
643
644 // Sadly, iplists, by default, own and delete pointers added to the
645 // list. It's not currently possible to have two iplists for the same type,
646 // where one owns the pointers, and one does not. This is because the traits
647 // are per-type, not per-tag. If this ever changes, we should make the
648 // DefList an iplist.
649 using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
650 using DefsList =
651 simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
652
653 /// \brief Return the list of MemoryAccess's for a given basic block.
654 ///
655 /// This list is not modifiable by the user.
656 const AccessList *getBlockAccesses(const BasicBlock *BB) const {
657 return getWritableBlockAccesses(BB);
4: Calling 'MemorySSA::getWritableBlockAccesses'
19: Returning from 'MemorySSA::getWritableBlockAccesses'
27: Calling 'MemorySSA::getWritableBlockAccesses'
42: Returning from 'MemorySSA::getWritableBlockAccesses'
658 }
659
660 /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
661 /// block.
662 ///
663 /// This list is not modifiable by the user.
664 const DefsList *getBlockDefs(const BasicBlock *BB) const {
665 return getWritableBlockDefs(BB);
666 }
667
668 /// \brief Given two memory accesses in the same basic block, determine
669 /// whether MemoryAccess \p A dominates MemoryAccess \p B.
670 bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
671
672 /// \brief Given two memory accesses in potentially different blocks,
673 /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
674 bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
675
676 /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
677 /// dominates Use \p B.
678 bool dominates(const MemoryAccess *A, const Use &B) const;
679
680 /// \brief Verify that MemorySSA is self consistent (IE definitions dominate
681 /// all uses, uses appear in the right places). This is used by unit tests.
682 void verifyMemorySSA() const;
683
684 /// Used in various insertion functions to specify whether we are talking
685 /// about the beginning or end of a block.
686 enum InsertionPlace { Beginning, End };
687
688protected:
689 // Used by the Memory SSA annotator, dumpers, and wrapper pass
690 friend class MemorySSAAnnotatedWriter;
691 friend class MemorySSAPrinterLegacyPass;
692 friend class MemorySSAUpdater;
693
694 void verifyDefUses(Function &F) const;
695 void verifyDomination(Function &F) const;
696 void verifyOrdering(Function &F) const;
697
698 // This is used by the use optimizer and updater.
699 AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
700 auto It = PerBlockAccesses.find(BB);
701 return It == PerBlockAccesses.end() ? nullptr : It->second.get();
5: Assuming the condition is false
6: '?' condition is false
7: Calling 'unique_ptr::get'
18: Returning from 'unique_ptr::get'
28: Assuming the condition is false
29: '?' condition is false
30: Calling 'unique_ptr::get'
41: Returning from 'unique_ptr::get'
702 }
703
704 // This is used by the use optimizer and updater.
705 DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
706 auto It = PerBlockDefs.find(BB);
707 return It == PerBlockDefs.end() ? nullptr : It->second.get();
708 }
709
710 // These are used by the updater to perform various internal MemorySSA
711 // machinations. They do not always leave the IR in a correct state, and
712 // rely on the updater to fix up what they break, so they are not public.
713
714 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
715 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
716
717 // Rename the dominator tree branch rooted at BB.
718 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
719 SmallPtrSetImpl<BasicBlock *> &Visited) {
720 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
721 }
722
723 void removeFromLookups(MemoryAccess *);
724 void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
725 void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
726 InsertionPlace);
727 void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
728 AccessList::iterator);
729 MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
730
731private:
732 class CachingWalker;
733 class OptimizeUses;
734
735 CachingWalker *getWalkerImpl();
736 void buildMemorySSA();
737 void optimizeUses();
738
739 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
740
741 using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
742 using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
743
744 void
745 determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
746 void markUnreachableAsLiveOnEntry(BasicBlock *BB);
747 bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
748 MemoryPhi *createMemoryPhi(BasicBlock *BB);
749 MemoryUseOrDef *createNewAccess(Instruction *);
750 MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
751 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
752 const DenseMap<const BasicBlock *, unsigned int> &);
753 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
754 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
755 void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
756 SmallPtrSetImpl<BasicBlock *> &Visited,
757 bool SkipVisited = false, bool RenameAllUses = false);
758 AccessList *getOrCreateAccessList(const BasicBlock *);
759 DefsList *getOrCreateDefsList(const BasicBlock *);
760 void renumberBlock(const BasicBlock *) const;
761 AliasAnalysis *AA;
762 DominatorTree *DT;
763 Function &F;
764
765 // Memory SSA mappings
766 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
767
768 // These two mappings contain the main block to access/def mappings for
769 // MemorySSA. The list contained in PerBlockAccesses really owns all the
770 // MemoryAccesses.
771 // Both maps maintain the invariant that if a block is found in them, the
772 // corresponding list is not empty, and if a block is not found in them, the
773 // corresponding list is empty.
774 AccessMap PerBlockAccesses;
775 DefsMap PerBlockDefs;
776 std::unique_ptr<MemoryAccess> LiveOnEntryDef;
777
778 // Domination mappings
779 // Note that the numbering is local to a block, even though the map is
780 // global.
781 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
782 mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
783
784 // Memory SSA building info
785 std::unique_ptr<CachingWalker> Walker;
786 unsigned NextID;
787};
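
getBlockAccesses() and getBlockDefs() above return nullptr for blocks with no accesses (the maps only hold non-empty lists, per the invariant noted in the comment). A sketch of dumping every access in a function, assuming a hypothetical valid pair Function &F / MemorySSA &MSSA (helper name is also assumed):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void printAccesses(Function &F, MemorySSA &MSSA) {
  for (const BasicBlock &BB : F) {
    const MemorySSA::AccessList *Accesses = MSSA.getBlockAccesses(&BB);
    if (!Accesses)
      continue; // no MemoryUse/Def/Phi in this block
    for (const MemoryAccess &MA : *Accesses) {
      MA.print(errs()); // the block's MemoryPhi (if any), then uses/defs in order
      errs() << "\n";
    }
  }
}
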
788
789// Internal MemorySSA utils, for use by MemorySSA classes and walkers
790class MemorySSAUtil {
791protected:
792 friend class GVNHoist;
793 friend class MemorySSAWalker;
794
795 // This function should not be used by new passes.
796 static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
797 AliasAnalysis &AA);
798};
799
800// This pass does eager building and then printing of MemorySSA. It is used by
801// the tests to be able to build, dump, and verify Memory SSA.
802class MemorySSAPrinterLegacyPass : public FunctionPass {
803public:
804 MemorySSAPrinterLegacyPass();
805
806 bool runOnFunction(Function &) override;
807 void getAnalysisUsage(AnalysisUsage &AU) const override;
808
809 static char ID;
810};
811
812/// An analysis that produces \c MemorySSA for a function.
813///
814class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
815 friend AnalysisInfoMixin<MemorySSAAnalysis>;
816
817 static AnalysisKey Key;
818
819public:
820 // Wrap MemorySSA result to ensure address stability of internal MemorySSA
821 // pointers after construction. Use a wrapper class instead of plain
822 // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
823 struct Result {
824 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
825
826 MemorySSA &getMSSA() { return *MSSA.get(); }
827
828 std::unique_ptr<MemorySSA> MSSA;
829 };
830
831 Result run(Function &F, FunctionAnalysisManager &AM);
832};
833
834/// \brief Printer pass for \c MemorySSA.
835class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
836 raw_ostream &OS;
837
838public:
839 explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
840
841 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
842};
843
844/// \brief Verifier pass for \c MemorySSA.
845struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
846 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
847};
848
849/// \brief Legacy analysis pass which computes \c MemorySSA.
850class MemorySSAWrapperPass : public FunctionPass {
851public:
852 MemorySSAWrapperPass();
853
854 static char ID;
855
856 bool runOnFunction(Function &) override;
857 void releaseMemory() override;
858 MemorySSA &getMSSA() { return *MSSA; }
859 const MemorySSA &getMSSA() const { return *MSSA; }
860
861 void getAnalysisUsage(AnalysisUsage &AU) const override;
862
863 void verifyAnalysis() const override;
864 void print(raw_ostream &OS, const Module *M = nullptr) const override;
865
866private:
867 std::unique_ptr<MemorySSA> MSSA;
868};
869
870/// \brief This is the generic walker interface for walkers of MemorySSA.
871/// Walkers are used to be able to further disambiguate the def-use chains
872/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
873/// you.
874/// In particular, while the def-use chains provide basic information, and are
875/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
876 /// MemoryUse as AliasAnalysis considers it, a user may want better or other
877/// information. In particular, they may want to use SCEV info to further
878/// disambiguate memory accesses, or they may want the nearest dominating
879/// may-aliasing MemoryDef for a call or a store. This API enables a
880/// standardized interface to getting and using that info.
881class MemorySSAWalker {
882public:
883 MemorySSAWalker(MemorySSA *);
884 virtual ~MemorySSAWalker() = default;
885
886 using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
887
888 /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
889 /// will give you the nearest dominating MemoryAccess that Mod's the location
890 /// the instruction accesses (by skipping any def which AA can prove does not
891 /// alias the location(s) accessed by the instruction given).
892 ///
893 /// Note that this will return a single access, and it must dominate the
894 /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
895 /// this will return the MemoryPhi, not the operand. This means that
896 /// given:
897 /// if (a) {
898 /// 1 = MemoryDef(liveOnEntry)
899 /// store %a
900 /// } else {
901 /// 2 = MemoryDef(liveOnEntry)
902 /// store %b
903 /// }
904 /// 3 = MemoryPhi(2, 1)
905 /// MemoryUse(3)
906 /// load %a
907 ///
908 /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
909 /// in the if (a) branch.
910 MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
911 MemoryAccess *MA = MSSA->getMemoryAccess(I);
912 assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
913 return getClobberingMemoryAccess(MA);
914 }
915
916 /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
917 /// but takes a MemoryAccess instead of an Instruction.
918 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
919
920 /// \brief Given a potentially clobbering memory access and a new location,
921 /// calling this will give you the nearest dominating clobbering MemoryAccess
922 /// (by skipping non-aliasing def links).
923 ///
924 /// This version of the function is mainly used to disambiguate phi translated
925 /// pointers, where the value of a pointer may have changed from the initial
926 /// memory access. Note that this expects to be handed either a MemoryUse,
927 /// or an already potentially clobbering access. Unlike the above API, if
928 /// given a MemoryDef that clobbers the pointer as the starting access, it
929 /// will return that MemoryDef, whereas the above would return the clobber
930 /// starting from the use side of the memory def.
931 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
932 const MemoryLocation &) = 0;
933
934 /// \brief Given a memory access, invalidate anything this walker knows about
935 /// that access.
936 /// This API is used by walkers that store information to perform basic cache
937 /// invalidation. This will be called by MemorySSA at appropriate times for
938 /// the walker it uses or returns.
939 virtual void invalidateInfo(MemoryAccess *) {}
940
941 virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
942
943protected:
944 friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
945 // constructor.
946 MemorySSA *MSSA;
947};
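
Tying the walker comment's if/else example to the API: querying the clobber of the load returns the MemoryPhi, not either branch's MemoryDef. A sketch, assuming a hypothetical MemorySSA &MSSA and Instruction *LoadA standing in for the `load %a` of the example (assumed names):

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

MemoryAccess *clobberOfLoad(MemorySSA &MSSA, Instruction *LoadA) {
  MemorySSAWalker *Walker = MSSA.getWalker();
  // For the comment's example this returns 3 = MemoryPhi(2, 1): the nearest
  // dominating access, never a MemoryDef from inside only one branch.
  return Walker->getClobberingMemoryAccess(LoadA);
}
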
948
949/// \brief A MemorySSAWalker that does no alias queries, or anything else. It
950/// simply returns the links as they were constructed by the builder.
951class DoNothingMemorySSAWalker final : public MemorySSAWalker {
952public:
953 // Keep the overrides below from hiding the Instruction overload of
954 // getClobberingMemoryAccess.
955 using MemorySSAWalker::getClobberingMemoryAccess;
956
957 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
958 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
959 const MemoryLocation &) override;
960};
961
962using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
963using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
964
965/// \brief Iterator base class used to implement const and non-const iterators
966/// over the defining accesses of a MemoryAccess.
967template <class T>
968class memoryaccess_def_iterator_base
969 : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
970 std::forward_iterator_tag, T, ptrdiff_t, T *,
971 T *> {
972 using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
973
974public:
975 memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
976 memoryaccess_def_iterator_base() = default;
977
978 bool operator==(const memoryaccess_def_iterator_base &Other) const {
979 return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
980 }
981
982 // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
983 // block from the operand in constant time (In a PHINode, the uselist has
984 // both, so it's just subtraction). We provide it as part of the
985 // iterator to avoid callers having to linear walk to get the block.
986 // If the operation becomes constant time on MemoryPHI's, this bit of
987 // abstraction breaking should be removed.
988 BasicBlock *getPhiArgBlock() const {
989 MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
990 assert(MP && "Tried to get phi arg block when not iterating over a PHI");
991 return MP->getIncomingBlock(ArgNo);
992 }
993
994 typename BaseT::iterator::pointer operator*() const {
995 assert(Access && "Tried to access past the end of our iterator");
996 // Go to the first argument for phis, and the defining access for everything
997 // else.
998 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
999 return MP->getIncomingValue(ArgNo);
1000 return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
1001 }
1002
1003 using BaseT::operator++;
1004 memoryaccess_def_iterator &operator++() {
1005 assert(Access && "Hit end of iterator");
1006 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
1007 if (++ArgNo >= MP->getNumIncomingValues()) {
1008 ArgNo = 0;
1009 Access = nullptr;
1010 }
1011 } else {
1012 Access = nullptr;
1013 }
1014 return *this;
1015 }
1016
1017private:
1018 T *Access = nullptr;
1019 unsigned ArgNo = 0;
1020};
1021
1022inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
1023 return memoryaccess_def_iterator(this);
1024}
1025
1026inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
1027 return const_memoryaccess_def_iterator(this);
1028}
1029
1030inline memoryaccess_def_iterator MemoryAccess::defs_end() {
1031 return memoryaccess_def_iterator();
1032}
1033
1034inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
1035 return const_memoryaccess_def_iterator();
1036}
1037
1038/// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
1039/// and uses in the inverse case.
1040template <> struct GraphTraits<MemoryAccess *> {
1041 using NodeRef = MemoryAccess *;
1042 using ChildIteratorType = memoryaccess_def_iterator;
1043
1044 static NodeRef getEntryNode(NodeRef N) { return N; }
1045 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
1046 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
1047};
1048
1049template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1050 using NodeRef = MemoryAccess *;
1051 using ChildIteratorType = MemoryAccess::iterator;
1052
1053 static NodeRef getEntryNode(NodeRef N) { return N; }
1054 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1055 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1056};
1057
1058/// \brief Provide an iterator that walks defs, giving both the memory access,
1059/// and the current pointer location, updating the pointer location as it
1060/// changes due to phi node translation.
1061///
1062/// This iterator, while somewhat specialized, is what most clients actually
1063/// want when walking upwards through MemorySSA def chains. It takes a pair of
1064/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
1065/// memory location through phi nodes for the user.
1066class upward_defs_iterator
1067 : public iterator_facade_base<upward_defs_iterator,
1068 std::forward_iterator_tag,
1069 const MemoryAccessPair> {
1070 using BaseT = upward_defs_iterator::iterator_facade_base;
1071
1072public:
1073 upward_defs_iterator(const MemoryAccessPair &Info)
1074 : DefIterator(Info.first), Location(Info.second),
1075 OriginalAccess(Info.first) {
1076 CurrentPair.first = nullptr;
1077
1078 WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1079 fillInCurrentPair();
1080 }
1081
1082 upward_defs_iterator() { CurrentPair.first = nullptr; }
1083
1084 bool operator==(const upward_defs_iterator &Other) const {
1085 return DefIterator == Other.DefIterator;
1086 }
1087
1088 BaseT::iterator::reference operator*() const {
1089 assert(DefIterator != OriginalAccess->defs_end() &&
1090 "Tried to access past the end of our iterator");
1091 return CurrentPair;
1092 }
1093
1094 using BaseT::operator++;
1095 upward_defs_iterator &operator++() {
1096 assert(DefIterator != OriginalAccess->defs_end() &&
1097 "Tried to access past the end of the iterator");
1098 ++DefIterator;
1099 if (DefIterator != OriginalAccess->defs_end())
1100 fillInCurrentPair();
1101 return *this;
1102 }
1103
1104 BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1105
1106private:
1107 void fillInCurrentPair() {
1108 CurrentPair.first = *DefIterator;
1109 if (WalkingPhi && Location.Ptr) {
1110 PHITransAddr Translator(
1111 const_cast<Value *>(Location.Ptr),
1112 OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
1113 if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1114 DefIterator.getPhiArgBlock(), nullptr,
1115 false))
1116 if (Translator.getAddr() != Location.Ptr) {
1117 CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
1118 return;
1119 }
1120 }
1121 CurrentPair.second = Location;
1122 }
1123
1124 MemoryAccessPair CurrentPair;
1125 memoryaccess_def_iterator DefIterator;
1126 MemoryLocation Location;
1127 MemoryAccess *OriginalAccess = nullptr;
1128 bool WalkingPhi = false;
1129};
1130
1131inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
1132 return upward_defs_iterator(Pair);
1133}
1134
1135inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
1136
1137inline iterator_range<upward_defs_iterator>
1138upward_defs(const MemoryAccessPair &Pair) {
1139 return make_range(upward_defs_begin(Pair), upward_defs_end());
1140}
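
upward_defs() is mainly useful for walking a MemoryPhi's incoming accesses while keeping the queried location phi-translated, as the iterator comment above describes. A sketch, assuming a hypothetical MemoryPhi *Phi and a MemoryLocation Loc being tracked (assumed names):

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

void visitIncoming(MemoryPhi *Phi, const MemoryLocation &Loc) {
  for (const MemoryAccessPair &Pair : upward_defs({Phi, Loc})) {
    MemoryAccess *IncomingDef = Pair.first;
    const MemoryLocation &TranslatedLoc = Pair.second; // phi-translated copy of Loc
    (void)IncomingDef;
    (void)TranslatedLoc;
  }
}
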
1141
1142/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1143/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1144/// comparing against a null def_chain_iterator, this will compare equal only
1145/// after walking said Phi/liveOnEntry.
1146///
1147/// The UseOptimizedChain flag specifies whether to walk the clobbering
1148/// access chain, or all the accesses.
1149///
1150/// Normally, MemoryDef are all just def/use linked together, so a def_chain on
1151/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1152/// a phi node. The optimized chain walks the clobbering access of a store.
1153/// So if you are just trying to find, given a store, what the next
1154/// thing that would clobber the same memory is, you want the optimized chain.
1155template <class T, bool UseOptimizedChain = false>
1156struct def_chain_iterator
1157 : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1158 std::forward_iterator_tag, MemoryAccess *> {
1159 def_chain_iterator() : MA(nullptr) {}
1160 def_chain_iterator(T MA) : MA(MA) {}
1161
1162 T operator*() const { return MA; }
1163
1164 def_chain_iterator &operator++() {
1165 // N.B. liveOnEntry has a null defining access.
1166 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1167 if (UseOptimizedChain && MUD->isOptimized())
1168 MA = MUD->getOptimized();
1169 else
1170 MA = MUD->getDefiningAccess();
1171 } else {
1172 MA = nullptr;
1173 }
1174
1175 return *this;
1176 }
1177
1178 bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1179
1180private:
1181 T MA;
1182};
1183
1184template <class T>
1185inline iterator_range<def_chain_iterator<T>>
1186def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1187#ifdef EXPENSIVE_CHECKS
1188   assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1189          "UpTo isn't in the def chain!");
1190#endif
1191 return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
1192}
1193
1194template <class T>
1195inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
1196 return make_range(def_chain_iterator<T, true>(MA),
1197 def_chain_iterator<T, true>(nullptr));
1198}
1199
1200} // end namespace llvm
1201
1202#endif // LLVM_ANALYSIS_MEMORYSSA_H
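
Aside (not part of the analyzer output): a minimal sketch of how the def_chain helper documented above might be used by client code that already has a MemorySSA analysis. The function countDefsAbove and its setup are hypothetical; only MemorySSA, def_chain, MemoryDef and isLiveOnEntryDef come from the header listed above.

  #include "llvm/Analysis/MemorySSA.h"
  using namespace llvm;

  // Walk the def chain of the access for `I`, counting the MemoryDefs that
  // sit between it and the first MemoryPhi / liveOnEntry.
  static unsigned countDefsAbove(MemorySSA &MSSA, Instruction *I) {
    MemoryAccess *Start = MSSA.getMemoryAccess(I);
    if (!Start)
      return 0; // `I` does not touch memory.
    unsigned N = 0;
    // def_chain() visits Start itself, then each defining access, and stops
    // once it walks past a MemoryPhi or liveOnEntry (null defining access).
    for (MemoryAccess *A : def_chain(Start))
      if (isa<MemoryDef>(A) && !MSSA.isLiveOnEntryDef(A))
        ++N;
    return N;
  }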

/usr/lib/gcc/x86_64-linux-gnu/7.2.0/../../../../include/c++/7.2.0/bits/unique_ptr.h

1// unique_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2008-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/unique_ptr.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30#ifndef _UNIQUE_PTR_H
31#define _UNIQUE_PTR_H 1
32
33#include <bits/c++config.h>
34#include <debug/assertions.h>
35#include <type_traits>
36#include <utility>
37#include <tuple>
38#include <bits/stl_function.h>
39#include <bits/functional_hash.h>
40
41namespace std _GLIBCXX_VISIBILITY(default)
42{
43_GLIBCXX_BEGIN_NAMESPACE_VERSION
44
45 /**
46 * @addtogroup pointer_abstractions
47 * @{
48 */
49
50#if _GLIBCXX_USE_DEPRECATED
51 template<typename> class auto_ptr;
52#endif
53
54 /// Primary template of default_delete, used by unique_ptr
55 template<typename _Tp>
56 struct default_delete
57 {
58 /// Default constructor
59 constexpr default_delete() noexcept = default;
60
61 /** @brief Converting constructor.
62 *
63 * Allows conversion from a deleter for arrays of another type, @p _Up,
64 * only if @p _Up* is convertible to @p _Tp*.
65 */
66 template<typename _Up, typename = typename
67 enable_if<is_convertible<_Up*, _Tp*>::value>::type>
68 default_delete(const default_delete<_Up>&) noexcept { }
69
70 /// Calls @c delete @p __ptr
71 void
72 operator()(_Tp* __ptr) const
73 {
74 static_assert(!is_void<_Tp>::value,
75 "can't delete pointer to incomplete type");
76 static_assert(sizeof(_Tp)>0,
77 "can't delete pointer to incomplete type");
78 delete __ptr;
79 }
80 };
81
82 // _GLIBCXX_RESOLVE_LIB_DEFECTS
83 // DR 740 - omit specialization for array objects with a compile time length
84 /// Specialization for arrays, default_delete.
85 template<typename _Tp>
86 struct default_delete<_Tp[]>
87 {
88 public:
89 /// Default constructor
90 constexpr default_delete() noexcept = default;
91
92 /** @brief Converting constructor.
93 *
94 * Allows conversion from a deleter for arrays of another type, such as
95 * a const-qualified version of @p _Tp.
96 *
97 * Conversions from types derived from @c _Tp are not allowed because
98 * it is unsafe to @c delete[] an array of derived types through a
99 * pointer to the base type.
100 */
101 template<typename _Up, typename = typename
102 enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type>
103 default_delete(const default_delete<_Up[]>&) noexcept { }
104
105 /// Calls @c delete[] @p __ptr
106 template<typename _Up>
107 typename enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type
108 operator()(_Up* __ptr) const
109 {
110 static_assert(sizeof(_Tp)>0,
111 "can't delete pointer to incomplete type");
112 delete [] __ptr;
113 }
114 };
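
Aside (not from this header): a small illustration of why the array converting constructor documented above rejects derived-to-base conversions; deleting an array of a derived type through a pointer to its base is undefined behaviour, so only same-type and cv-qualification conversions are accepted. The types Base and Derived are invented for the example.

  #include <memory>

  struct Base    { int a; };
  struct Derived : Base { int b; };

  int main() {
    std::unique_ptr<Derived[]> d(new Derived[4]);          // fine
    // std::unique_ptr<Base[]>  b(new Derived[4]);         // rejected at compile time
    // std::unique_ptr<Base[]>  b2(std::move(d));          // converting ctor rejected too
    std::unique_ptr<const Derived[]> c(new Derived[4]);    // cv conversion is allowed
    return 0;
  }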
115
116 template <typename _Tp, typename _Dp>
117 class __uniq_ptr_impl
118 {
119 template <typename _Up, typename _Ep, typename = void>
120 struct _Ptr
121 {
122 using type = _Up*;
123 };
124
125 template <typename _Up, typename _Ep>
126 struct
127 _Ptr<_Up, _Ep, __void_t<typename remove_reference<_Ep>::type::pointer>>
128 {
129 using type = typename remove_reference<_Ep>::type::pointer;
130 };
131
132 public:
133 using _DeleterConstraint = enable_if<
134 __and_<__not_<is_pointer<_Dp>>,
135 is_default_constructible<_Dp>>::value>;
136
137 using pointer = typename _Ptr<_Tp, _Dp>::type;
138
139 __uniq_ptr_impl() = default;
140 __uniq_ptr_impl(pointer __p) : _M_t() { _M_ptr() = __p; }
141
142 template<typename _Del>
143 __uniq_ptr_impl(pointer __p, _Del&& __d)
144 : _M_t(__p, std::forward<_Del>(__d)) { }
145
146 pointer& _M_ptr() { return std::get<0>(_M_t); }
147 pointer _M_ptr() const { return std::get<0>(_M_t); }
9: Calling 'get'
16: Returning from 'get'
32: Calling 'get'
39: Returning from 'get'
148 _Dp& _M_deleter() { return std::get<1>(_M_t); }
149 const _Dp& _M_deleter() const { return std::get<1>(_M_t); }
150
151 private:
152 tuple<pointer, _Dp> _M_t;
153 };
154
155 /// 20.7.1.2 unique_ptr for single objects.
156 template <typename _Tp, typename _Dp = default_delete<_Tp>>
157 class unique_ptr
158 {
159 template <class _Up>
160 using _DeleterConstraint =
161 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
162
163 __uniq_ptr_impl<_Tp, _Dp> _M_t;
164
165 public:
166 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
167 using element_type = _Tp;
168 using deleter_type = _Dp;
169
170 // helper template for detecting a safe conversion from another
171 // unique_ptr
172 template<typename _Up, typename _Ep>
173 using __safe_conversion_up = __and_<
174 is_convertible<typename unique_ptr<_Up, _Ep>::pointer, pointer>,
175 __not_<is_array<_Up>>,
176 __or_<__and_<is_reference<deleter_type>,
177 is_same<deleter_type, _Ep>>,
178 __and_<__not_<is_reference<deleter_type>>,
179 is_convertible<_Ep, deleter_type>>
180 >
181 >;
182
183 // Constructors.
184
185 /// Default constructor, creates a unique_ptr that owns nothing.
186 template <typename _Up = _Dp,
187 typename = _DeleterConstraint<_Up>>
188 constexpr unique_ptr() noexcept
189 : _M_t()
190 { }
191
192 /** Takes ownership of a pointer.
193 *
194 * @param __p A pointer to an object of @c element_type
195 *
196 * The deleter will be value-initialized.
197 */
198 template <typename _Up = _Dp,
199 typename = _DeleterConstraint<_Up>>
200 explicit
201 unique_ptr(pointer __p) noexcept
202 : _M_t(__p)
203 { }
204
205 /** Takes ownership of a pointer.
206 *
207 * @param __p A pointer to an object of @c element_type
208 * @param __d A reference to a deleter.
209 *
210 * The deleter will be initialized with @p __d
211 */
212 unique_ptr(pointer __p,
213 typename conditional<is_reference<deleter_type>::value,
214 deleter_type, const deleter_type&>::type __d) noexcept
215 : _M_t(__p, __d) { }
216
217 /** Takes ownership of a pointer.
218 *
219 * @param __p A pointer to an object of @c element_type
220 * @param __d An rvalue reference to a deleter.
221 *
222 * The deleter will be initialized with @p std::move(__d)
223 */
224 unique_ptr(pointer __p,
225 typename remove_reference<deleter_type>::type&& __d) noexcept
226 : _M_t(std::move(__p), std::move(__d))
227 { static_assert(!std::is_reference<deleter_type>::value,
228 "rvalue deleter bound to reference"); }
229
230 /// Creates a unique_ptr that owns nothing.
231 template <typename _Up = _Dp,
232 typename = _DeleterConstraint<_Up>>
233 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
234
235 // Move constructors.
236
237 /// Move constructor.
238 unique_ptr(unique_ptr&& __u) noexcept
239 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
240
241 /** @brief Converting constructor from another type
242 *
243 * Requires that the pointer owned by @p __u is convertible to the
244 * type of pointer owned by this object, @p __u does not own an array,
245 * and @p __u has a compatible deleter type.
246 */
247 template<typename _Up, typename _Ep, typename = _Require<
248 __safe_conversion_up<_Up, _Ep>,
249 typename conditional<is_reference<_Dp>::value,
250 is_same<_Ep, _Dp>,
251 is_convertible<_Ep, _Dp>>::type>>
252 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
253 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
254 { }
255
256#if _GLIBCXX_USE_DEPRECATED
257 /// Converting constructor from @c auto_ptr
258 template<typename _Up, typename = _Require<
259 is_convertible<_Up*, _Tp*>, is_same<_Dp, default_delete<_Tp>>>>
260 unique_ptr(auto_ptr<_Up>&& __u) noexcept;
261#endif
262
263 /// Destructor, invokes the deleter if the stored pointer is not null.
264 ~unique_ptr() noexcept
265 {
266 auto& __ptr = _M_t._M_ptr();
267 if (__ptr != nullptr)
268 get_deleter()(__ptr);
269 __ptr = pointer();
270 }
271
272 // Assignment.
273
274 /** @brief Move assignment operator.
275 *
276 * @param __u The object to transfer ownership from.
277 *
278 * Invokes the deleter first if this object owns a pointer.
279 */
280 unique_ptr&
281 operator=(unique_ptr&& __u) noexcept
282 {
283 reset(__u.release());
284 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
285 return *this;
286 }
287
288 /** @brief Assignment from another type.
289 *
290 * @param __u The object to transfer ownership from, which owns a
291 * convertible pointer to a non-array object.
292 *
293 * Invokes the deleter first if this object owns a pointer.
294 */
295 template<typename _Up, typename _Ep>
296 typename enable_if< __and_<
297 __safe_conversion_up<_Up, _Ep>,
298 is_assignable<deleter_type&, _Ep&&>
299 >::value,
300 unique_ptr&>::type
301 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
302 {
303 reset(__u.release());
304 get_deleter() = std::forward<_Ep>(__u.get_deleter());
305 return *this;
306 }
307
308 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
309 unique_ptr&
310 operator=(nullptr_t) noexcept
311 {
312 reset();
313 return *this;
314 }
315
316 // Observers.
317
318 /// Dereference the stored pointer.
319 typename add_lvalue_reference<element_type>::type
320 operator*() const
321 {
322 __glibcxx_assert(get() != pointer());
323 return *get();
324 }
325
326 /// Return the stored pointer.
327 pointer
328 operator->() const noexcept
329 {
330 _GLIBCXX_DEBUG_PEDASSERT(get() != pointer());
331 return get();
332 }
333
334 /// Return the stored pointer.
335 pointer
336 get() const noexcept
337 { return _M_t._M_ptr(); }
8: Calling '__uniq_ptr_impl::_M_ptr'
17: Returning from '__uniq_ptr_impl::_M_ptr'
31: Calling '__uniq_ptr_impl::_M_ptr'
40: Returning from '__uniq_ptr_impl::_M_ptr'
338
339 /// Return a reference to the stored deleter.
340 deleter_type&
341 get_deleter() noexcept
342 { return _M_t._M_deleter(); }
343
344 /// Return a reference to the stored deleter.
345 const deleter_type&
346 get_deleter() const noexcept
347 { return _M_t._M_deleter(); }
348
349 /// Return @c true if the stored pointer is not null.
350 explicit operator bool() const noexcept
351 { return get() == pointer() ? false : true; }
352
353 // Modifiers.
354
355 /// Release ownership of any stored pointer.
356 pointer
357 release() noexcept
358 {
359 pointer __p = get();
360 _M_t._M_ptr() = pointer();
361 return __p;
362 }
363
364 /** @brief Replace the stored pointer.
365 *
366 * @param __p The new pointer to store.
367 *
368 * The deleter will be invoked if a pointer is already owned.
369 */
370 void
371 reset(pointer __p = pointer()) noexcept
372 {
373 using std::swap;
374 swap(_M_t._M_ptr(), __p);
375 if (__p != pointer())
376 get_deleter()(__p);
377 }
378
379 /// Exchange the pointer and deleter with another object.
380 void
381 swap(unique_ptr& __u) noexcept
382 {
383 using std::swap;
384 swap(_M_t, __u._M_t);
385 }
386
387 // Disable copy from lvalue.
388 unique_ptr(const unique_ptr&) = delete;
389 unique_ptr& operator=(const unique_ptr&) = delete;
390 };
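
Aside (not part of the listed header): a quick usage sketch of the release()/reset() contract documented above; everything used here is standard std::unique_ptr API.

  #include <cassert>
  #include <memory>

  int main() {
    std::unique_ptr<int> p(new int(42));
    int *raw = p.release();   // ownership leaves p; nothing is deleted
    assert(!p && *raw == 42);
    p.reset(raw);             // p owns raw again
    p.reset(new int(7));      // the previously owned int is deleted
    p = nullptr;              // same as reset(): deleter runs, p is empty
    return 0;
  }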
391
392 /// 20.7.1.3 unique_ptr for array objects with a runtime length
393 // [unique.ptr.runtime]
394 // _GLIBCXX_RESOLVE_LIB_DEFECTS
395 // DR 740 - omit specialization for array objects with a compile time length
396 template<typename _Tp, typename _Dp>
397 class unique_ptr<_Tp[], _Dp>
398 {
399 template <typename _Up>
400 using _DeleterConstraint =
401 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
402
403 __uniq_ptr_impl<_Tp, _Dp> _M_t;
404
405 template<typename _Up>
406 using __remove_cv = typename remove_cv<_Up>::type;
407
408 // like is_base_of<_Tp, _Up> but false if unqualified types are the same
409 template<typename _Up>
410 using __is_derived_Tp
411 = __and_< is_base_of<_Tp, _Up>,
412 __not_<is_same<__remove_cv<_Tp>, __remove_cv<_Up>>> >;
413
414 public:
415 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
416 using element_type = _Tp;
417 using deleter_type = _Dp;
418
419 // helper template for detecting a safe conversion from another
420 // unique_ptr
421 template<typename _Up, typename _Ep,
422 typename _Up_up = unique_ptr<_Up, _Ep>,
423 typename _Up_element_type = typename _Up_up::element_type>
424 using __safe_conversion_up = __and_<
425 is_array<_Up>,
426 is_same<pointer, element_type*>,
427 is_same<typename _Up_up::pointer, _Up_element_type*>,
428 is_convertible<_Up_element_type(*)[], element_type(*)[]>,
429 __or_<__and_<is_reference<deleter_type>, is_same<deleter_type, _Ep>>,
430 __and_<__not_<is_reference<deleter_type>>,
431 is_convertible<_Ep, deleter_type>>>
432 >;
433
434 // helper template for detecting a safe conversion from a raw pointer
435 template<typename _Up>
436 using __safe_conversion_raw = __and_<
437 __or_<__or_<is_same<_Up, pointer>,
438 is_same<_Up, nullptr_t>>,
439 __and_<is_pointer<_Up>,
440 is_same<pointer, element_type*>,
441 is_convertible<
442 typename remove_pointer<_Up>::type(*)[],
443 element_type(*)[]>
444 >
445 >
446 >;
447
448 // Constructors.
449
450 /// Default constructor, creates a unique_ptr that owns nothing.
451 template <typename _Up = _Dp,
452 typename = _DeleterConstraint<_Up>>
453 constexpr unique_ptr() noexcept
454 : _M_t()
455 { }
456
457 /** Takes ownership of a pointer.
458 *
459 * @param __p A pointer to an array of a type safely convertible
460 * to an array of @c element_type
461 *
462 * The deleter will be value-initialized.
463 */
464 template<typename _Up,
465 typename _Vp = _Dp,
466 typename = _DeleterConstraint<_Vp>,
467 typename = typename enable_if<
468 __safe_conversion_raw<_Up>::value, bool>::type>
469 explicit
470 unique_ptr(_Up __p) noexcept
471 : _M_t(__p)
472 { }
473
474 /** Takes ownership of a pointer.
475 *
476 * @param __p A pointer to an array of a type safely convertible
477 * to an array of @c element_type
478 * @param __d A reference to a deleter.
479 *
480 * The deleter will be initialized with @p __d
481 */
482 template<typename _Up,
483 typename = typename enable_if<
484 __safe_conversion_raw<_Up>::value, bool>::type>
485 unique_ptr(_Up __p,
486 typename conditional<is_reference<deleter_type>::value,
487 deleter_type, const deleter_type&>::type __d) noexcept
488 : _M_t(__p, __d) { }
489
490 /** Takes ownership of a pointer.
491 *
492 * @param __p A pointer to an array of a type safely convertible
493 * to an array of @c element_type
494 * @param __d A reference to a deleter.
495 *
496 * The deleter will be initialized with @p std::move(__d)
497 */
498 template<typename _Up,
499 typename = typename enable_if<
500 __safe_conversion_raw<_Up>::value, bool>::type>
501 unique_ptr(_Up __p, typename
502 remove_reference<deleter_type>::type&& __d) noexcept
503 : _M_t(std::move(__p), std::move(__d))
504 { static_assert(!is_reference<deleter_type>::value,
505 "rvalue deleter bound to reference"); }
506
507 /// Move constructor.
508 unique_ptr(unique_ptr&& __u) noexcept
509 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
510
511 /// Creates a unique_ptr that owns nothing.
512 template <typename _Up = _Dp,
513 typename = _DeleterConstraint<_Up>>
514 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
515
516 template<typename _Up, typename _Ep,
517 typename = _Require<__safe_conversion_up<_Up, _Ep>>>
518 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
519 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
520 { }
521
522 /// Destructor, invokes the deleter if the stored pointer is not null.
523 ~unique_ptr()
524 {
525 auto& __ptr = _M_t._M_ptr();
526 if (__ptr != nullptr)
527 get_deleter()(__ptr);
528 __ptr = pointer();
529 }
530
531 // Assignment.
532
533 /** @brief Move assignment operator.
534 *
535 * @param __u The object to transfer ownership from.
536 *
537 * Invokes the deleter first if this object owns a pointer.
538 */
539 unique_ptr&
540 operator=(unique_ptr&& __u) noexcept
541 {
542 reset(__u.release());
543 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
544 return *this;
545 }
546
547 /** @brief Assignment from another type.
548 *
549 * @param __u The object to transfer ownership from, which owns a
550 * convertible pointer to an array object.
551 *
552 * Invokes the deleter first if this object owns a pointer.
553 */
554 template<typename _Up, typename _Ep>
555 typename
556 enable_if<__and_<__safe_conversion_up<_Up, _Ep>,
557 is_assignable<deleter_type&, _Ep&&>
558 >::value,
559 unique_ptr&>::type
560 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
561 {
562 reset(__u.release());
563 get_deleter() = std::forward<_Ep>(__u.get_deleter());
564 return *this;
565 }
566
567 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
568 unique_ptr&
569 operator=(nullptr_t) noexcept
570 {
571 reset();
572 return *this;
573 }
574
575 // Observers.
576
577 /// Access an element of owned array.
578 typename std::add_lvalue_reference<element_type>::type
579 operator[](size_t __i) const
580 {
581 __glibcxx_assert(get() != pointer());
582 return get()[__i];
583 }
584
585 /// Return the stored pointer.
586 pointer
587 get() const noexcept
588 { return _M_t._M_ptr(); }
589
590 /// Return a reference to the stored deleter.
591 deleter_type&
592 get_deleter() noexcept
593 { return _M_t._M_deleter(); }
594
595 /// Return a reference to the stored deleter.
596 const deleter_type&
597 get_deleter() const noexcept
598 { return _M_t._M_deleter(); }
599
600 /// Return @c true if the stored pointer is not null.
601 explicit operator bool() const noexcept
602 { return get() == pointer() ? false : true; }
603
604 // Modifiers.
605
606 /// Release ownership of any stored pointer.
607 pointer
608 release() noexcept
609 {
610 pointer __p = get();
611 _M_t._M_ptr() = pointer();
612 return __p;
613 }
614
615 /** @brief Replace the stored pointer.
616 *
617 * @param __p The new pointer to store.
618 *
619 * The deleter will be invoked if a pointer is already owned.
620 */
621 template <typename _Up,
622 typename = _Require<
623 __or_<is_same<_Up, pointer>,
624 __and_<is_same<pointer, element_type*>,
625 is_pointer<_Up>,
626 is_convertible<
627 typename remove_pointer<_Up>::type(*)[],
628 element_type(*)[]
629 >
630 >
631 >
632 >>
633 void
634 reset(_Up __p) noexcept
635 {
636 pointer __ptr = __p;
637 using std::swap;
638 swap(_M_t._M_ptr(), __ptr);
639 if (__ptr != nullptr)
640 get_deleter()(__ptr);
641 }
642
643 void reset(nullptr_t = nullptr) noexcept
644 {
645 reset(pointer());
646 }
647
648 /// Exchange the pointer and deleter with another object.
649 void
650 swap(unique_ptr& __u) noexcept
651 {
652 using std::swap;
653 swap(_M_t, __u._M_t);
654 }
655
656 // Disable copy from lvalue.
657 unique_ptr(const unique_ptr&) = delete;
658 unique_ptr& operator=(const unique_ptr&) = delete;
659 };
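
Aside: the array specialization above releases storage with delete[] via default_delete<_Tp[]> and exposes operator[] rather than operator* / operator->. A minimal, standard-only illustration:

  #include <memory>

  int main() {
    std::unique_ptr<int[]> a(new int[3]{1, 2, 3});
    a[1] = 20;                // element access through operator[]
    a.reset(new int[2]());    // the old array is freed with delete[]
    return 0;
  }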
660
661 template<typename _Tp, typename _Dp>
662 inline
663#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
664 // Constrained free swap overload, see p0185r1
665 typename enable_if<__is_swappable<_Dp>::value>::type
666#else
667 void
668#endif
669 swap(unique_ptr<_Tp, _Dp>& __x,
670 unique_ptr<_Tp, _Dp>& __y) noexcept
671 { __x.swap(__y); }
672
673#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
674 template<typename _Tp, typename _Dp>
675 typename enable_if<!__is_swappable<_Dp>::value>::type
676 swap(unique_ptr<_Tp, _Dp>&,
677 unique_ptr<_Tp, _Dp>&) = delete;
678#endif
679
680 template<typename _Tp, typename _Dp,
681 typename _Up, typename _Ep>
682 inline bool
683 operator==(const unique_ptr<_Tp, _Dp>& __x,
684 const unique_ptr<_Up, _Ep>& __y)
685 { return __x.get() == __y.get(); }
686
687 template<typename _Tp, typename _Dp>
688 inline bool
689 operator==(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
690 { return !__x; }
691
692 template<typename _Tp, typename _Dp>
693 inline bool
694 operator==(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
695 { return !__x; }
696
697 template<typename _Tp, typename _Dp,
698 typename _Up, typename _Ep>
699 inline bool
700 operator!=(const unique_ptr<_Tp, _Dp>& __x,
701 const unique_ptr<_Up, _Ep>& __y)
702 { return __x.get() != __y.get(); }
703
704 template<typename _Tp, typename _Dp>
705 inline bool
706 operator!=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
707 { return (bool)__x; }
708
709 template<typename _Tp, typename _Dp>
710 inline bool
711 operator!=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
712 { return (bool)__x; }
713
714 template<typename _Tp, typename _Dp,
715 typename _Up, typename _Ep>
716 inline bool
717 operator<(const unique_ptr<_Tp, _Dp>& __x,
718 const unique_ptr<_Up, _Ep>& __y)
719 {
720 typedef typename
721 std::common_type<typename unique_ptr<_Tp, _Dp>::pointer,
722 typename unique_ptr<_Up, _Ep>::pointer>::type _CT;
723 return std::less<_CT>()(__x.get(), __y.get());
724 }
725
726 template<typename _Tp, typename _Dp>
727 inline bool
728 operator<(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
729 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
730 nullptr); }
731
732 template<typename _Tp, typename _Dp>
733 inline bool
734 operator<(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
735 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
736 __x.get()); }
737
738 template<typename _Tp, typename _Dp,
739 typename _Up, typename _Ep>
740 inline bool
741 operator<=(const unique_ptr<_Tp, _Dp>& __x,
742 const unique_ptr<_Up, _Ep>& __y)
743 { return !(__y < __x); }
744
745 template<typename _Tp, typename _Dp>
746 inline bool
747 operator<=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
748 { return !(nullptr < __x); }
749
750 template<typename _Tp, typename _Dp>
751 inline bool
752 operator<=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
753 { return !(__x < nullptr); }
754
755 template<typename _Tp, typename _Dp,
756 typename _Up, typename _Ep>
757 inline bool
758 operator>(const unique_ptr<_Tp, _Dp>& __x,
759 const unique_ptr<_Up, _Ep>& __y)
760 { return (__y < __x); }
761
762 template<typename _Tp, typename _Dp>
763 inline bool
764 operator>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
765 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
766 __x.get()); }
767
768 template<typename _Tp, typename _Dp>
769 inline bool
770 operator>(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
771 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
772 nullptr); }
773
774 template<typename _Tp, typename _Dp,
775 typename _Up, typename _Ep>
776 inline bool
777 operator>=(const unique_ptr<_Tp, _Dp>& __x,
778 const unique_ptr<_Up, _Ep>& __y)
779 { return !(__x < __y); }
780
781 template<typename _Tp, typename _Dp>
782 inline bool
783 operator>=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
784 { return !(__x < nullptr); }
785
786 template<typename _Tp, typename _Dp>
787 inline bool
788 operator>=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
789 { return !(nullptr < __x); }
790
791 /// std::hash specialization for unique_ptr.
792 template<typename _Tp, typename _Dp>
793 struct hash<unique_ptr<_Tp, _Dp>>
794 : public __hash_base<size_t, unique_ptr<_Tp, _Dp>>,
795 private __poison_hash<typename unique_ptr<_Tp, _Dp>::pointer>
796 {
797 size_t
798 operator()(const unique_ptr<_Tp, _Dp>& __u) const noexcept
799 {
800 typedef unique_ptr<_Tp, _Dp> _UP;
801 return std::hash<typename _UP::pointer>()(__u.get());
802 }
803 };
804
805#if __cplusplus > 201103L
806
807#define __cpp_lib_make_unique 201304
808
809 template<typename _Tp>
810 struct _MakeUniq
811 { typedef unique_ptr<_Tp> __single_object; };
812
813 template<typename _Tp>
814 struct _MakeUniq<_Tp[]>
815 { typedef unique_ptr<_Tp[]> __array; };
816
817 template<typename _Tp, size_t _Bound>
818 struct _MakeUniq<_Tp[_Bound]>
819 { struct __invalid_type { }; };
820
821 /// std::make_unique for single objects
822 template<typename _Tp, typename... _Args>
823 inline typename _MakeUniq<_Tp>::__single_object
824 make_unique(_Args&&... __args)
825 { return unique_ptr<_Tp>(new _Tp(std::forward<_Args>(__args)...)); }
826
827 /// std::make_unique for arrays of unknown bound
828 template<typename _Tp>
829 inline typename _MakeUniq<_Tp>::__array
830 make_unique(size_t __num)
831 { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__num]()); }
832
833 /// Disable std::make_unique for arrays of known bound
834 template<typename _Tp, typename... _Args>
835 inline typename _MakeUniq<_Tp>::__invalid_type
836 make_unique(_Args&&...) = delete;
837#endif
838
839 // @} group pointer_abstractions
840
841_GLIBCXX_END_NAMESPACE_VERSION
842} // namespace
843
844#endif /* _UNIQUE_PTR_H */
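
Aside (not from the header): std::make_unique, defined above only for C++14 and later, is the usual way to create a unique_ptr without spelling new; the std::hash specialization above lets unique_ptr serve as an unordered-container key.

  #include <memory>
  #include <string>

  int main() {
    auto s = std::make_unique<std::string>(5, 'x');   // single object: "xxxxx"
    auto v = std::make_unique<int[]>(8);              // array of 8 value-initialized ints
    // auto bad = std::make_unique<int[8]>();         // known-bound arrays are deleted above
    std::hash<std::unique_ptr<std::string>> h;        // the specialization above
    (void)h(s);
    (void)v;
    return 0;
  }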

/usr/lib/gcc/x86_64-linux-gnu/7.2.0/../../../../include/c++/7.2.0/tuple

1// <tuple> -*- C++ -*-
2
3// Copyright (C) 2007-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/tuple
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_TUPLE
30#define _GLIBCXX_TUPLE 1
31
32#pragma GCC system_header
33
34#if __cplusplus < 201103L
35# include <bits/c++0x_warning.h>
36#else
37
38#include <utility>
39#include <array>
40#include <bits/uses_allocator.h>
41#include <bits/invoke.h>
42
43namespace std _GLIBCXX_VISIBILITY(default)
44{
45_GLIBCXX_BEGIN_NAMESPACE_VERSION
46
47 /**
48 * @addtogroup utilities
49 * @{
50 */
51
52 template<typename... _Elements>
53 class tuple;
54
55 template<typename _Tp>
56 struct __is_empty_non_tuple : is_empty<_Tp> { };
57
58 // Using EBO for elements that are tuples causes ambiguous base errors.
59 template<typename _El0, typename... _El>
60 struct __is_empty_non_tuple<tuple<_El0, _El...>> : false_type { };
61
62 // Use the Empty Base-class Optimization for empty, non-final types.
63 template<typename _Tp>
64 using __empty_not_final
65 = typename conditional<__is_final(_Tp), false_type,
66 __is_empty_non_tuple<_Tp>>::type;
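
Aside (illustration only): what the empty-base machinery above buys. When an element type is empty and non-final, _Head_base derives from it instead of storing a member, so it usually contributes no storage; exact sizes are implementation-defined, hence printed rather than asserted. The type Empty is invented for the example.

  #include <cstdio>
  #include <tuple>

  struct Empty {};   // empty, non-final: eligible for the EBO path above

  int main() {
    std::printf("sizeof(std::tuple<int, Empty>) = %zu vs sizeof(int) = %zu\n",
                sizeof(std::tuple<int, Empty>), sizeof(int));
    return 0;
  }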
67
68 template<std::size_t _Idx, typename _Head,
69 bool = __empty_not_final<_Head>::value>
70 struct _Head_base;
71
72 template<std::size_t _Idx, typename _Head>
73 struct _Head_base<_Idx, _Head, true>
74 : public _Head
75 {
76 constexpr _Head_base()
77 : _Head() { }
78
79 constexpr _Head_base(const _Head& __h)
80 : _Head(__h) { }
81
82 constexpr _Head_base(const _Head_base&) = default;
83 constexpr _Head_base(_Head_base&&) = default;
84
85 template<typename _UHead>
86 constexpr _Head_base(_UHead&& __h)
87 : _Head(std::forward<_UHead>(__h)) { }
88
89 _Head_base(allocator_arg_t, __uses_alloc0)
90 : _Head() { }
91
92 template<typename _Alloc>
93 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
94 : _Head(allocator_arg, *__a._M_a) { }
95
96 template<typename _Alloc>
97 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
98 : _Head(*__a._M_a) { }
99
100 template<typename _UHead>
101 _Head_base(__uses_alloc0, _UHead&& __uhead)
102 : _Head(std::forward<_UHead>(__uhead)) { }
103
104 template<typename _Alloc, typename _UHead>
105 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
106 : _Head(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead)) { }
107
108 template<typename _Alloc, typename _UHead>
109 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
110 : _Head(std::forward<_UHead>(__uhead), *__a._M_a) { }
111
112 static constexpr _Head&
113 _M_head(_Head_base& __b) noexcept { return __b; }
114
115 static constexpr const _Head&
116 _M_head(const _Head_base& __b) noexcept { return __b; }
117 };
118
119 template<std::size_t _Idx, typename _Head>
120 struct _Head_base<_Idx, _Head, false>
121 {
122 constexpr _Head_base()
123 : _M_head_impl() { }
124
125 constexpr _Head_base(const _Head& __h)
126 : _M_head_impl(__h) { }
127
128 constexpr _Head_base(const _Head_base&) = default;
129 constexpr _Head_base(_Head_base&&) = default;
130
131 template<typename _UHead>
132 constexpr _Head_base(_UHead&& __h)
133 : _M_head_impl(std::forward<_UHead>(__h)) { }
134
135 _Head_base(allocator_arg_t, __uses_alloc0)
136 : _M_head_impl() { }
137
138 template<typename _Alloc>
139 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
140 : _M_head_impl(allocator_arg, *__a._M_a) { }
141
142 template<typename _Alloc>
143 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
144 : _M_head_impl(*__a._M_a) { }
145
146 template<typename _UHead>
147 _Head_base(__uses_alloc0, _UHead&& __uhead)
148 : _M_head_impl(std::forward<_UHead>(__uhead)) { }
149
150 template<typename _Alloc, typename _UHead>
151 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
152 : _M_head_impl(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead))
153 { }
154
155 template<typename _Alloc, typename _UHead>
156 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
157 : _M_head_impl(std::forward<_UHead>(__uhead), *__a._M_a) { }
158
159 static constexpr _Head&
160 _M_head(_Head_base& __b) noexcept { return __b._M_head_impl; }
161
162 static constexpr const _Head&
163 _M_head(const _Head_base& __b) noexcept { return __b._M_head_impl; }
164
165 _Head _M_head_impl;
166 };
167
168 /**
169 * Contains the actual implementation of the @c tuple template, stored
170 * as a recursive inheritance hierarchy from the first element (most
171 * derived class) to the last (least derived class). The @c Idx
172 * parameter gives the 0-based index of the element stored at this
173 * point in the hierarchy; we use it to implement a constant-time
174 * get() operation.
175 */
176 template<std::size_t _Idx, typename... _Elements>
177 struct _Tuple_impl;
178
179 /**
180 * Recursive tuple implementation. Here we store the @c Head element
181 * and derive from a @c Tuple_impl containing the remaining elements
182 * (which contains the @c Tail).
183 */
184 template<std::size_t _Idx, typename _Head, typename... _Tail>
185 struct _Tuple_impl<_Idx, _Head, _Tail...>
186 : public _Tuple_impl<_Idx + 1, _Tail...>,
187 private _Head_base<_Idx, _Head>
188 {
189 template<std::size_t, typename...> friend class _Tuple_impl;
190
191 typedef _Tuple_impl<_Idx + 1, _Tail...> _Inherited;
192 typedef _Head_base<_Idx, _Head> _Base;
193
194 static constexpr _Head&
195 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
196
197 static constexpr const _Head&
198 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
12: Calling '_Head_base::_M_head'
13: Returning from '_Head_base::_M_head'
35: Calling '_Head_base::_M_head'
36: Returning from '_Head_base::_M_head'
199
200 static constexpr _Inherited&
201 _M_tail(_Tuple_impl& __t) noexcept { return __t; }
202
203 static constexpr const _Inherited&
204 _M_tail(const _Tuple_impl& __t) noexcept { return __t; }
205
206 constexpr _Tuple_impl()
207 : _Inherited(), _Base() { }
208
209 explicit
210 constexpr _Tuple_impl(const _Head& __head, const _Tail&... __tail)
211 : _Inherited(__tail...), _Base(__head) { }
212
213 template<typename _UHead, typename... _UTail, typename = typename
214 enable_if<sizeof...(_Tail) == sizeof...(_UTail)>::type>
215 explicit
216 constexpr _Tuple_impl(_UHead&& __head, _UTail&&... __tail)
217 : _Inherited(std::forward<_UTail>(__tail)...),
218 _Base(std::forward<_UHead>(__head)) { }
219
220 constexpr _Tuple_impl(const _Tuple_impl&) = default;
221
222 constexpr
223 _Tuple_impl(_Tuple_impl&& __in)
224 noexcept(__and_<is_nothrow_move_constructible<_Head>,
225 is_nothrow_move_constructible<_Inherited>>::value)
226 : _Inherited(std::move(_M_tail(__in))),
227 _Base(std::forward<_Head>(_M_head(__in))) { }
228
229 template<typename... _UElements>
230 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UElements...>& __in)
231 : _Inherited(_Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
232 _Base(_Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
233
234 template<typename _UHead, typename... _UTails>
235 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
236 : _Inherited(std::move
237 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
238 _Base(std::forward<_UHead>
239 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
240
241 template<typename _Alloc>
242 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
243 : _Inherited(__tag, __a),
244 _Base(__tag, __use_alloc<_Head>(__a)) { }
245
246 template<typename _Alloc>
247 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
248 const _Head& __head, const _Tail&... __tail)
249 : _Inherited(__tag, __a, __tail...),
250 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
251
252 template<typename _Alloc, typename _UHead, typename... _UTail,
253 typename = typename enable_if<sizeof...(_Tail)
254 == sizeof...(_UTail)>::type>
255 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
256 _UHead&& __head, _UTail&&... __tail)
257 : _Inherited(__tag, __a, std::forward<_UTail>(__tail)...),
258 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
259 std::forward<_UHead>(__head)) { }
260
261 template<typename _Alloc>
262 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
263 const _Tuple_impl& __in)
264 : _Inherited(__tag, __a, _M_tail(__in)),
265 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
266
267 template<typename _Alloc>
268 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
269 _Tuple_impl&& __in)
270 : _Inherited(__tag, __a, std::move(_M_tail(__in))),
271 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
272 std::forward<_Head>(_M_head(__in))) { }
273
274 template<typename _Alloc, typename... _UElements>
275 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
276 const _Tuple_impl<_Idx, _UElements...>& __in)
277 : _Inherited(__tag, __a,
278 _Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
279 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
280 _Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
281
282 template<typename _Alloc, typename _UHead, typename... _UTails>
283 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
284 _Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
285 : _Inherited(__tag, __a, std::move
286 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
287 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
288 std::forward<_UHead>
289 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
290
291 _Tuple_impl&
292 operator=(const _Tuple_impl& __in)
293 {
294 _M_head(*this) = _M_head(__in);
295 _M_tail(*this) = _M_tail(__in);
296 return *this;
297 }
298
299 _Tuple_impl&
300 operator=(_Tuple_impl&& __in)
301 noexcept(__and_<is_nothrow_move_assignable<_Head>,
302 is_nothrow_move_assignable<_Inherited>>::value)
303 {
304 _M_head(*this) = std::forward<_Head>(_M_head(__in));
305 _M_tail(*this) = std::move(_M_tail(__in));
306 return *this;
307 }
308
309 template<typename... _UElements>
310 _Tuple_impl&
311 operator=(const _Tuple_impl<_Idx, _UElements...>& __in)
312 {
313 _M_head(*this) = _Tuple_impl<_Idx, _UElements...>::_M_head(__in);
314 _M_tail(*this) = _Tuple_impl<_Idx, _UElements...>::_M_tail(__in);
315 return *this;
316 }
317
318 template<typename _UHead, typename... _UTails>
319 _Tuple_impl&
320 operator=(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
321 {
322 _M_head(*this) = std::forward<_UHead>
323 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in));
324 _M_tail(*this) = std::move
325 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in));
326 return *this;
327 }
328
329 protected:
330 void
331 _M_swap(_Tuple_impl& __in)
332 noexcept(__is_nothrow_swappable<_Head>::value
333 && noexcept(_M_tail(__in)._M_swap(_M_tail(__in))))
334 {
335 using std::swap;
336 swap(_M_head(*this), _M_head(__in));
337 _Inherited::_M_swap(_M_tail(__in));
338 }
339 };
340
341 // Basis case of inheritance recursion.
342 template<std::size_t _Idx, typename _Head>
343 struct _Tuple_impl<_Idx, _Head>
344 : private _Head_base<_Idx, _Head>
345 {
346 template<std::size_t, typename...> friend class _Tuple_impl;
347
348 typedef _Head_base<_Idx, _Head> _Base;
349
350 static constexpr _Head&
351 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
352
353 static constexpr const _Head&
354 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
355
356 constexpr _Tuple_impl()
357 : _Base() { }
358
359 explicit
360 constexpr _Tuple_impl(const _Head& __head)
361 : _Base(__head) { }
362
363 template<typename _UHead>
364 explicit
365 constexpr _Tuple_impl(_UHead&& __head)
366 : _Base(std::forward<_UHead>(__head)) { }
367
368 constexpr _Tuple_impl(const _Tuple_impl&) = default;
369
370 constexpr
371 _Tuple_impl(_Tuple_impl&& __in)
372 noexcept(is_nothrow_move_constructible<_Head>::value)
373 : _Base(std::forward<_Head>(_M_head(__in))) { }
374
375 template<typename _UHead>
376 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UHead>& __in)
377 : _Base(_Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
378
379 template<typename _UHead>
380 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead>&& __in)
381 : _Base(std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
382 { }
383
384 template<typename _Alloc>
385 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
386 : _Base(__tag, __use_alloc<_Head>(__a)) { }
387
388 template<typename _Alloc>
389 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
390 const _Head& __head)
391 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
392
393 template<typename _Alloc, typename _UHead>
394 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
395 _UHead&& __head)
396 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
397 std::forward<_UHead>(__head)) { }
398
399 template<typename _Alloc>
400 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
401 const _Tuple_impl& __in)
402 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
403
404 template<typename _Alloc>
405 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
406 _Tuple_impl&& __in)
407 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
408 std::forward<_Head>(_M_head(__in))) { }
409
410 template<typename _Alloc, typename _UHead>
411 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
412 const _Tuple_impl<_Idx, _UHead>& __in)
413 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
414 _Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
415
416 template<typename _Alloc, typename _UHead>
417 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
418 _Tuple_impl<_Idx, _UHead>&& __in)
419 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
420 std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
421 { }
422
423 _Tuple_impl&
424 operator=(const _Tuple_impl& __in)
425 {
426 _M_head(*this) = _M_head(__in);
427 return *this;
428 }
429
430 _Tuple_impl&
431 operator=(_Tuple_impl&& __in)
432 noexcept(is_nothrow_move_assignable<_Head>::value)
433 {
434 _M_head(*this) = std::forward<_Head>(_M_head(__in));
435 return *this;
436 }
437
438 template<typename _UHead>
439 _Tuple_impl&
440 operator=(const _Tuple_impl<_Idx, _UHead>& __in)
441 {
442 _M_head(*this) = _Tuple_impl<_Idx, _UHead>::_M_head(__in);
443 return *this;
444 }
445
446 template<typename _UHead>
447 _Tuple_impl&
448 operator=(_Tuple_impl<_Idx, _UHead>&& __in)
449 {
450 _M_head(*this)
451 = std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in));
452 return *this;
453 }
454
455 protected:
456 void
457 _M_swap(_Tuple_impl& __in)
458 noexcept(__is_nothrow_swappable<_Head>::value)
459 {
460 using std::swap;
461 swap(_M_head(*this), _M_head(__in));
462 }
463 };
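
Aside (simplified sketch, not the libstdc++ code): the recursive layout described in the comment before _Tuple_impl. Each level owns one head and inherits the remaining elements, so element access is a compile-time cast to the base class that owns the wanted index. MiniImpl and mini_get are invented names and omit the _Head_base/EBO and allocator machinery shown above.

  #include <cstddef>

  template <std::size_t I, typename... Ts> struct MiniImpl;

  // Recursive case: store the head, inherit the tail (cf. _Tuple_impl above).
  template <std::size_t I, typename Head, typename... Tail>
  struct MiniImpl<I, Head, Tail...> : MiniImpl<I + 1, Tail...> {
    Head head;
    MiniImpl(Head h, Tail... tail) : MiniImpl<I + 1, Tail...>(tail...), head(h) {}
  };

  // Basis case: one element left.
  template <std::size_t I, typename Head>
  struct MiniImpl<I, Head> {
    Head head;
    MiniImpl(Head h) : head(h) {}
  };

  // get<I>: deduction against the unique base MiniImpl<I, ...> picks the level.
  template <std::size_t I, typename Head, typename... Tail>
  Head &mini_get(MiniImpl<I, Head, Tail...> &t) { return t.head; }

  int main() {
    MiniImpl<0, int, char, double> t(1, 'a', 2.5);
    return mini_get<1>(t) == 'a' ? 0 : 1;
  }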
464
465 // Concept utility functions, reused in conditionally-explicit
466 // constructors.
467 template<bool, typename... _Elements>
468 struct _TC
469 {
470 template<typename... _UElements>
471 static constexpr bool _ConstructibleTuple()
472 {
473 return __and_<is_constructible<_Elements, const _UElements&>...>::value;
474 }
475
476 template<typename... _UElements>
477 static constexpr bool _ImplicitlyConvertibleTuple()
478 {
479 return __and_<is_convertible<const _UElements&, _Elements>...>::value;
480 }
481
482 template<typename... _UElements>
483 static constexpr bool _MoveConstructibleTuple()
484 {
485 return __and_<is_constructible<_Elements, _UElements&&>...>::value;
486 }
487
488 template<typename... _UElements>
489 static constexpr bool _ImplicitlyMoveConvertibleTuple()
490 {
491 return __and_<is_convertible<_UElements&&, _Elements>...>::value;
492 }
493
494 template<typename _SrcTuple>
495 static constexpr bool _NonNestedTuple()
496 {
497 return __and_<__not_<is_same<tuple<_Elements...>,
498 typename remove_cv<
499 typename remove_reference<_SrcTuple>::type
500 >::type>>,
501 __not_<is_convertible<_SrcTuple, _Elements...>>,
502 __not_<is_constructible<_Elements..., _SrcTuple>>
503 >::value;
504 }
505 template<typename... _UElements>
506 static constexpr bool _NotSameTuple()
507 {
508 return __not_<is_same<tuple<_Elements...>,
509 typename remove_const<
510 typename remove_reference<_UElements...>::type
511 >::type>>::value;
512 }
513 };
514
515 template<typename... _Elements>
516 struct _TC<false, _Elements...>
517 {
518 template<typename... _UElements>
519 static constexpr bool _ConstructibleTuple()
520 {
521 return false;
522 }
523
524 template<typename... _UElements>
525 static constexpr bool _ImplicitlyConvertibleTuple()
526 {
527 return false;
528 }
529
530 template<typename... _UElements>
531 static constexpr bool _MoveConstructibleTuple()
532 {
533 return false;
534 }
535
536 template<typename... _UElements>
537 static constexpr bool _ImplicitlyMoveConvertibleTuple()
538 {
539 return false;
540 }
541
542 template<typename... _UElements>
543 static constexpr bool _NonNestedTuple()
544 {
545 return true;
546 }
547 template<typename... _UElements>
548 static constexpr bool _NotSameTuple()
549 {
550 return true;
551 }
552 };
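
Aside (not from the header): what the _TC machinery above is used for. tuple's element-wise constructors are conditionally explicit, so copy-list-initialization compiles only when every element conversion is implicit; with an explicit element constructor (e.g. unique_ptr from a raw pointer) only direct-initialization works. The helper functions here are invented for the example.

  #include <memory>
  #include <tuple>

  std::tuple<int, double> make_ok() { return {1, 2.0}; }    // implicit conversions: fine

  std::tuple<std::unique_ptr<int>> make_ptr() {
    // return {new int(1)};                                 // ill-formed: chosen ctor is explicit
    return std::tuple<std::unique_ptr<int>>{new int(1)};    // direct-init is fine
  }

  int main() {
    auto a = make_ok();
    auto b = make_ptr();
    (void)b;
    return std::get<0>(a) - 1;   // 0
  }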
553
554 /// Primary class template, tuple
555 template<typename... _Elements>
556 class tuple : public _Tuple_impl<0, _Elements...>
557 {
558 typedef _Tuple_impl<0, _Elements...> _Inherited;
559
560 // Used for constraining the default constructor so
561 // that it becomes dependent on the constraints.
562 template<typename _Dummy>
563 struct _TC2
564 {
565 static constexpr bool _DefaultConstructibleTuple()
566 {
567 return __and_<is_default_constructible<_Elements>...>::value;
568 }
569 static constexpr bool _ImplicitlyDefaultConstructibleTuple()
570 {
571 return __and_<__is_implicitly_default_constructible<_Elements>...>
572 ::value;
573 }
574 };
575
576 public:
577 template<typename _Dummy = void,
578 typename enable_if<_TC2<_Dummy>::
579 _ImplicitlyDefaultConstructibleTuple(),
580 bool>::type = true>
581 constexpr tuple()
582 : _Inherited() { }
583
584 template<typename _Dummy = void,
585 typename enable_if<_TC2<_Dummy>::
586 _DefaultConstructibleTuple()
587 &&
588 !_TC2<_Dummy>::
589 _ImplicitlyDefaultConstructibleTuple(),
590 bool>::type = false>
591 explicit constexpr tuple()
592 : _Inherited() { }
593
594 // Shortcut for the cases where constructors taking _Elements...
595 // need to be constrained.
596 template<typename _Dummy> using _TCC =
597 _TC<is_same<_Dummy, void>::value,
598 _Elements...>;
599
600 template<typename _Dummy = void,
601 typename enable_if<
602 _TCC<_Dummy>::template
603 _ConstructibleTuple<_Elements...>()
604 && _TCC<_Dummy>::template
605 _ImplicitlyConvertibleTuple<_Elements...>()
606 && (sizeof...(_Elements) >= 1),
607 bool>::type=true>
608 constexpr tuple(const _Elements&... __elements)
609 : _Inherited(__elements...) { }
610
611 template<typename _Dummy = void,
612 typename enable_if<
613 _TCC<_Dummy>::template
614 _ConstructibleTuple<_Elements...>()
615 && !_TCC<_Dummy>::template
616 _ImplicitlyConvertibleTuple<_Elements...>()
617 && (sizeof...(_Elements) >= 1),
618 bool>::type=false>
619 explicit constexpr tuple(const _Elements&... __elements)
620 : _Inherited(__elements...) { }
621
622 // Shortcut for the cases where constructors taking _UElements...
623 // need to be constrained.
624 template<typename... _UElements> using _TMC =
625 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
626 && (_TC<(sizeof...(_UElements)==1), _Elements...>::
627 template _NotSameTuple<_UElements...>()),
628 _Elements...>;
629
630 // Shortcut for the cases where constructors taking tuple<_UElements...>
631 // need to be constrained.
632 template<typename... _UElements> using _TMCT =
633 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
634 && !is_same<tuple<_Elements...>,
635 tuple<_UElements...>>::value,
636 _Elements...>;
637
638 template<typename... _UElements, typename
639 enable_if<
640 _TMC<_UElements...>::template
641 _MoveConstructibleTuple<_UElements...>()
642 && _TMC<_UElements...>::template
643 _ImplicitlyMoveConvertibleTuple<_UElements...>()
644 && (sizeof...(_Elements) >= 1),
645 bool>::type=true>
646 constexpr tuple(_UElements&&... __elements)
647 : _Inherited(std::forward<_UElements>(__elements)...) { }
648
649 template<typename... _UElements, typename
650 enable_if<
651 _TMC<_UElements...>::template
652 _MoveConstructibleTuple<_UElements...>()
653 && !_TMC<_UElements...>::template
654 _ImplicitlyMoveConvertibleTuple<_UElements...>()
655 && (sizeof...(_Elements) >= 1),
656 bool>::type=false>
657 explicit constexpr tuple(_UElements&&... __elements)
658 : _Inherited(std::forward<_UElements>(__elements)...) { }
659
660 constexpr tuple(const tuple&) = default;
661
662 constexpr tuple(tuple&&) = default;
663
664 // Shortcut for the cases where constructors taking tuples
665 // must avoid creating temporaries.
666 template<typename _Dummy> using _TNTC =
667 _TC<is_same<_Dummy, void>::value && sizeof...(_Elements) == 1,
668 _Elements...>;
669
670 template<typename... _UElements, typename _Dummy = void, typename
671 enable_if<_TMCT<_UElements...>::template
672 _ConstructibleTuple<_UElements...>()
673 && _TMCT<_UElements...>::template
674 _ImplicitlyConvertibleTuple<_UElements...>()
675 && _TNTC<_Dummy>::template
676 _NonNestedTuple<const tuple<_UElements...>&>(),
677 bool>::type=true>
678 constexpr tuple(const tuple<_UElements...>& __in)
679 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
680 { }
681
682 template<typename... _UElements, typename _Dummy = void, typename
683 enable_if<_TMCT<_UElements...>::template
684 _ConstructibleTuple<_UElements...>()
685 && !_TMCT<_UElements...>::template
686 _ImplicitlyConvertibleTuple<_UElements...>()
687 && _TNTC<_Dummy>::template
688 _NonNestedTuple<const tuple<_UElements...>&>(),
689 bool>::type=false>
690 explicit constexpr tuple(const tuple<_UElements...>& __in)
691 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
692 { }
693
694 template<typename... _UElements, typename _Dummy = void, typename
695 enable_if<_TMCT<_UElements...>::template
696 _MoveConstructibleTuple<_UElements...>()
697 && _TMCT<_UElements...>::template
698 _ImplicitlyMoveConvertibleTuple<_UElements...>()
699 && _TNTC<_Dummy>::template
700 _NonNestedTuple<tuple<_UElements...>&&>(),
701 bool>::type=true>
702 constexpr tuple(tuple<_UElements...>&& __in)
703 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
704
705 template<typename... _UElements, typename _Dummy = void, typename
706 enable_if<_TMCT<_UElements...>::template
707 _MoveConstructibleTuple<_UElements...>()
708 && !_TMCT<_UElements...>::template
709 _ImplicitlyMoveConvertibleTuple<_UElements...>()
710 && _TNTC<_Dummy>::template
711 _NonNestedTuple<tuple<_UElements...>&&>(),
712 bool>::type=false>
713 explicit constexpr tuple(tuple<_UElements...>&& __in)
714 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
715
716 // Allocator-extended constructors.
717
718 template<typename _Alloc>
719 tuple(allocator_arg_t __tag, const _Alloc& __a)
720 : _Inherited(__tag, __a) { }
721
722 template<typename _Alloc, typename _Dummy = void,
723 typename enable_if<
724 _TCC<_Dummy>::template
725 _ConstructibleTuple<_Elements...>()
726 && _TCC<_Dummy>::template
727 _ImplicitlyConvertibleTuple<_Elements...>(),
728 bool>::type=true>
729 tuple(allocator_arg_t __tag, const _Alloc& __a,
730 const _Elements&... __elements)
731 : _Inherited(__tag, __a, __elements...) { }
732
733 template<typename _Alloc, typename _Dummy = void,
734 typename enable_if<
735 _TCC<_Dummy>::template
736 _ConstructibleTuple<_Elements...>()
737 && !_TCC<_Dummy>::template
738 _ImplicitlyConvertibleTuple<_Elements...>(),
739 bool>::type=false>
740 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
741 const _Elements&... __elements)
742 : _Inherited(__tag, __a, __elements...) { }
743
744 template<typename _Alloc, typename... _UElements, typename
745 enable_if<_TMC<_UElements...>::template
746 _MoveConstructibleTuple<_UElements...>()
747 && _TMC<_UElements...>::template
748 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
749 bool>::type=true>
750 tuple(allocator_arg_t __tag, const _Alloc& __a,
751 _UElements&&... __elements)
752 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
753 { }
754
755 template<typename _Alloc, typename... _UElements, typename
756 enable_if<_TMC<_UElements...>::template
757 _MoveConstructibleTuple<_UElements...>()
758 && !_TMC<_UElements...>::template
759 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
760 bool>::type=false>
761 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
762 _UElements&&... __elements)
763 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
764 { }
765
766 template<typename _Alloc>
767 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
768 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
769
770 template<typename _Alloc>
771 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
772 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
773
774 template<typename _Alloc, typename _Dummy = void,
775 typename... _UElements, typename
776 enable_if<_TMCT<_UElements...>::template
777 _ConstructibleTuple<_UElements...>()
778 && _TMCT<_UElements...>::template
779 _ImplicitlyConvertibleTuple<_UElements...>()
780 && _TNTC<_Dummy>::template
781 _NonNestedTuple<tuple<_UElements...>&&>(),
782 bool>::type=true>
783 tuple(allocator_arg_t __tag, const _Alloc& __a,
784 const tuple<_UElements...>& __in)
785 : _Inherited(__tag, __a,
786 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
787 { }
788
789 template<typename _Alloc, typename _Dummy = void,
790 typename... _UElements, typename
791 enable_if<_TMCT<_UElements...>::template
792 _ConstructibleTuple<_UElements...>()
793 && !_TMCT<_UElements...>::template
794 _ImplicitlyConvertibleTuple<_UElements...>()
795 && _TNTC<_Dummy>::template
796 _NonNestedTuple<tuple<_UElements...>&&>(),
797 bool>::type=false>
798 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
799 const tuple<_UElements...>& __in)
800 : _Inherited(__tag, __a,
801 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
802 { }
803
804 template<typename _Alloc, typename _Dummy = void,
805 typename... _UElements, typename
806 enable_if<_TMCT<_UElements...>::template
807 _MoveConstructibleTuple<_UElements...>()
808 && _TMCT<_UElements...>::template
809 _ImplicitlyMoveConvertibleTuple<_UElements...>()
810 && _TNTC<_Dummy>::template
811 _NonNestedTuple<tuple<_UElements...>&&>(),
812 bool>::type=true>
813 tuple(allocator_arg_t __tag, const _Alloc& __a,
814 tuple<_UElements...>&& __in)
815 : _Inherited(__tag, __a,
816 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
817 { }
818
819 template<typename _Alloc, typename _Dummy = void,
820 typename... _UElements, typename
821 enable_if<_TMCT<_UElements...>::template
822 _MoveConstructibleTuple<_UElements...>()
823 && !_TMCT<_UElements...>::template
824 _ImplicitlyMoveConvertibleTuple<_UElements...>()
825 && _TNTC<_Dummy>::template
826 _NonNestedTuple<tuple<_UElements...>&&>(),
827 bool>::type=false>
828 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
829 tuple<_UElements...>&& __in)
830 : _Inherited(__tag, __a,
831 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
832 { }
833
834 tuple&
835 operator=(const tuple& __in)
836 {
837 static_cast<_Inherited&>(*this) = __in;
838 return *this;
839 }
840
841 tuple&
842 operator=(tuple&& __in)
843 noexcept(is_nothrow_move_assignable<_Inherited>::value)
844 {
845 static_cast<_Inherited&>(*this) = std::move(__in);
846 return *this;
847 }
848
849 template<typename... _UElements>
850 typename
851 enable_if<sizeof...(_UElements)
852 == sizeof...(_Elements), tuple&>::type
853 operator=(const tuple<_UElements...>& __in)
854 {
855 static_cast<_Inherited&>(*this) = __in;
856 return *this;
857 }
858
859 template<typename... _UElements>
860 typename
861 enable_if<sizeof...(_UElements)
862 == sizeof...(_Elements), tuple&>::type
863 operator=(tuple<_UElements...>&& __in)
864 {
865 static_cast<_Inherited&>(*this) = std::move(__in);
866 return *this;
867 }
868
869 void
870 swap(tuple& __in)
871 noexcept(noexcept(__in._M_swap(__in)))
872 { _Inherited::_M_swap(__in); }
873 };
874
875#if __cpp_deduction_guides >= 201606
876 template<typename... _UTypes>
877 tuple(_UTypes...) -> tuple<_UTypes...>;
878 template<typename _T1, typename _T2>
879 tuple(pair<_T1, _T2>) -> tuple<_T1, _T2>;
880 template<typename _Alloc, typename... _UTypes>
881 tuple(allocator_arg_t, _Alloc, _UTypes...) -> tuple<_UTypes...>;
882 template<typename _Alloc, typename _T1, typename _T2>
883 tuple(allocator_arg_t, _Alloc, pair<_T1, _T2>) -> tuple<_T1, _T2>;
884 template<typename _Alloc, typename... _UTypes>
885 tuple(allocator_arg_t, _Alloc, tuple<_UTypes...>) -> tuple<_UTypes...>;
886#endif
887
888 // Explicit specialization, zero-element tuple.
889 template<>
890 class tuple<>
891 {
892 public:
893 void swap(tuple&) noexcept { /* no-op */ }
894 // We need the default since we're going to define no-op
895 // allocator constructors.
896 tuple() = default;
897 // No-op allocator constructors.
898 template<typename _Alloc>
899 tuple(allocator_arg_t, const _Alloc&) { }
900 template<typename _Alloc>
901 tuple(allocator_arg_t, const _Alloc&, const tuple&) { }
902 };
903
904 /// Partial specialization, 2-element tuple.
905 /// Includes construction and assignment from a pair.
906 template<typename _T1, typename _T2>
907 class tuple<_T1, _T2> : public _Tuple_impl<0, _T1, _T2>
908 {
909 typedef _Tuple_impl<0, _T1, _T2> _Inherited;
910
911 public:
912 template <typename _U1 = _T1,
913 typename _U2 = _T2,
914 typename enable_if<__and_<
915 __is_implicitly_default_constructible<_U1>,
916 __is_implicitly_default_constructible<_U2>>
917 ::value, bool>::type = true>
918
919 constexpr tuple()
920 : _Inherited() { }
921
922 template <typename _U1 = _T1,
923 typename _U2 = _T2,
924 typename enable_if<
925 __and_<
926 is_default_constructible<_U1>,
927 is_default_constructible<_U2>,
928 __not_<
929 __and_<__is_implicitly_default_constructible<_U1>,
930 __is_implicitly_default_constructible<_U2>>>>
931 ::value, bool>::type = false>
932
933 explicit constexpr tuple()
934 : _Inherited() { }
935
936 // Shortcut for the cases where constructors taking _T1, _T2
937 // need to be constrained.
938 template<typename _Dummy> using _TCC =
939 _TC<is_same<_Dummy, void>::value, _T1, _T2>;
940
941 template<typename _Dummy = void, typename
942 enable_if<_TCC<_Dummy>::template
943 _ConstructibleTuple<_T1, _T2>()
944 && _TCC<_Dummy>::template
945 _ImplicitlyConvertibleTuple<_T1, _T2>(),
946 bool>::type = true>
947 constexpr tuple(const _T1& __a1, const _T2& __a2)
948 : _Inherited(__a1, __a2) { }
949
950 template<typename _Dummy = void, typename
951 enable_if<_TCC<_Dummy>::template
952 _ConstructibleTuple<_T1, _T2>()
953 && !_TCC<_Dummy>::template
954 _ImplicitlyConvertibleTuple<_T1, _T2>(),
955 bool>::type = false>
956 explicit constexpr tuple(const _T1& __a1, const _T2& __a2)
957 : _Inherited(__a1, __a2) { }
958
959 // Shortcut for the cases where constructors taking _U1, _U2
960 // need to be constrained.
961 using _TMC = _TC<true, _T1, _T2>;
962
963 template<typename _U1, typename _U2, typename
964 enable_if<_TMC::template
965 _MoveConstructibleTuple<_U1, _U2>()
966 && _TMC::template
967 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
968 && !is_same<typename decay<_U1>::type,
969 allocator_arg_t>::value,
970 bool>::type = true>
971 constexpr tuple(_U1&& __a1, _U2&& __a2)
972 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
973
974 template<typename _U1, typename _U2, typename
975 enable_if<_TMC::template
976 _MoveConstructibleTuple<_U1, _U2>()
977 && !_TMC::template
978 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
979 && !is_same<typename decay<_U1>::type,
980 allocator_arg_t>::value,
981 bool>::type = false>
982 explicit constexpr tuple(_U1&& __a1, _U2&& __a2)
983 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
984
985 constexpr tuple(const tuple&) = default;
986
987 constexpr tuple(tuple&&) = default;
988
989 template<typename _U1, typename _U2, typename
990 enable_if<_TMC::template
991 _ConstructibleTuple<_U1, _U2>()
992 && _TMC::template
993 _ImplicitlyConvertibleTuple<_U1, _U2>(),
994 bool>::type = true>
995 constexpr tuple(const tuple<_U1, _U2>& __in)
996 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
997
998 template<typename _U1, typename _U2, typename
999 enable_if<_TMC::template
1000 _ConstructibleTuple<_U1, _U2>()
1001 && !_TMC::template
1002 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1003 bool>::type = false>
1004 explicit constexpr tuple(const tuple<_U1, _U2>& __in)
1005 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
1006
1007 template<typename _U1, typename _U2, typename
1008 enable_if<_TMC::template
1009 _MoveConstructibleTuple<_U1, _U2>()
1010 && _TMC::template
1011 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1012 bool>::type = true>
1013 constexpr tuple(tuple<_U1, _U2>&& __in)
1014 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1015
1016 template<typename _U1, typename _U2, typename
1017 enable_if<_TMC::template
1018 _MoveConstructibleTuple<_U1, _U2>()
1019 && !_TMC::template
1020 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1021 bool>::type = false>
1022 explicit constexpr tuple(tuple<_U1, _U2>&& __in)
1023 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1024
1025 template<typename _U1, typename _U2, typename
1026 enable_if<_TMC::template
1027 _ConstructibleTuple<_U1, _U2>()
1028 && _TMC::template
1029 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1030 bool>::type = true>
1031 constexpr tuple(const pair<_U1, _U2>& __in)
1032 : _Inherited(__in.first, __in.second) { }
1033
1034 template<typename _U1, typename _U2, typename
1035 enable_if<_TMC::template
1036 _ConstructibleTuple<_U1, _U2>()
1037 && !_TMC::template
1038 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1039 bool>::type = false>
1040 explicit constexpr tuple(const pair<_U1, _U2>& __in)
1041 : _Inherited(__in.first, __in.second) { }
1042
1043 template<typename _U1, typename _U2, typename
1044 enable_if<_TMC::template
1045 _MoveConstructibleTuple<_U1, _U2>()
1046 && _TMC::template
1047 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1048 bool>::type = true>
1049 constexpr tuple(pair<_U1, _U2>&& __in)
1050 : _Inherited(std::forward<_U1>(__in.first),
1051 std::forward<_U2>(__in.second)) { }
1052
1053 template<typename _U1, typename _U2, typename
1054 enable_if<_TMC::template
1055 _MoveConstructibleTuple<_U1, _U2>()
1056 && !_TMC::template
1057 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1058 bool>::type = false>
1059 explicit constexpr tuple(pair<_U1, _U2>&& __in)
1060 : _Inherited(std::forward<_U1>(__in.first),
1061 std::forward<_U2>(__in.second)) { }
1062
1063 // Allocator-extended constructors.
1064
1065 template<typename _Alloc>
1066 tuple(allocator_arg_t __tag, const _Alloc& __a)
1067 : _Inherited(__tag, __a) { }
1068
1069 template<typename _Alloc, typename _Dummy = void,
1070 typename enable_if<
1071 _TCC<_Dummy>::template
1072 _ConstructibleTuple<_T1, _T2>()
1073 && _TCC<_Dummy>::template
1074 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1075 bool>::type=true>
1076
1077 tuple(allocator_arg_t __tag, const _Alloc& __a,
1078 const _T1& __a1, const _T2& __a2)
1079 : _Inherited(__tag, __a, __a1, __a2) { }
1080
1081 template<typename _Alloc, typename _Dummy = void,
1082 typename enable_if<
1083 _TCC<_Dummy>::template
1084 _ConstructibleTuple<_T1, _T2>()
1085 && !_TCC<_Dummy>::template
1086 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1087 bool>::type=false>
1088
1089 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1090 const _T1& __a1, const _T2& __a2)
1091 : _Inherited(__tag, __a, __a1, __a2) { }
1092
1093 template<typename _Alloc, typename _U1, typename _U2, typename
1094 enable_if<_TMC::template
1095 _MoveConstructibleTuple<_U1, _U2>()
1096 && _TMC::template
1097 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1098 bool>::type = true>
1099 tuple(allocator_arg_t __tag, const _Alloc& __a, _U1&& __a1, _U2&& __a2)
1100 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1101 std::forward<_U2>(__a2)) { }
1102
1103 template<typename _Alloc, typename _U1, typename _U2, typename
1104 enable_if<_TMC::template
1105 _MoveConstructibleTuple<_U1, _U2>()
1106 && !_TMC::template
1107 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1108 bool>::type = false>
1109 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1110 _U1&& __a1, _U2&& __a2)
1111 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1112 std::forward<_U2>(__a2)) { }
1113
1114 template<typename _Alloc>
1115 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
1116 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
1117
1118 template<typename _Alloc>
1119 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
1120 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
1121
1122 template<typename _Alloc, typename _U1, typename _U2, typename
1123 enable_if<_TMC::template
1124 _ConstructibleTuple<_U1, _U2>()
1125 && _TMC::template
1126 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1127 bool>::type = true>
1128 tuple(allocator_arg_t __tag, const _Alloc& __a,
1129 const tuple<_U1, _U2>& __in)
1130 : _Inherited(__tag, __a,
1131 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1132 { }
1133
1134 template<typename _Alloc, typename _U1, typename _U2, typename
1135 enable_if<_TMC::template
1136 _ConstructibleTuple<_U1, _U2>()
1137 && !_TMC::template
1138 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1139 bool>::type = false>
1140 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1141 const tuple<_U1, _U2>& __in)
1142 : _Inherited(__tag, __a,
1143 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1144 { }
1145
1146 template<typename _Alloc, typename _U1, typename _U2, typename
1147 enable_if<_TMC::template
1148 _MoveConstructibleTuple<_U1, _U2>()
1149 && _TMC::template
1150 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1151 bool>::type = true>
1152 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple<_U1, _U2>&& __in)
1153 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1154 { }
1155
1156 template<typename _Alloc, typename _U1, typename _U2, typename
1157 enable_if<_TMC::template
1158 _MoveConstructibleTuple<_U1, _U2>()
1159 && !_TMC::template
1160 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1161 bool>::type = false>
1162 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1163 tuple<_U1, _U2>&& __in)
1164 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1165 { }
1166
1167 template<typename _Alloc, typename _U1, typename _U2, typename
1168 enable_if<_TMC::template
1169 _ConstructibleTuple<_U1, _U2>()
1170 && _TMC::template
1171 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1172 bool>::type = true>
1173 tuple(allocator_arg_t __tag, const _Alloc& __a,
1174 const pair<_U1, _U2>& __in)
1175 : _Inherited(__tag, __a, __in.first, __in.second) { }
1176
1177 template<typename _Alloc, typename _U1, typename _U2, typename
1178 enable_if<_TMC::template
1179 _ConstructibleTuple<_U1, _U2>()
1180 && !_TMC::template
1181 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1182 bool>::type = false>
1183 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1184 const pair<_U1, _U2>& __in)
1185 : _Inherited(__tag, __a, __in.first, __in.second) { }
1186
1187 template<typename _Alloc, typename _U1, typename _U2, typename
1188 enable_if<_TMC::template
1189 _MoveConstructibleTuple<_U1, _U2>()
1190 && _TMC::template
1191 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1192 bool>::type = true>
1193 tuple(allocator_arg_t __tag, const _Alloc& __a, pair<_U1, _U2>&& __in)
1194 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1195 std::forward<_U2>(__in.second)) { }
1196
1197 template<typename _Alloc, typename _U1, typename _U2, typename
1198 enable_if<_TMC::template
1199 _MoveConstructibleTuple<_U1, _U2>()
1200 && !_TMC::template
1201 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1202 bool>::type = false>
1203 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1204 pair<_U1, _U2>&& __in)
1205 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1206 std::forward<_U2>(__in.second)) { }
1207
1208 tuple&
1209 operator=(const tuple& __in)
1210 {
1211 static_cast<_Inherited&>(*this) = __in;
1212 return *this;
1213 }
1214
1215 tuple&
1216 operator=(tuple&& __in)
1217 noexcept(is_nothrow_move_assignable<_Inherited>::value)
1218 {
1219 static_cast<_Inherited&>(*this) = std::move(__in);
1220 return *this;
1221 }
1222
1223 template<typename _U1, typename _U2>
1224 tuple&
1225 operator=(const tuple<_U1, _U2>& __in)
1226 {
1227 static_cast<_Inherited&>(*this) = __in;
1228 return *this;
1229 }
1230
1231 template<typename _U1, typename _U2>
1232 tuple&
1233 operator=(tuple<_U1, _U2>&& __in)
1234 {
1235 static_cast<_Inherited&>(*this) = std::move(__in);
1236 return *this;
1237 }
1238
1239 template<typename _U1, typename _U2>
1240 tuple&
1241 operator=(const pair<_U1, _U2>& __in)
1242 {
1243 this->_M_head(*this) = __in.first;
1244 this->_M_tail(*this)._M_head(*this) = __in.second;
1245 return *this;
1246 }
1247
1248 template<typename _U1, typename _U2>
1249 tuple&
1250 operator=(pair<_U1, _U2>&& __in)
1251 {
1252 this->_M_head(*this) = std::forward<_U1>(__in.first);
1253 this->_M_tail(*this)._M_head(*this) = std::forward<_U2>(__in.second);
1254 return *this;
1255 }
1256
1257 void
1258 swap(tuple& __in)
1259 noexcept(noexcept(__in._M_swap(__in)))
1260 { _Inherited::_M_swap(__in); }
1261 };
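The comment on this partial specialization notes that a two-element tuple supports construction and assignment from a std::pair. A minimal illustrative sketch of those converting overloads (variable names here are invented for the example):

    #include <string>
    #include <tuple>
    #include <utility>

    int main()
    {
      std::pair<int, std::string> p(1, "one");
      std::tuple<int, std::string> t(p);          // pair converting constructor above
      t = std::make_pair(2, std::string("two"));  // operator=(pair<_U1,_U2>&&) overload
      return std::get<0>(t) == 2 ? 0 : 1;
    }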
1262
1263
1264 /// class tuple_size
1265 template<typename... _Elements>
1266 struct tuple_size<tuple<_Elements...>>
1267 : public integral_constant<std::size_t, sizeof...(_Elements)> { };
1268
1269#if __cplusplus /* 201103L */ > 201402L
1270 template <typename _Tp>
1271 inline constexpr size_t tuple_size_v = tuple_size<_Tp>::value;
1272#endif
1273
1274 /**
1275 * Recursive case for tuple_element: strip off the first element in
1276 * the tuple and retrieve the (i-1)th element of the remaining tuple.
1277 */
1278 template<std::size_t __i, typename _Head, typename... _Tail>
1279 struct tuple_element<__i, tuple<_Head, _Tail...> >
1280 : tuple_element<__i - 1, tuple<_Tail...> > { };
1281
1282 /**
1283 * Basis case for tuple_element: The first element is the one we're seeking.
1284 */
1285 template<typename _Head, typename... _Tail>
1286 struct tuple_element<0, tuple<_Head, _Tail...> >
1287 {
1288 typedef _Head type;
1289 };
1290
1291 /**
1292 * Error case for tuple_element: invalid index.
1293 */
1294 template<size_t __i>
1295 struct tuple_element<__i, tuple<>>
1296 {
1297 static_assert(__i < tuple_size<tuple<>>::value,
1298 "tuple index is in range");
1299 };
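The recursive and basis cases above resolve an element type entirely at compile time. A small sketch of what that resolution yields (illustrative only, not part of the header):

    #include <tuple>
    #include <type_traits>

    // tuple_element<1, tuple<int, double, char>> strips 'int' and recurses to
    // tuple_element<0, tuple<double, char>>, whose basis case names 'double'.
    static_assert(std::is_same<std::tuple_element<1, std::tuple<int, double, char>>::type,
                               double>::value,
                  "index 1 selects the second element type");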
1300
1301 template<std::size_t __i, typename _Head, typename... _Tail>
1302 constexpr _Head&
1303 __get_helper(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1304 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1305
1306 template<std::size_t __i, typename _Head, typename... _Tail>
1307 constexpr const _Head&
1308 __get_helper(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1309 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
 11. Calling '_Tuple_impl::_M_head'
 14. Returning from '_Tuple_impl::_M_head'
 34. Calling '_Tuple_impl::_M_head'
 37. Returning from '_Tuple_impl::_M_head'
1310
1311 /// Return a reference to the ith element of a tuple.
1312 template<std::size_t __i, typename... _Elements>
1313 constexpr __tuple_element_t<__i, tuple<_Elements...>>&
1314 get(tuple<_Elements...>& __t) noexcept
1315 { return std::__get_helper<__i>(__t); }
1316
1317 /// Return a const reference to the ith element of a const tuple.
1318 template<std::size_t __i, typename... _Elements>
1319 constexpr const __tuple_element_t<__i, tuple<_Elements...>>&
1320 get(const tuple<_Elements...>& __t) noexcept
1321 { return std::__get_helper<__i>(__t); }
 10. Calling '__get_helper'
 15. Returning from '__get_helper'
 33. Calling '__get_helper'
 38. Returning from '__get_helper'
1322
1323 /// Return an rvalue reference to the ith element of a tuple rvalue.
1324 template<std::size_t __i, typename... _Elements>
1325 constexpr __tuple_element_t<__i, tuple<_Elements...>>&&
1326 get(tuple<_Elements...>&& __t) noexcept
1327 {
1328 typedef __tuple_element_t<__i, tuple<_Elements...>> __element_type;
1329 return std::forward<__element_type&&>(std::get<__i>(__t));
1330 }
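A short usage sketch of the three get-by-index overloads defined above (names invented for illustration):

    #include <cassert>
    #include <string>
    #include <tuple>
    #include <utility>

    int main()
    {
      std::tuple<int, std::string> t(42, "hello");
      std::get<0>(t) = 7;                         // lvalue overload: reference into t
      const auto& ct = t;
      assert(std::get<0>(ct) == 7);               // const overload
      std::string s = std::get<1>(std::move(t));  // rvalue overload: moves the element out
      assert(s == "hello");
      return 0;
    }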
1331
1332#if __cplusplus /* 201103L */ > 201103L
1333
1334#define __cpp_lib_tuples_by_type 201304
1335
1336 template<typename _Head, size_t __i, typename... _Tail>
1337 constexpr _Head&
1338 __get_helper2(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1339 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1340
1341 template<typename _Head, size_t __i, typename... _Tail>
1342 constexpr const _Head&
1343 __get_helper2(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1344 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1345
1346 /// Return a reference to the unique element of type _Tp of a tuple.
1347 template <typename _Tp, typename... _Types>
1348 constexpr _Tp&
1349 get(tuple<_Types...>& __t) noexcept
1350 { return std::__get_helper2<_Tp>(__t); }
1351
1352 /// Return a reference to the unique element of type _Tp of a tuple rvalue.
1353 template <typename _Tp, typename... _Types>
1354 constexpr _Tp&&
1355 get(tuple<_Types...>&& __t) noexcept
1356 { return std::forward<_Tp&&>(std::__get_helper2<_Tp>(__t)); }
1357
1358 /// Return a const reference to the unique element of type _Tp of a tuple.
1359 template <typename _Tp, typename... _Types>
1360 constexpr const _Tp&
1361 get(const tuple<_Types...>& __t) noexcept
1362 { return std::__get_helper2<_Tp>(__t); }
1363#endif
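The block above is compiled only for C++14 and later (the `__cplusplus > 201103L` guard is false in this C++11 translation unit). When available, get-by-type requires the requested type to occur exactly once in the tuple; an illustrative sketch:

    // Compile as C++14 or later.
    #include <cassert>
    #include <string>
    #include <tuple>

    int main()
    {
      std::tuple<int, std::string> t(1, "x");
      assert(std::get<int>(t) == 1);            // OK: int occurs exactly once
      // std::get<int>(std::tuple<int, int>()); // ill-formed: type is not unique
      return 0;
    }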
1364
1365 // This class performs the comparison operations on tuples
1366 template<typename _Tp, typename _Up, size_t __i, size_t __size>
1367 struct __tuple_compare
1368 {
1369 static constexpr bool
1370 __eq(const _Tp& __t, const _Up& __u)
1371 {
1372 return bool(std::get<__i>(__t) == std::get<__i>(__u))
1373 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__eq(__t, __u);
1374 }
1375
1376 static constexpr bool
1377 __less(const _Tp& __t, const _Up& __u)
1378 {
1379 return bool(std::get<__i>(__t) < std::get<__i>(__u))
1380 || (!bool(std::get<__i>(__u) < std::get<__i>(__t))
1381 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__less(__t, __u));
1382 }
1383 };
1384
1385 template<typename _Tp, typename _Up, size_t __size>
1386 struct __tuple_compare<_Tp, _Up, __size, __size>
1387 {
1388 static constexpr bool
1389 __eq(const _Tp&, const _Up&) { return true; }
1390
1391 static constexpr bool
1392 __less(const _Tp&, const _Up&) { return false; }
1393 };
1394
1395 template<typename... _TElements, typename... _UElements>
1396 constexpr bool
1397 operator==(const tuple<_TElements...>& __t,
1398 const tuple<_UElements...>& __u)
1399 {
1400 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1401 "tuple objects can only be compared if they have equal sizes.");
1402 using __compare = __tuple_compare<tuple<_TElements...>,
1403 tuple<_UElements...>,
1404 0, sizeof...(_TElements)>;
1405 return __compare::__eq(__t, __u);
1406 }
1407
1408 template<typename... _TElements, typename... _UElements>
1409 constexpr bool
1410 operator<(const tuple<_TElements...>& __t,
1411 const tuple<_UElements...>& __u)
1412 {
1413 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1414 "tuple objects can only be compared if they have equal sizes.");
1415 using __compare = __tuple_compare<tuple<_TElements...>,
1416 tuple<_UElements...>,
1417 0, sizeof...(_TElements)>;
1418 return __compare::__less(__t, __u);
1419 }
1420
1421 template<typename... _TElements, typename... _UElements>
1422 constexpr bool
1423 operator!=(const tuple<_TElements...>& __t,
1424 const tuple<_UElements...>& __u)
1425 { return !(__t == __u); }
1426
1427 template<typename... _TElements, typename... _UElements>
1428 constexpr bool
1429 operator>(const tuple<_TElements...>& __t,
1430 const tuple<_UElements...>& __u)
1431 { return __u < __t; }
1432
1433 template<typename... _TElements, typename... _UElements>
1434 constexpr bool
1435 operator<=(const tuple<_TElements...>& __t,
1436 const tuple<_UElements...>& __u)
1437 { return !(__u < __t); }
1438
1439 template<typename... _TElements, typename... _UElements>
1440 constexpr bool
1441 operator>=(const tuple<_TElements...>& __t,
1442 const tuple<_UElements...>& __u)
1443 { return !(__t < __u); }
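The __tuple_compare helper gives the relational operators above their lexicographic behaviour: elements are compared left to right and the first inequality decides. A minimal sketch (names invented for the example):

    #include <cassert>
    #include <tuple>

    int main()
    {
      std::tuple<int, char> a(1, 'a');
      std::tuple<int, char> b(1, 'b');
      assert(a < b);            // first elements equal, 'a' < 'b' decides
      assert(a != b && b > a);
      assert(a <= b && !(a >= b));
      return 0;
    }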
1444
1445 // NB: DR 705.
1446 template<typename... _Elements>
1447 constexpr tuple<typename __decay_and_strip<_Elements>::__type...>
1448 make_tuple(_Elements&&... __args)
1449 {
1450 typedef tuple<typename __decay_and_strip<_Elements>::__type...>
1451 __result_type;
1452 return __result_type(std::forward<_Elements>(__args)...);
1453 }
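make_tuple applies __decay_and_strip to each argument: array and function arguments decay, cv-qualifiers and references are dropped, and std::reference_wrapper is unwrapped into a real reference. A sketch of the resulting type (illustrative only):

    #include <functional>
    #include <tuple>
    #include <type_traits>

    int main()
    {
      int x = 0;
      auto t = std::make_tuple(1, "hi", std::ref(x));
      // The string literal decays to const char*, reference_wrapper<int> becomes int&.
      static_assert(std::is_same<decltype(t),
                                 std::tuple<int, const char*, int&>>::value,
                    "make_tuple decays and strips reference_wrapper");
      std::get<2>(t) = 5;   // writes through to x
      return x == 5 ? 0 : 1;
    }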
1454
1455 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1456 // 2275. Why is forward_as_tuple not constexpr?
1457 template<typename... _Elements>
1458 constexpr tuple<_Elements&&...>
1459 forward_as_tuple(_Elements&&... __args) noexcept
1460 { return tuple<_Elements&&...>(std::forward<_Elements>(__args)...); }
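forward_as_tuple builds a tuple of references that preserves the value category of each argument, which is what makes it suitable for forwarding argument packs (for instance to the piecewise pair constructor further below). A brief sketch:

    #include <string>
    #include <tuple>
    #include <type_traits>

    int main()
    {
      std::string s = "abc";
      auto t = std::forward_as_tuple(s, 42);
      // lvalue argument -> lvalue reference, prvalue argument -> rvalue reference.
      static_assert(std::is_same<decltype(t),
                                 std::tuple<std::string&, int&&>>::value,
                    "forward_as_tuple keeps value categories");
      return 0;
    }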
1461
1462 template<size_t, typename, typename, size_t>
1463 struct __make_tuple_impl;
1464
1465 template<size_t _Idx, typename _Tuple, typename... _Tp, size_t _Nm>
1466 struct __make_tuple_impl<_Idx, tuple<_Tp...>, _Tuple, _Nm>
1467 : __make_tuple_impl<_Idx + 1,
1468 tuple<_Tp..., __tuple_element_t<_Idx, _Tuple>>,
1469 _Tuple, _Nm>
1470 { };
1471
1472 template<std::size_t _Nm, typename _Tuple, typename... _Tp>
1473 struct __make_tuple_impl<_Nm, tuple<_Tp...>, _Tuple, _Nm>
1474 {
1475 typedef tuple<_Tp...> __type;
1476 };
1477
1478 template<typename _Tuple>
1479 struct __do_make_tuple
1480 : __make_tuple_impl<0, tuple<>, _Tuple, std::tuple_size<_Tuple>::value>
1481 { };
1482
1483 // Returns the std::tuple equivalent of a tuple-like type.
1484 template<typename _Tuple>
1485 struct __make_tuple
1486 : public __do_make_tuple<typename std::remove_cv
1487 <typename std::remove_reference<_Tuple>::type>::type>
1488 { };
1489
1490 // Combines several std::tuple's into a single one.
1491 template<typename...>
1492 struct __combine_tuples;
1493
1494 template<>
1495 struct __combine_tuples<>
1496 {
1497 typedef tuple<> __type;
1498 };
1499
1500 template<typename... _Ts>
1501 struct __combine_tuples<tuple<_Ts...>>
1502 {
1503 typedef tuple<_Ts...> __type;
1504 };
1505
1506 template<typename... _T1s, typename... _T2s, typename... _Rem>
1507 struct __combine_tuples<tuple<_T1s...>, tuple<_T2s...>, _Rem...>
1508 {
1509 typedef typename __combine_tuples<tuple<_T1s..., _T2s...>,
1510 _Rem...>::__type __type;
1511 };
1512
1513 // Computes the result type of tuple_cat given a set of tuple-like types.
1514 template<typename... _Tpls>
1515 struct __tuple_cat_result
1516 {
1517 typedef typename __combine_tuples
1518 <typename __make_tuple<_Tpls>::__type...>::__type __type;
1519 };
1520
1521 // Helper to determine the index set for the first tuple-like
1522 // type of a given set.
1523 template<typename...>
1524 struct __make_1st_indices;
1525
1526 template<>
1527 struct __make_1st_indices<>
1528 {
1529 typedef std::_Index_tuple<> __type;
1530 };
1531
1532 template<typename _Tp, typename... _Tpls>
1533 struct __make_1st_indices<_Tp, _Tpls...>
1534 {
1535 typedef typename std::_Build_index_tuple<std::tuple_size<
1536 typename std::remove_reference<_Tp>::type>::value>::__type __type;
1537 };
1538
1539 // Performs the actual concatenation by step-wise expanding tuple-like
1540 // objects into the elements, which are finally forwarded into the
1541 // result tuple.
1542 template<typename _Ret, typename _Indices, typename... _Tpls>
1543 struct __tuple_concater;
1544
1545 template<typename _Ret, std::size_t... _Is, typename _Tp, typename... _Tpls>
1546 struct __tuple_concater<_Ret, std::_Index_tuple<_Is...>, _Tp, _Tpls...>
1547 {
1548 template<typename... _Us>
1549 static constexpr _Ret
1550 _S_do(_Tp&& __tp, _Tpls&&... __tps, _Us&&... __us)
1551 {
1552 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1553 typedef __tuple_concater<_Ret, __idx, _Tpls...> __next;
1554 return __next::_S_do(std::forward<_Tpls>(__tps)...,
1555 std::forward<_Us>(__us)...,
1556 std::get<_Is>(std::forward<_Tp>(__tp))...);
1557 }
1558 };
1559
1560 template<typename _Ret>
1561 struct __tuple_concater<_Ret, std::_Index_tuple<>>
1562 {
1563 template<typename... _Us>
1564 static constexpr _Ret
1565 _S_do(_Us&&... __us)
1566 {
1567 return _Ret(std::forward<_Us>(__us)...);
1568 }
1569 };
1570
1571 /// tuple_cat
1572 template<typename... _Tpls, typename = typename
1573 enable_if<__and_<__is_tuple_like<_Tpls>...>::value>::type>
1574 constexpr auto
1575 tuple_cat(_Tpls&&... __tpls)
1576 -> typename __tuple_cat_result<_Tpls...>::__type
1577 {
1578 typedef typename __tuple_cat_result<_Tpls...>::__type __ret;
1579 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1580 typedef __tuple_concater<__ret, __idx, _Tpls...> __concater;
1581 return __concater::_S_do(std::forward<_Tpls>(__tpls)...);
1582 }
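The __make_tuple/__combine_tuples/__tuple_concater machinery above first computes the concatenated result type and then expands each tuple-like argument element-wise into the result's constructor. From the caller's side, a sketch (names invented for illustration):

    #include <cassert>
    #include <string>
    #include <tuple>
    #include <utility>

    int main()
    {
      std::tuple<int, char> a(1, 'x');
      std::pair<double, std::string> b(2.5, "y");   // pairs are tuple-like too
      auto c = std::tuple_cat(a, b, std::make_tuple(3));
      // c is std::tuple<int, char, double, std::string, int>
      assert(std::get<4>(c) == 3 && std::get<3>(c) == "y");
      return 0;
    }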
1583
1584 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1585 // 2301. Why is tie not constexpr?
1586 /// tie
1587 template<typename... _Elements>
1588 constexpr tuple<_Elements&...>
1589 tie(_Elements&... __args) noexcept
1590 { return tuple<_Elements&...>(__args...); }
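tie produces a tuple of lvalue references, so assigning another tuple through it unpacks the values into existing variables. A minimal sketch:

    #include <cassert>
    #include <string>
    #include <tuple>

    int main()
    {
      int i = 0;
      std::string s;
      std::tie(i, s) = std::make_tuple(7, std::string("seven"));
      assert(i == 7 && s == "seven");
      return 0;
    }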
1591
1592 /// swap
1593 template<typename... _Elements>
1594 inline
1595#if __cplusplus /* 201103L */ > 201402L || !defined(__STRICT_ANSI__) /* 1 */ // c++1z or gnu++11
1596 // Constrained free swap overload, see p0185r1
1597 typename enable_if<__and_<__is_swappable<_Elements>...>::value
1598 >::type
1599#else
1600 void
1601#endif
1602 swap(tuple<_Elements...>& __x, tuple<_Elements...>& __y)
1603 noexcept(noexcept(__x.swap(__y)))
1604 { __x.swap(__y); }
1605
1606#if __cplusplus /* 201103L */ > 201402L || !defined(__STRICT_ANSI__) /* 1 */ // c++1z or gnu++11
1607 template<typename... _Elements>
1608 typename enable_if<!__and_<__is_swappable<_Elements>...>::value>::type
1609 swap(tuple<_Elements...>&, tuple<_Elements...>&) = delete;
1610#endif
1611
1612 // A class (and instance) which can be used in 'tie' when an element
1613 // of a tuple is not required.
1614 // _GLIBCXX14_CONSTEXPR
1615 // 2933. PR for LWG 2773 could be clearer
1616 struct _Swallow_assign
1617 {
1618 template<class _Tp>
1619 _GLIBCXX14_CONSTEXPR const _Swallow_assign&
1620 operator=(const _Tp&) const
1621 { return *this; }
1622 };
1623
1624 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1625 // 2773. Making std::ignore constexpr
1626 _GLIBCXX17_INLINE constexpr _Swallow_assign ignore{};
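_Swallow_assign simply discards whatever is assigned to it, and the std::ignore instance lets callers skip positions in a tie. For example:

    #include <cassert>
    #include <tuple>

    int main()
    {
      int lo = 0, hi = 0;
      std::tie(lo, std::ignore, hi) = std::make_tuple(1, 2, 3);  // middle value dropped
      assert(lo == 1 && hi == 3);
      return 0;
    }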
1627
1628 /// Partial specialization for tuples
1629 template<typename... _Types, typename _Alloc>
1630 struct uses_allocator<tuple<_Types...>, _Alloc> : true_type { };
1631
1632 // See stl_pair.h...
1633 template<class _T1, class _T2>
1634 template<typename... _Args1, typename... _Args2>
1635 inline
1636 pair<_T1, _T2>::
1637 pair(piecewise_construct_t,
1638 tuple<_Args1...> __first, tuple<_Args2...> __second)
1639 : pair(__first, __second,
1640 typename _Build_index_tuple<sizeof...(_Args1)>::__type(),
1641 typename _Build_index_tuple<sizeof...(_Args2)>::__type())
1642 { }
1643
1644 template<class _T1, class _T2>
1645 template<typename... _Args1, std::size_t... _Indexes1,
1646 typename... _Args2, std::size_t... _Indexes2>
1647 inline
1648 pair<_T1, _T2>::
1649 pair(tuple<_Args1...>& __tuple1, tuple<_Args2...>& __tuple2,
1650 _Index_tuple<_Indexes1...>, _Index_tuple<_Indexes2...>)
1651 : first(std::forward<_Args1>(std::get<_Indexes1>(__tuple1))...),
1652 second(std::forward<_Args2>(std::get<_Indexes2>(__tuple2))...)
1653 { }
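These two pair constructors implement piecewise construction: each forwarded tuple is expanded through an index pack directly into the corresponding member's constructor. A usage sketch (the concrete member types are chosen only for illustration):

    #include <string>
    #include <tuple>
    #include <utility>
    #include <vector>

    int main()
    {
      // first is built as std::string(3, 'x') and second as std::vector<int>(5, 1),
      // without constructing temporary string/vector objects to copy or move from.
      std::pair<std::string, std::vector<int>> p(std::piecewise_construct,
                                                 std::forward_as_tuple(3, 'x'),
                                                 std::forward_as_tuple(5, 1));
      return (p.first == "xxx" && p.second.size() == 5) ? 0 : 1;
    }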
1654
1655#if __cplusplus /* 201103L */ > 201402L
1656# define __cpp_lib_apply 201603
1657
1658 template <typename _Fn, typename _Tuple, size_t... _Idx>
1659 constexpr decltype(auto)
1660 __apply_impl(_Fn&& __f, _Tuple&& __t, index_sequence<_Idx...>)
1661 {
1662 return std::__invoke(std::forward<_Fn>(__f),
1663 std::get<_Idx>(std::forward<_Tuple>(__t))...);
1664 }
1665
1666 template <typename _Fn, typename _Tuple>
1667 constexpr decltype(auto)
1668 apply(_Fn&& __f, _Tuple&& __t)
1669 {
1670 using _Indices = make_index_sequence<tuple_size_v<decay_t<_Tuple>>>;
1671 return std::__apply_impl(std::forward<_Fn>(__f),
1672 std::forward<_Tuple>(__t),
1673 _Indices{});
1674 }
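std::apply (C++17, so guarded out of this C++11 build) expands a tuple into a call by generating an index_sequence and forwarding std::get<_Idx>(__t)... to the callable, as __apply_impl shows. A sketch, to be compiled as C++17 (the function add3 is invented for the example):

    // Compile as C++17 or later.
    #include <tuple>

    int add3(int a, int b, int c) { return a + b + c; }

    int main()
    {
      // Equivalent to add3(1, 2, 3): indices 0, 1, 2 drive std::get<Idx>(t)... .
      return std::apply(add3, std::make_tuple(1, 2, 3)) == 6 ? 0 : 1;
    }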
1675
1676#define __cpp_lib_make_from_tuple 201606
1677
1678 template <typename _Tp, typename _Tuple, size_t... _Idx>
1679 constexpr _Tp
1680 __make_from_tuple_impl(_Tuple&& __t, index_sequence<_Idx...>)
1681 { return _Tp(std::get<_Idx>(std::forward<_Tuple>(__t))...); }
1682
1683 template <typename _Tp, typename _Tuple>
1684 constexpr _Tp
1685 make_from_tuple(_Tuple&& __t)
1686 {
1687 return __make_from_tuple_impl<_Tp>(
1688 std::forward<_Tuple>(__t),
1689 make_index_sequence<tuple_size_v<decay_t<_Tuple>>>{});
1690 }
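std::make_from_tuple (also C++17) is the constructor-call analogue of apply: the tuple's elements become the constructor arguments of _Tp. A sketch, likewise compiled as C++17:

    // Compile as C++17 or later.
    #include <string>
    #include <tuple>

    int main()
    {
      // Calls std::string(4, '!') with the tuple's elements as arguments.
      auto s = std::make_from_tuple<std::string>(std::make_tuple(4, '!'));
      return s == "!!!!" ? 0 : 1;
    }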
1691#endif // C++17
1692
1693 /// @}
1694
1695_GLIBCXX_END_NAMESPACE_VERSION
1696} // namespace std
1697
1698#endif // C++11
1699
1700#endif // _GLIBCXX_TUPLE