Bug Summary

File: lib/CodeGen/InlineSpiller.cpp
Warning: line 1230, column 14
Called C++ object pointer is null
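
The defect class here is a member call through a pointer that can be null on some path. As a minimal, hypothetical sketch of the pattern (not the actual code at line 1230, which lies beyond this excerpt), using the LiveInterval API this file works with:

  #include "llvm/CodeGen/LiveInterval.h"
  #include "llvm/CodeGen/SlotIndexes.h"

  // LiveRange::getVNInfoAt() returns nullptr when no value is live at Idx,
  // so a member call through the unchecked result is precisely what the
  // analyzer reports as "Called C++ object pointer is null".
  static bool isPHIDefAt(const llvm::LiveInterval &LI, llvm::SlotIndex Idx) {
    llvm::VNInfo *VNI = LI.getVNInfoAt(Idx); // may be nullptr
    return VNI && VNI->isPHIDef();           // guard before the member call
  }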

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InlineSpiller.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-7~svn329677/lib/CodeGen -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/CodeGen -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/CodeGen/InlineSpiller.cpp

/build/llvm-toolchain-snapshot-7~svn329677/lib/CodeGen/InlineSpiller.cpp

1//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// The inline spiller modifies the machine function directly instead of
11// inserting spills and restores in VirtRegMap.
12//
13//===----------------------------------------------------------------------===//
14
15#include "LiveRangeCalc.h"
16#include "Spiller.h"
17#include "SplitKit.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/SmallPtrSet.h"
25#include "llvm/ADT/SmallVector.h"
26#include "llvm/ADT/Statistic.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/CodeGen/LiveInterval.h"
29#include "llvm/CodeGen/LiveIntervals.h"
30#include "llvm/CodeGen/LiveRangeEdit.h"
31#include "llvm/CodeGen/LiveStacks.h"
32#include "llvm/CodeGen/MachineBasicBlock.h"
33#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
34#include "llvm/CodeGen/MachineDominators.h"
35#include "llvm/CodeGen/MachineFunction.h"
36#include "llvm/CodeGen/MachineFunctionPass.h"
37#include "llvm/CodeGen/MachineInstr.h"
38#include "llvm/CodeGen/MachineInstrBuilder.h"
39#include "llvm/CodeGen/MachineInstrBundle.h"
40#include "llvm/CodeGen/MachineLoopInfo.h"
41#include "llvm/CodeGen/MachineOperand.h"
42#include "llvm/CodeGen/MachineRegisterInfo.h"
43#include "llvm/CodeGen/SlotIndexes.h"
44#include "llvm/CodeGen/TargetInstrInfo.h"
45#include "llvm/CodeGen/TargetOpcodes.h"
46#include "llvm/CodeGen/TargetRegisterInfo.h"
47#include "llvm/CodeGen/TargetSubtargetInfo.h"
48#include "llvm/CodeGen/VirtRegMap.h"
49#include "llvm/Support/BlockFrequency.h"
50#include "llvm/Support/BranchProbability.h"
51#include "llvm/Support/CommandLine.h"
52#include "llvm/Support/Compiler.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/raw_ostream.h"
56#include <cassert>
57#include <iterator>
58#include <tuple>
59#include <utility>
60#include <vector>
61
62using namespace llvm;
63
64#define DEBUG_TYPE "regalloc"
65
66STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
67STATISTIC(NumSnippets, "Number of spilled snippets");
68STATISTIC(NumSpills, "Number of spills inserted");
69STATISTIC(NumSpillsRemoved, "Number of spills removed");
70STATISTIC(NumReloads, "Number of reloads inserted");
71STATISTIC(NumReloadsRemoved, "Number of reloads removed");
72STATISTIC(NumFolded, "Number of folded stack accesses");
73STATISTIC(NumFoldedLoads, "Number of folded loads");
74STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
75
76static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
77 cl::desc("Disable inline spill hoisting"));
78
79namespace {
80
81class HoistSpillHelper : private LiveRangeEdit::Delegate {
82 MachineFunction &MF;
83 LiveIntervals &LIS;
84 LiveStacks &LSS;
85 AliasAnalysis *AA;
86 MachineDominatorTree &MDT;
87 MachineLoopInfo &Loops;
88 VirtRegMap &VRM;
89 MachineRegisterInfo &MRI;
90 const TargetInstrInfo &TII;
91 const TargetRegisterInfo &TRI;
92 const MachineBlockFrequencyInfo &MBFI;
93
94 InsertPointAnalysis IPA;
95
96 // Map from StackSlot to the LiveInterval of the original register.
97 // Note the LiveInterval of the original register may have been deleted
98 // after it is spilled. We keep a copy here to track the range where
99 // spills can be moved.
100 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
101
102 // Map from pair of (StackSlot and Original VNI) to a set of spills which
103 // have the same stackslot and have equal values defined by Original VNI.
104 // These spills are mergeable and are hoist candidates.
105 using MergeableSpillsMap =
106 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
107 MergeableSpillsMap MergeableSpills;
108
109 /// This is the map from original register to a set containing all its
110 /// siblings. To hoist a spill to another BB, we need to find out a live
111 /// sibling there and use it as the source of the new spill.
112 DenseMap<unsigned, SmallSetVector<unsigned, 16>> Virt2SiblingsMap;
113
114 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
115 MachineBasicBlock &BB, unsigned &LiveReg);
116
117 void rmRedundantSpills(
118 SmallPtrSet<MachineInstr *, 16> &Spills,
119 SmallVectorImpl<MachineInstr *> &SpillsToRm,
120 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
121
122 void getVisitOrders(
123 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
124 SmallVectorImpl<MachineDomTreeNode *> &Orders,
125 SmallVectorImpl<MachineInstr *> &SpillsToRm,
126 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
127 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
128
129 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
130 SmallPtrSet<MachineInstr *, 16> &Spills,
131 SmallVectorImpl<MachineInstr *> &SpillsToRm,
132 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
133
134public:
135 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
136 VirtRegMap &vrm)
137 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
138 LSS(pass.getAnalysis<LiveStacks>()),
139 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
140 MDT(pass.getAnalysis<MachineDominatorTree>()),
141 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
142 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
143 TRI(*mf.getSubtarget().getRegisterInfo()),
144 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
145 IPA(LIS, mf.getNumBlockIDs()) {}
146
147 void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
148 unsigned Original);
149 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
150 void hoistAllSpills();
151 void LRE_DidCloneVirtReg(unsigned, unsigned) override;
152};
153
154class InlineSpiller : public Spiller {
155 MachineFunction &MF;
156 LiveIntervals &LIS;
157 LiveStacks &LSS;
158 AliasAnalysis *AA;
159 MachineDominatorTree &MDT;
160 MachineLoopInfo &Loops;
161 VirtRegMap &VRM;
162 MachineRegisterInfo &MRI;
163 const TargetInstrInfo &TII;
164 const TargetRegisterInfo &TRI;
165 const MachineBlockFrequencyInfo &MBFI;
166
167 // Variables that are valid during spill(), but used by multiple methods.
168 LiveRangeEdit *Edit;
169 LiveInterval *StackInt;
170 int StackSlot;
171 unsigned Original;
172
173 // All registers to spill to StackSlot, including the main register.
174 SmallVector<unsigned, 8> RegsToSpill;
175
176 // All COPY instructions to/from snippets.
177 // They are ignored since both operands refer to the same stack slot.
178 SmallPtrSet<MachineInstr*, 8> SnippetCopies;
179
180 // Values that failed to remat at some point.
181 SmallPtrSet<VNInfo*, 8> UsedValues;
182
183 // Dead defs generated during spilling.
184 SmallVector<MachineInstr*, 8> DeadDefs;
185
186 // Object records spills information and does the hoisting.
187 HoistSpillHelper HSpiller;
188
189 ~InlineSpiller() override = default;
190
191public:
192 InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
193 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
194 LSS(pass.getAnalysis<LiveStacks>()),
195 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
196 MDT(pass.getAnalysis<MachineDominatorTree>()),
197 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
198 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
199 TRI(*mf.getSubtarget().getRegisterInfo()),
200 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
201 HSpiller(pass, mf, vrm) {}
202
203 void spill(LiveRangeEdit &) override;
204 void postOptimization() override;
205
206private:
207 bool isSnippet(const LiveInterval &SnipLI);
208 void collectRegsToSpill();
209
210 bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }
211
212 bool isSibling(unsigned Reg);
213 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
214 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
215
216 void markValueUsed(LiveInterval*, VNInfo*);
217 bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
218 void reMaterializeAll();
219
220 bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
221 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
222 MachineInstr *LoadMI = nullptr);
223 void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
224 void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
225
226 void spillAroundUses(unsigned Reg);
227 void spillAll();
228};
229
230} // end anonymous namespace
231
232Spiller::~Spiller() = default;
233
234void Spiller::anchor() {}
235
236Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
237 MachineFunction &mf,
238 VirtRegMap &vrm) {
239 return new InlineSpiller(pass, mf, vrm);
240}
241
242//===----------------------------------------------------------------------===//
243// Snippets
244//===----------------------------------------------------------------------===//
245
246// When spilling a virtual register, we also spill any snippets it is connected
247// to. The snippets are small live ranges that only have a single real use,
248// leftovers from live range splitting. Spilling them enables memory operand
249// folding or tightens the live range around the single use.
250//
251// This minimizes register pressure and maximizes the store-to-load distance
252// for spill slots, which can be important in tight loops.
253
254/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
255/// otherwise return 0.
256static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
257 if (!MI.isFullCopy())
258 return 0;
259 if (MI.getOperand(0).getReg() == Reg)
260 return MI.getOperand(1).getReg();
261 if (MI.getOperand(1).getReg() == Reg)
262 return MI.getOperand(0).getReg();
263 return 0;
264}
265
266/// isSnippet - Identify if a live interval is a snippet that should be spilled.
267/// It is assumed that SnipLI is a virtual register with the same original as
268/// Edit->getReg().
269bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
270 unsigned Reg = Edit->getReg();
271
272 // A snippet is a tiny live range with only a single instruction using it
273 // besides copies to/from Reg or spills/fills. We accept:
274 //
275 // %snip = COPY %Reg / FILL fi#
276 // %snip = USE %snip
277 // %Reg = COPY %snip / SPILL %snip, fi#
278 //
279 if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
280 return false;
281
282 MachineInstr *UseMI = nullptr;
283
284 // Check that all uses satisfy our criteria.
285 for (MachineRegisterInfo::reg_instr_nodbg_iterator
286 RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
287 E = MRI.reg_instr_nodbg_end(); RI != E; ) {
288 MachineInstr &MI = *RI++;
289
290 // Allow copies to/from Reg.
291 if (isFullCopyOf(MI, Reg))
292 continue;
293
294 // Allow stack slot loads.
295 int FI;
296 if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
297 continue;
298
299 // Allow stack slot stores.
300 if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
301 continue;
302
303 // Allow a single additional instruction.
304 if (UseMI && &MI != UseMI)
305 return false;
306 UseMI = &MI;
307 }
308 return true;
309}
310
311/// collectRegsToSpill - Collect live range snippets that only have a single
312/// real use.
313void InlineSpiller::collectRegsToSpill() {
314 unsigned Reg = Edit->getReg();
315
316 // Main register always spills.
317 RegsToSpill.assign(1, Reg);
318 SnippetCopies.clear();
319
320 // Snippets all have the same original, so there can't be any for an original
321 // register.
322 if (Original == Reg)
323 return;
324
325 for (MachineRegisterInfo::reg_instr_iterator
326 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
327 MachineInstr &MI = *RI++;
328 unsigned SnipReg = isFullCopyOf(MI, Reg);
329 if (!isSibling(SnipReg))
330 continue;
331 LiveInterval &SnipLI = LIS.getInterval(SnipReg);
332 if (!isSnippet(SnipLI))
333 continue;
334 SnippetCopies.insert(&MI);
335 if (isRegToSpill(SnipReg))
336 continue;
337 RegsToSpill.push_back(SnipReg);
338 DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
339 ++NumSnippets;
340 }
341}
342
343bool InlineSpiller::isSibling(unsigned Reg) {
344 return TargetRegisterInfo::isVirtualRegister(Reg) &&
345 VRM.getOriginal(Reg) == Original;
346}
347
348/// It is beneficial to spill to an earlier place in the same BB in the
349/// following case:
350/// There is an alternative def earlier in the same MBB.
351/// Hoist the spill as far as possible in SpillMBB. This can ease
352/// register pressure:
353///
354/// x = def
355/// y = use x
356/// s = copy x
357///
358/// Hoisting the spill of s to immediately after the def removes the
359/// interference between x and y:
360///
361/// x = def
362/// spill x
363/// y = use killed x
364///
365/// This hoist only helps when the copy kills its source.
366///
367bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
368 MachineInstr &CopyMI) {
369 SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
370#ifndef NDEBUG
371 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
372 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
373#endif
374
375 unsigned SrcReg = CopyMI.getOperand(1).getReg();
376 LiveInterval &SrcLI = LIS.getInterval(SrcReg);
377 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
378 LiveQueryResult SrcQ = SrcLI.Query(Idx);
379 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
380 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
381 return false;
382
383 // Conservatively extend the stack slot range to the range of the original
384 // value. We may be able to do better with stack slot coloring by being more
385 // careful here.
386 assert(StackInt && "No stack slot assigned yet.");
387 LiveInterval &OrigLI = LIS.getInterval(Original);
388 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
389 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
390 DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
391 << *StackInt << '\n');
392
393 // We are going to spill SrcVNI immediately after its def, so clear out
394 // any later spills of the same value.
395 eliminateRedundantSpills(SrcLI, SrcVNI);
396
397 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
398 MachineBasicBlock::iterator MII;
399 if (SrcVNI->isPHIDef())
400 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
401 else {
402 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
403 assert(DefMI && "Defining instruction disappeared");
404 MII = DefMI;
405 ++MII;
406 }
407 // Insert spill without kill flag immediately after def.
408 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
409 MRI.getRegClass(SrcReg), &TRI);
410 --MII; // Point to store instruction.
411 LIS.InsertMachineInstrInMaps(*MII);
412 DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
413
414 HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
415 ++NumSpills;
416 return true;
417}
418
419/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
420/// redundant spills of this value in SLI.reg and sibling copies.
421void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
422 assert(VNI && "Missing value");
423 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
424 WorkList.push_back(std::make_pair(&SLI, VNI));
425 assert(StackInt && "No stack slot assigned yet.");
426
427 do {
428 LiveInterval *LI;
429 std::tie(LI, VNI) = WorkList.pop_back_val();
430 unsigned Reg = LI->reg;
431 DEBUG(dbgs() << "Checking redundant spills for "
432 << VNI->id << '@' << VNI->def << " in " << *LI << '\n');
433
434 // Regs to spill are taken care of.
435 if (isRegToSpill(Reg))
436 continue;
437
438 // Add all of VNI's live range to StackInt.
439 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
440 DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
441
442 // Find all spills and copies of VNI.
443 for (MachineRegisterInfo::use_instr_nodbg_iterator
444 UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
445 UI != E; ) {
446 MachineInstr &MI = *UI++;
447 if (!MI.isCopy() && !MI.mayStore())
448 continue;
449 SlotIndex Idx = LIS.getInstructionIndex(MI);
450 if (LI->getVNInfoAt(Idx) != VNI)
451 continue;
452
453 // Follow sibling copies down the dominator tree.
454 if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
455 if (isSibling(DstReg)) {
456 LiveInterval &DstLI = LIS.getInterval(DstReg);
457 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
458 assert(DstVNI && "Missing defined value");
459 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
460 WorkList.push_back(std::make_pair(&DstLI, DstVNI));
461 }
462 continue;
463 }
464
465 // Erase spills.
466 int FI;
467 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
468 DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
469 // eliminateDeadDefs won't normally remove stores, so switch opcode.
470 MI.setDesc(TII.get(TargetOpcode::KILL));
471 DeadDefs.push_back(&MI);
472 ++NumSpillsRemoved;
473 if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
474 --NumSpills;
475 }
476 }
477 } while (!WorkList.empty());
478}
479
480//===----------------------------------------------------------------------===//
481// Rematerialization
482//===----------------------------------------------------------------------===//
483
484/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
485/// instruction cannot be eliminated. Sees through snippet copies.
486void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
487 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
488 WorkList.push_back(std::make_pair(LI, VNI));
489 do {
490 std::tie(LI, VNI) = WorkList.pop_back_val();
491 if (!UsedValues.insert(VNI).second)
492 continue;
493
494 if (VNI->isPHIDef()) {
495 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
496 for (MachineBasicBlock *P : MBB->predecessors()) {
497 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
498 if (PVNI)
499 WorkList.push_back(std::make_pair(LI, PVNI));
500 }
501 continue;
502 }
503
504 // Follow snippet copies.
505 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
506 if (!SnippetCopies.count(MI))
507 continue;
508 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
509 assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
510 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
511 assert(SnipVNI && "Snippet undefined before copy");
512 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
513 } while (!WorkList.empty());
514}
515
516/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
517bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
518 // Analyze instruction
519 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
520 MIBundleOperands::VirtRegInfo RI =
521 MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
522
523 if (!RI.Reads)
524 return false;
525
526 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
527 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
528
529 if (!ParentVNI) {
530 DEBUG(dbgs() << "\tadding <undef> flags: ");
531 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
532 MachineOperand &MO = MI.getOperand(i);
533 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
534 MO.setIsUndef();
535 }
536 DEBUG(dbgs() << UseIdx << '\t' << MI);
537 return true;
538 }
539
540 if (SnippetCopies.count(&MI))
541 return false;
542
543 LiveInterval &OrigLI = LIS.getInterval(Original);
544 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
545 LiveRangeEdit::Remat RM(ParentVNI);
546 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
547
548 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
549 markValueUsed(&VirtReg, ParentVNI);
550 DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
551 return false;
552 }
553
554 // If the instruction also writes VirtReg.reg, it had better not require the
555 // same register for uses and defs.
556 if (RI.Tied) {
557 markValueUsed(&VirtReg, ParentVNI);
558 DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
559 return false;
560 }
561
562 // Before rematerializing into a register for a single instruction, try to
563 // fold a load into the instruction. That avoids allocating a new register.
564 if (RM.OrigMI->canFoldAsLoad() &&
565 foldMemoryOperand(Ops, RM.OrigMI)) {
566 Edit->markRematerialized(RM.ParentVNI);
567 ++NumFoldedLoads;
568 return true;
569 }
570
571 // Allocate a new register for the remat.
572 unsigned NewVReg = Edit->createFrom(Original);
573
574 // Finally we can rematerialize OrigMI before MI.
575 SlotIndex DefIdx =
576 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
577
578 // We take the DebugLoc from MI, since OrigMI may be attributed to a
579 // different source location.
580 auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
581 NewMI->setDebugLoc(MI.getDebugLoc());
582
583 (void)DefIdx;
584 DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
585 << *LIS.getInstructionFromIndex(DefIdx));
586
587 // Replace operands
588 for (const auto &OpPair : Ops) {
589 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
590 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
591 MO.setReg(NewVReg);
592 MO.setIsKill();
593 }
594 }
595 DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
596
597 ++NumRemats;
598 return true;
599}
600
601/// reMaterializeAll - Try to rematerialize as many uses as possible,
602/// and trim the live ranges after.
603void InlineSpiller::reMaterializeAll() {
604 if (!Edit->anyRematerializable(AA))
605 return;
606
607 UsedValues.clear();
608
609 // Try to remat before all uses of snippets.
610 bool anyRemat = false;
611 for (unsigned Reg : RegsToSpill) {
612 LiveInterval &LI = LIS.getInterval(Reg);
613 for (MachineRegisterInfo::reg_bundle_iterator
614 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
615 RegI != E; ) {
616 MachineInstr &MI = *RegI++;
617
618 // Debug values are not allowed to affect codegen.
619 if (MI.isDebugValue())
620 continue;
621
622 anyRemat |= reMaterializeFor(LI, MI);
623 }
624 }
625 if (!anyRemat)
626 return;
627
628 // Remove any values that were completely rematted.
629 for (unsigned Reg : RegsToSpill) {
630 LiveInterval &LI = LIS.getInterval(Reg);
631 for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
632 I != E; ++I) {
633 VNInfo *VNI = *I;
634 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
635 continue;
636 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
637 MI->addRegisterDead(Reg, &TRI);
638 if (!MI->allDefsAreDead())
639 continue;
640 DEBUG(dbgs() << "All defs dead: " << *MI);
641 DeadDefs.push_back(MI);
642 }
643 }
644
645 // Eliminate dead code after remat. Note that some snippet copies may be
646 // deleted here.
647 if (DeadDefs.empty())
648 return;
649 DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
650 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
651
652 // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
653 // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
654 // LiveIntervals::removeVRegDefAt is used. However, after all non-PHI VNIs are
655 // removed, PHI VNIs are still left in the LiveInterval.
656 // So to get rid of an unused reg, we need to check whether it has a non-dbg
657 // reference instead of whether it has a non-empty interval.
658 unsigned ResultPos = 0;
659 for (unsigned Reg : RegsToSpill) {
660 if (MRI.reg_nodbg_empty(Reg)) {
661 Edit->eraseVirtReg(Reg);
662 continue;
663 }
664
665 assert(LIS.hasInterval(Reg) &&
666 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
667 "Empty and not used live-range?!");
668
669 RegsToSpill[ResultPos++] = Reg;
670 }
671 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
672 DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
673}
674
675//===----------------------------------------------------------------------===//
676// Spilling
677//===----------------------------------------------------------------------===//
678
679/// If MI is a load or store of StackSlot, it can be removed.
680bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
681 int FI = 0;
682 unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
683 bool IsLoad = InstrReg;
684 if (!IsLoad)
685 InstrReg = TII.isStoreToStackSlot(*MI, FI);
686
687 // We have a stack access. Is it the right register and slot?
688 if (InstrReg != Reg || FI != StackSlot)
689 return false;
690
691 if (!IsLoad)
692 HSpiller.rmFromMergeableSpills(*MI, StackSlot);
693
694 DEBUG(dbgs() << "Coalescing stack access: " << *MI);
695 LIS.RemoveMachineInstrFromMaps(*MI);
696 MI->eraseFromParent();
697
698 if (IsLoad) {
699 ++NumReloadsRemoved;
700 --NumReloads;
701 } else {
702 ++NumSpillsRemoved;
703 --NumSpills;
704 }
705
706 return true;
707}
708
709#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
710LLVM_DUMP_METHOD
711// Dump the range of instructions from B to E with their slot indexes.
712static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
713 MachineBasicBlock::iterator E,
714 LiveIntervals const &LIS,
715 const char *const header,
716 unsigned VReg = 0) {
717 char NextLine = '\n';
718 char SlotIndent = '\t';
719
720 if (std::next(B) == E) {
721 NextLine = ' ';
722 SlotIndent = ' ';
723 }
724
725 dbgs() << '\t' << header << ": " << NextLine;
726
727 for (MachineBasicBlock::iterator I = B; I != E; ++I) {
728 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
729
730 // If a register was passed in and this instruction has it as a
731 // destination that is marked as an early clobber, print the
732 // early-clobber slot index.
733 if (VReg) {
734 MachineOperand *MO = I->findRegisterDefOperand(VReg);
735 if (MO && MO->isEarlyClobber())
736 Idx = Idx.getRegSlot(true);
737 }
738
739 dbgs() << SlotIndent << Idx << '\t' << *I;
740 }
741}
742#endif
743
744/// foldMemoryOperand - Try folding stack slot references in Ops into their
745/// instructions.
746///
747/// @param Ops Operand indices from analyzeVirtReg().
748/// @param LoadMI Load instruction to use instead of stack slot when non-null.
749/// @return True on success.
750bool InlineSpiller::
751foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
752 MachineInstr *LoadMI) {
753 if (Ops.empty())
754 return false;
755 // Don't attempt folding in bundles.
756 MachineInstr *MI = Ops.front().first;
757 if (Ops.back().first != MI || MI->isBundled())
758 return false;
759
760 bool WasCopy = MI->isCopy();
761 unsigned ImpReg = 0;
762
763 // Spill subregs if the target allows it.
764 // We always want to spill subregs for stackmap/patchpoint pseudos.
765 bool SpillSubRegs = TII.isSubregFoldable() ||
766 MI->getOpcode() == TargetOpcode::STATEPOINT ||
767 MI->getOpcode() == TargetOpcode::PATCHPOINT ||
768 MI->getOpcode() == TargetOpcode::STACKMAP;
769
770 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
771 // operands.
772 SmallVector<unsigned, 8> FoldOps;
773 for (const auto &OpPair : Ops) {
774 unsigned Idx = OpPair.second;
775 assert(MI == OpPair.first && "Instruction conflict during operand folding");
776 MachineOperand &MO = MI->getOperand(Idx);
777 if (MO.isImplicit()) {
778 ImpReg = MO.getReg();
779 continue;
780 }
781
782 if (!SpillSubRegs && MO.getSubReg())
783 return false;
784 // We cannot fold a load instruction into a def.
785 if (LoadMI && MO.isDef())
786 return false;
787 // Tied use operands should not be passed to foldMemoryOperand.
788 if (!MI->isRegTiedToDefOperand(Idx))
789 FoldOps.push_back(Idx);
790 }
791
792 // If we only have implicit uses, we won't be able to fold that.
793 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
794 if (FoldOps.empty())
795 return false;
796
797 MachineInstrSpan MIS(MI);
798
799 MachineInstr *FoldMI =
800 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
801 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS);
802 if (!FoldMI)
803 return false;
804
805 // Remove LIS for any dead defs in the original MI not in FoldMI.
806 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
807 if (!MO->isReg())
808 continue;
809 unsigned Reg = MO->getReg();
810 if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
811 MRI.isReserved(Reg)) {
812 continue;
813 }
814 // Skip non-Defs, including undef uses and internal reads.
815 if (MO->isUse())
816 continue;
817 MIBundleOperands::PhysRegInfo RI =
818 MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
819 if (RI.FullyDefined)
820 continue;
821 // FoldMI does not define this physreg. Remove the LI segment.
822 assert(MO->isDead() && "Cannot fold physreg def");
823 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
824 LIS.removePhysRegDefAt(Reg, Idx);
825 }
826
827 int FI;
828 if (TII.isStoreToStackSlot(*MI, FI) &&
829 HSpiller.rmFromMergeableSpills(*MI, FI))
830 --NumSpills;
831 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
832 MI->eraseFromParent();
833
834 // Insert any new instructions other than FoldMI into the LIS maps.
835 assert(!MIS.empty() && "Unexpected empty span of instructions!");
836 for (MachineInstr &MI : MIS)
837 if (&MI != FoldMI)
838 LIS.InsertMachineInstrInMaps(MI);
839
840 // TII.foldMemoryOperand may have left some implicit operands on the
841 // instruction. Strip them.
842 if (ImpReg)
843 for (unsigned i = FoldMI->getNumOperands(); i; --i) {
844 MachineOperand &MO = FoldMI->getOperand(i - 1);
845 if (!MO.isReg() || !MO.isImplicit())
846 break;
847 if (MO.getReg() == ImpReg)
848 FoldMI->RemoveOperand(i - 1);
849 }
850
851 DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
852 "folded"));
853
854 if (!WasCopy)
855 ++NumFolded;
856 else if (Ops.front().second == 0) {
857 ++NumSpills;
858 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
859 } else
860 ++NumReloads;
861 return true;
862}
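
To make the folding concrete, a hedged, x86-64-flavored sketch (hypothetical MIR, not taken from this file) of what a successful TII.foldMemoryOperand does to a reload feeding a single use:

  // Before folding (sketch): a reload followed by its only use.
  //   %r = MOV64rm %stack.0
  //   %d = ADD64rr %d, %r
  // After folding (sketch): the stack slot becomes a memory operand,
  // so the intermediate register and the separate load disappear.
  //   %d = ADD64rm %d, %stack.0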
863
864void InlineSpiller::insertReload(unsigned NewVReg,
865 SlotIndex Idx,
866 MachineBasicBlock::iterator MI) {
867 MachineBasicBlock &MBB = *MI->getParent();
868
869 MachineInstrSpan MIS(MI);
870 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
871 MRI.getRegClass(NewVReg), &TRI);
872
873 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
874
875 DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
876 NewVReg));
877 ++NumReloads;
878}
879
880/// Check if \p Def fully defines a VReg with an undefined value.
881/// If that's the case, that means the value of VReg is actually
882/// not relevant.
883static bool isFullUndefDef(const MachineInstr &Def) {
884 if (!Def.isImplicitDef())
885 return false;
886 assert(Def.getNumOperands() == 1 &&
887 "Implicit def with more than one definition");
888 // We can say that the VReg defined by Def is undef, only if it is
889 // fully defined by Def. Otherwise, some of the lanes may not be
890 // undef and the value of the VReg matters.
891 return !Def.getOperand(0).getSubReg();
892}
893
894/// insertSpill - Insert a spill of NewVReg after MI.
895void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
896 MachineBasicBlock::iterator MI) {
897 MachineBasicBlock &MBB = *MI->getParent();
898
899 MachineInstrSpan MIS(MI);
900 bool IsRealSpill = true;
901 if (isFullUndefDef(*MI)) {
902 // Don't spill undef value.
903 // Anything works for undef, in particular keeping the memory
904 // uninitialized is a viable option and it saves code size and
905 // run time.
906 BuildMI(MBB, std::next(MI), MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
907 .addReg(NewVReg, getKillRegState(isKill));
908 IsRealSpill = false;
909 } else
910 TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
911 MRI.getRegClass(NewVReg), &TRI);
912
913 LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
914
915 DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
916 "spill"));
917 ++NumSpills;
918 if (IsRealSpill)
919 HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
920}
921
922/// spillAroundUses - insert spill code around each use of Reg.
923void InlineSpiller::spillAroundUses(unsigned Reg) {
924 DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
925 LiveInterval &OldLI = LIS.getInterval(Reg);
926
927 // Iterate over instructions using Reg.
928 for (MachineRegisterInfo::reg_bundle_iterator
929 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
930 RegI != E; ) {
931 MachineInstr *MI = &*(RegI++);
932
933 // Debug values are not allowed to affect codegen.
934 if (MI->isDebugValue()) {
935 // Modify DBG_VALUE now that the value is in a spill slot.
936 MachineBasicBlock *MBB = MI->getParent();
937 DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
938 buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
939 MBB->erase(MI);
940 continue;
941 }
942
943 // Ignore copies to/from snippets. We'll delete them.
944 if (SnippetCopies.count(MI))
945 continue;
946
947 // Stack slot accesses may coalesce away.
948 if (coalesceStackAccess(MI, Reg))
949 continue;
950
951 // Analyze instruction.
952 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
953 MIBundleOperands::VirtRegInfo RI =
954 MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);
955
956 // Find the slot index where this instruction reads and writes OldLI.
957 // This is usually the def slot, except for tied early clobbers.
958 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
959 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
960 if (SlotIndex::isSameInstr(Idx, VNI->def))
961 Idx = VNI->def;
962
963 // Check for a sibling copy.
964 unsigned SibReg = isFullCopyOf(*MI, Reg);
965 if (SibReg && isSibling(SibReg)) {
966 // This may actually be a copy between snippets.
967 if (isRegToSpill(SibReg)) {
968 DEBUG(dbgs() << "Found new snippet copy: " << *MI);
969 SnippetCopies.insert(MI);
970 continue;
971 }
972 if (RI.Writes) {
973 if (hoistSpillInsideBB(OldLI, *MI)) {
974 // This COPY is now dead, the value is already in the stack slot.
975 MI->getOperand(0).setIsDead();
976 DeadDefs.push_back(MI);
977 continue;
978 }
979 } else {
980 // This is a reload for a sib-reg copy. Drop spills downstream.
981 LiveInterval &SibLI = LIS.getInterval(SibReg);
982 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
983 // The COPY will fold to a reload below.
984 }
985 }
986
987 // Attempt to fold memory ops.
988 if (foldMemoryOperand(Ops))
989 continue;
990
991 // Create a new virtual register for spill/fill.
992 // FIXME: Infer regclass from instruction alone.
993 unsigned NewVReg = Edit->createFrom(Reg);
994
995 if (RI.Reads)
996 insertReload(NewVReg, Idx, MI);
997
998 // Rewrite instruction operands.
999 bool hasLiveDef = false;
1000 for (const auto &OpPair : Ops) {
1001 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1002 MO.setReg(NewVReg);
1003 if (MO.isUse()) {
1004 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1005 MO.setIsKill();
1006 } else {
1007 if (!MO.isDead())
1008 hasLiveDef = true;
1009 }
1010 }
1011 DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
1012
1013 // FIXME: Use a second vreg if instruction has no tied ops.
1014 if (RI.Writes)
1015 if (hasLiveDef)
1016 insertSpill(NewVReg, true, MI);
1017 }
1018}
1019
1020/// spillAll - Spill all registers remaining after rematerialization.
1021void InlineSpiller::spillAll() {
1022 // Update LiveStacks now that we are committed to spilling.
1023 if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1024 StackSlot = VRM.assignVirt2StackSlot(Original);
1025 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1026 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1027 } else
1028 StackInt = &LSS.getInterval(StackSlot);
1029
1030 if (Original != Edit->getReg())
1031 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1032
1033 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1034 for (unsigned Reg : RegsToSpill)
1035 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1036 StackInt->getValNumInfo(0));
1037 DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1038
1039 // Spill around uses of all RegsToSpill.
1040 for (unsigned Reg : RegsToSpill)
1041 spillAroundUses(Reg);
1042
1043 // Hoisted spills may cause dead code.
1044 if (!DeadDefs.empty()) {
1045 DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1046 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1047 }
1048
1049 // Finally delete the SnippetCopies.
1050 for (unsigned Reg : RegsToSpill) {
1051 for (MachineRegisterInfo::reg_instr_iterator
1052 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
1053 RI != E; ) {
1054 MachineInstr &MI = *(RI++);
1055 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1056 // FIXME: Do this with a LiveRangeEdit callback.
1057 LIS.RemoveMachineInstrFromMaps(MI);
1058 MI.eraseFromParent();
1059 }
1060 }
1061
1062 // Delete all spilled registers.
1063 for (unsigned Reg : RegsToSpill)
1064 Edit->eraseVirtReg(Reg);
1065}
1066
1067void InlineSpiller::spill(LiveRangeEdit &edit) {
1068 ++NumSpilledRanges;
1069 Edit = &edit;
1070 assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
1071 && "Trying to spill a stack slot.");
1072 // Share a stack slot among all descendants of Original.
1073 Original = VRM.getOriginal(edit.getReg());
1074 StackSlot = VRM.getStackSlot(Original);
1075 StackInt = nullptr;
1076
1077 DEBUG(dbgs() << "Inline spilling "
1078 << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1079 << ':' << edit.getParent()
1080 << "\nFrom original " << printReg(Original) << '\n');
1081 assert(edit.getParent().isSpillable() &&
1082 "Attempting to spill already spilled value.");
1083 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1084
1085 collectRegsToSpill();
1086 reMaterializeAll();
1087
1088 // Remat may handle everything.
1089 if (!RegsToSpill.empty())
1090 spillAll();
1091
1092 Edit->calculateRegClassAndHint(MF, Loops, MBFI);
1093}
1094
1095/// Optimizations after all the reg selections and spills are done.
1096void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1. Calling 'HoistSpillHelper::hoistAllSpills'
1097
1098/// When a spill is inserted, add the spill to MergeableSpills map.
1099void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1100 unsigned Original) {
1101 BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1102 LiveInterval &OrigLI = LIS.getInterval(Original);
1103 // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
1104 // LiveInterval may be cleared after all its references are spilled.
1105 if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1106 auto LI = llvm::make_unique<LiveInterval>(OrigLI.reg, OrigLI.weight);
1107 LI->assign(OrigLI, Allocator);
1108 StackSlotToOrigLI[StackSlot] = std::move(LI);
1109 }
1110 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1111 VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1112 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1113 MergeableSpills[MIdx].insert(&Spill);
1114}
1115
1116/// When a spill is removed, remove the spill from MergeableSpills map.
1117/// Return true if the spill is removed successfully.
1118bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1119 int StackSlot) {
1120 auto It = StackSlotToOrigLI.find(StackSlot);
1121 if (It == StackSlotToOrigLI.end())
1122 return false;
1123 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1124 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1125 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1126 return MergeableSpills[MIdx].erase(&Spill);
1127}
1128
1129/// Check BB to see if it is a possible target BB to place a hoisted spill,
1130/// i.e., there should be a live sibling of OrigReg at the insert point.
1131bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1132 MachineBasicBlock &BB, unsigned &LiveReg) {
1133 SlotIndex Idx;
1134 unsigned OrigReg = OrigLI.reg;
1135 MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
1136 if (MI != BB.end())
1137 Idx = LIS.getInstructionIndex(*MI);
1138 else
1139 Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
1140 SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1141  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1142
1143 for (auto const SibReg : Siblings) {
1144 LiveInterval &LI = LIS.getInterval(SibReg);
1145 VNInfo *VNI = LI.getVNInfoAt(Idx);
1146 if (VNI) {
1147 LiveReg = SibReg;
1148 return true;
1149 }
1150 }
1151 return false;
1152}
1153
1154/// Remove redundant spills in the same BB. Save those redundant spills in
1155/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1156void HoistSpillHelper::rmRedundantSpills(
1157 SmallPtrSet<MachineInstr *, 16> &Spills,
1158 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1159 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1160  // For each spill seen, check SpillBBToSpill[] to see if its BB already has
1161  // another spill inside. If a BB contains more than one spill, keep only the
1162  // earlier spill with the smaller SlotIndex.
1163 for (const auto CurrentSpill : Spills) {
1164 MachineBasicBlock *Block = CurrentSpill->getParent();
1165 MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1166 MachineInstr *PrevSpill = SpillBBToSpill[Node];
1167 if (PrevSpill) {
1168 SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1169 SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1170 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1171 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1172 SpillsToRm.push_back(SpillToRm);
1173 SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1174 } else {
1175 SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1176 }
1177 }
1178 for (const auto SpillToRm : SpillsToRm)
1179 Spills.erase(SpillToRm);
1180}
1181
1182/// Starting from \p Root find a top-down traversal order of the dominator
1183/// tree to visit all basic blocks containing the elements of \p Spills.
1184/// Redundant spills will be found and put into \p SpillsToRm at the same
1185/// time. \p SpillBBToSpill will be populated as part of the process and
1186/// maps a basic block to the first store occurring in the basic block.
1187/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1188void HoistSpillHelper::getVisitOrders(
1189 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1190 SmallVectorImpl<MachineDomTreeNode *> &Orders,
1191 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1192 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1193 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1194 // The set contains all the possible BB nodes to which we may hoist
1195 // original spills.
1196 SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1197  // Save the BB nodes on the path from the first BB node containing a
1198  // non-redundant spill to the Root node.
1199 SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1200  // All the spills to be hoisted must originate from a single def instruction
1201  // of the OrigReg. This means the def instruction should dominate all the
1202  // spills to be hoisted. We choose the BB where the def instruction is
1203  // located as the Root.
1204 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1205  // For every node in the dominator tree that contains a spill, walk up the
1206  // tree towards the Root node until it is reached. If another node containing
1207  // a spill is found in the middle of the path, the spill seen earlier is
1208  // redundant and the node containing it will be removed. All the nodes on the
1209  // path from the first node with a non-redundant spill to the Root node are
1210  // added to the WorkSet, which after the loop below holds all the candidate
1211  // locations to which spills may be hoisted.
1212 for (const auto Spill : Spills) {
1213 MachineBasicBlock *Block = Spill->getParent();
1214 MachineDomTreeNode *Node = MDT[Block];
7. Calling 'MachineDominatorTree::operator[]'
24. Returning from 'MachineDominatorTree::operator[]'
25. 'Node' initialized here
1215 MachineInstr *SpillToRm = nullptr;
1216 while (Node != RootIDomNode) {
26. Assuming 'Node' is not equal to 'RootIDomNode'
27. Loop condition is true. Entering loop body
1217 // If Node dominates Block, and it already contains a spill, the spill in
1218 // Block will be redundant.
1219 if (Node != MDT[Block] && SpillBBToSpill[Node]) {
28. Assuming the condition is false
29. Assuming pointer value is null
1220 SpillToRm = SpillBBToSpill[MDT[Block]];
1221 break;
1222      // If Node is already in WorkSet, the path from Node to the Root node
1223      // has already been traversed by another spill, so there is no need to
1224      // repeat it.
1225 } else if (WorkSet.count(Node)) {
30. Assuming the condition is false
31. Taking false branch
1226 break;
1227 } else {
1228 NodesOnPath.insert(Node);
1229 }
1230 Node = Node->getIDom();
32. Called C++ object pointer is null
1231 }
1232 if (SpillToRm) {
1233 SpillsToRm.push_back(SpillToRm);
1234 } else {
1235      // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1236      // set the initial status before hoisting starts. The value for BBs
1237      // containing original spills is set to 0, to distinguish them from
1238      // BBs containing hoisted spills, which will be inserted into
1239      // SpillsToKeep later during hoisting.
1240 SpillsToKeep[MDT[Block]] = 0;
1241 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1242 }
1243 NodesOnPath.clear();
1244 }
1245
1246 // Sort the nodes in WorkSet in top-down order and save the nodes
1247 // in Orders. Orders will be used for hoisting in runHoistSpills.
1248 unsigned idx = 0;
1249 Orders.push_back(MDT.getBase().getNode(Root));
1250 do {
1251 MachineDomTreeNode *Node = Orders[idx++];
1252 const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
1253 unsigned NumChildren = Children.size();
1254 for (unsigned i = 0; i != NumChildren; ++i) {
1255 MachineDomTreeNode *Child = Children[i];
1256 if (WorkSet.count(Child))
1257 Orders.push_back(Child);
1258 }
1259 } while (idx != Orders.size());
1260  assert(Orders.size() == WorkSet.size() &&
1261         "Orders have different size with WorkSet");
1262
1263#ifndef NDEBUG
1264  DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1265 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1266 for (; RIt != Orders.rend(); RIt++)
1267    DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1268  DEBUG(dbgs() << "\n");
1269#endif
1270}
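
A note on the defect path: 'Node' is initialized from MDT[Block] (step 25), and the underlying DominatorTreeBase::getNode may return nullptr; the analyzer assumes that pointer is null (step 29), the null value survives both branch tests (steps 26-31), and Node->getIDom() is then invoked on it at line 1230 (step 32). Below is a minimal defensive sketch of the same walk that bails out instead of dereferencing a null Node. It only illustrates the analyzer's scenario and is not the upstream fix.

  // Sketch: the walk of lines 1216-1231 with a null guard added. All names
  // are the ones used in getVisitOrders above.
  while (Node && Node != RootIDomNode) {
    if (Node != MDT[Block] && SpillBBToSpill[Node]) {
      SpillToRm = SpillBBToSpill[MDT[Block]];
      break;
    } else if (WorkSet.count(Node)) {
      break;
    } else {
      NodesOnPath.insert(Node);
    }
    Node = Node->getIDom(); // may become null once the walk leaves the tree
  }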
1271
1272/// Try to hoist spills according to BB hotness. The spills to be removed will
1273/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1274/// \p SpillsToIns.
1275void HoistSpillHelper::runHoistSpills(
1276 LiveInterval &OrigLI, VNInfo &OrigVNI,
1277 SmallPtrSet<MachineInstr *, 16> &Spills,
1278 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1279 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1280 // Visit order of dominator tree nodes.
1281 SmallVector<MachineDomTreeNode *, 32> Orders;
1282 // SpillsToKeep contains all the nodes where spills are to be inserted
1283 // during hoisting. If the spill to be inserted is an original spill
1284 // (not a hoisted one), the value of the map entry is 0. If the spill
1285 // is a hoisted spill, the value of the map entry is the VReg to be used
1286 // as the source of the spill.
1287 DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1288 // Map from BB to the first spill inside of it.
1289 DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1290
1291 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1292
1293 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1294 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
6. Calling 'HoistSpillHelper::getVisitOrders'
1295 SpillBBToSpill);
1296
1297  // SpillsInSubTreeMap maps a dom tree node to a pair of a node set and
1298  // the cost of all the spills inside those nodes. The node set holds the
1299  // locations where spills are to be inserted in the subtree of the
1300  // current node.
1301 using NodesCostPair =
1302 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1303 DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1304
1305 // Iterate Orders set in reverse order, which will be a bottom-up order
1306 // in the dominator tree. Once we visit a dom tree node, we know its
1307 // children have already been visited and the spill locations in the
1308 // subtrees of all the children have been determined.
1309 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1310 for (; RIt != Orders.rend(); RIt++) {
1311 MachineBasicBlock *Block = (*RIt)->getBlock();
1312
1313 // If Block contains an original spill, simply continue.
1314 if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1315 SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1316 // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1317 SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1318 continue;
1319 }
1320
1321 // Collect spills in subtree of current node (*RIt) to
1322 // SpillsInSubTreeMap[*RIt].first.
1323 const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();
1324 unsigned NumChildren = Children.size();
1325 for (unsigned i = 0; i != NumChildren; ++i) {
1326 MachineDomTreeNode *Child = Children[i];
1327 if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1328 continue;
1329      // The statement "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1330      // should be placed before getting the begin and end iterators of
1331      // SpillsInSubTreeMap[Child].first, or else the iterators may be
1332      // invalidated when SpillsInSubTreeMap[*RIt] is seen for the first time
1333      // and the map grows, moving the original buckets in the map.
1334 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1335 SpillsInSubTreeMap[*RIt].first;
1336 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1337 SubTreeCost += SpillsInSubTreeMap[Child].second;
1338 auto BI = SpillsInSubTreeMap[Child].first.begin();
1339 auto EI = SpillsInSubTreeMap[Child].first.end();
1340 SpillsInSubTree.insert(BI, EI);
1341 SpillsInSubTreeMap.erase(Child);
1342 }
1343
1344 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1345 SpillsInSubTreeMap[*RIt].first;
1346 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1347 // No spills in subtree, simply continue.
1348 if (SpillsInSubTree.empty())
1349 continue;
1350
1351 // Check whether Block is a possible candidate to insert spill.
1352 unsigned LiveReg = 0;
1353 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1354 continue;
1355
1356 // If there are multiple spills that could be merged, bias a little
1357 // to hoist the spill.
1358 BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1359 ? BranchProbability(9, 10)
1360 : BranchProbability(1, 1);
1361 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1362 // Hoist: Move spills to current Block.
1363 for (const auto SpillBB : SpillsInSubTree) {
1364        // When SpillBB is a BB containing an original spill, add that
1365        // spill to SpillsToRm.
1366 if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1367 !SpillsToKeep[SpillBB]) {
1368 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1369 SpillsToRm.push_back(SpillToRm);
1370 }
1371        // SpillBB will no longer contain a spill, so remove it from SpillsToKeep.
1372 SpillsToKeep.erase(SpillBB);
1373 }
1374 // Current Block is the BB containing the new hoisted spill. Add it to
1375 // SpillsToKeep. LiveReg is the source of the new spill.
1376 SpillsToKeep[*RIt] = LiveReg;
1377      DEBUG({
1378        dbgs() << "spills in BB: ";
1379        for (const auto Rspill : SpillsInSubTree)
1380          dbgs() << Rspill->getBlock()->getNumber() << " ";
1381        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1382               << "\n";
1383      });
1384 SpillsInSubTree.clear();
1385 SpillsInSubTree.insert(*RIt);
1386 SubTreeCost = MBFI.getBlockFreq(Block);
1387 }
1388 }
1389  // For spills in SpillsToKeep with LiveReg set (i.e., not an original
1390  // spill), save them to SpillsToIns.
1391 for (const auto Ent : SpillsToKeep) {
1392 if (Ent.second)
1393 SpillsToIns[Ent.first->getBlock()] = Ent.second;
1394 }
1395}
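
The hoisting decision at lines 1358-1361 compares the accumulated subtree cost with the candidate block's frequency, biased by BranchProbability(9, 10) when more than one spill could be merged. A self-contained sketch of that test, with plain integers standing in for BlockFrequency; the numbers are assumptions chosen for illustration only.

#include <cstdio>

int main() {
  unsigned SubTreeCost = 60 + 60; // two mergeable spills, each in a block of freq 60
  unsigned CandidateFreq = 100;   // frequency of the candidate hoist block
  // Test SubTreeCost > CandidateFreq * 9/10 via cross-multiplication to stay
  // in integer arithmetic: 1200 > 900, so the two spills are hoisted and
  // merged into a single spill in the candidate block.
  bool Hoist = SubTreeCost * 10 > CandidateFreq * 9;
  std::printf("hoist: %s\n", Hoist ? "yes" : "no"); // prints "hoist: yes"
  return 0;
}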
1396
1397/// For spills with equal values, remove redundant spills and hoist those left
1398/// to less hot spots.
1399///
1400/// Spills with equal values are collected into the same set in
1401/// MergeableSpills when each spill is inserted. These equal spills originate
1402/// from the same defining instruction and are dominated by it. Before hoisting
1403/// all the equal spills, redundant spills inside the same BB are first marked
1404/// to be deleted. Then, starting from the spills left, walk up the dominator
1405/// tree towards the Root node where the defining instruction is located, mark
1406/// the dominated spills to be deleted along the way, and collect the BB nodes
1407/// on the path from the non-dominated spills to the defining instruction into
1408/// a WorkSet. The nodes in WorkSet are the candidate places where we consider
1409/// hoisting the spills. We iterate the WorkSet in bottom-up order, and for
1410/// each node, we decide whether to hoist the spills inside its subtree to that
1411/// node. In this way, we can get a benefit locally even if hoisting all the
1412/// equal spills to one cold place is impossible.
1413void HoistSpillHelper::hoistAllSpills() {
1414 SmallVector<unsigned, 4> NewVRegs;
1415 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1416
1417 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
2. Assuming 'i' is equal to 'e'
3. Loop condition is false. Execution continues on line 1425
1418 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
1419 unsigned Original = VRM.getPreSplitReg(Reg);
1420 if (!MRI.def_empty(Reg))
1421 Virt2SiblingsMap[Original].insert(Reg);
1422 }
1423
1424 // Each entry in MergeableSpills contains a spill set with equal values.
1425 for (auto &Ent : MergeableSpills) {
1426 int Slot = Ent.first.first;
1427 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1428 VNInfo *OrigVNI = Ent.first.second;
1429 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1430 if (Ent.second.empty())
4. Taking false branch
1431 continue;
1432
1433    DEBUG({
1434      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1435             << "Equal spills in BB: ";
1436      for (const auto spill : EqValSpills)
1437        dbgs() << spill->getParent()->getNumber() << " ";
1438      dbgs() << "\n";
1439    });
1440
1441 // SpillsToRm is the spill set to be removed from EqValSpills.
1442 SmallVector<MachineInstr *, 16> SpillsToRm;
1443 // SpillsToIns is the spill set to be newly inserted after hoisting.
1444 DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1445
1446 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
5. Calling 'HoistSpillHelper::runHoistSpills'
1447
1448    DEBUG({
1449      dbgs() << "Finally inserted spills in BB: ";
1450      for (const auto Ispill : SpillsToIns)
1451        dbgs() << Ispill.first->getNumber() << " ";
1452      dbgs() << "\nFinally removed spills in BB: ";
1453      for (const auto Rspill : SpillsToRm)
1454        dbgs() << Rspill->getParent()->getNumber() << " ";
1455      dbgs() << "\n";
1456    });
1457
1458 // Stack live range update.
1459 LiveInterval &StackIntvl = LSS.getInterval(Slot);
1460 if (!SpillsToIns.empty() || !SpillsToRm.empty())
1461 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1462 StackIntvl.getValNumInfo(0));
1463
1464 // Insert hoisted spills.
1465 for (auto const Insert : SpillsToIns) {
1466 MachineBasicBlock *BB = Insert.first;
1467 unsigned LiveReg = Insert.second;
1468 MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
1469 TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
1470 MRI.getRegClass(LiveReg), &TRI);
1471 LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
1472 ++NumSpills;
1473 }
1474
1475 // Remove redundant spills or change them to dead instructions.
1476 NumSpills -= SpillsToRm.size();
1477 for (auto const RMEnt : SpillsToRm) {
1478 RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1479 for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1480 MachineOperand &MO = RMEnt->getOperand(i - 1);
1481 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1482 RMEnt->RemoveOperand(i - 1);
1483 }
1484 }
1485 Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1486 }
1487}
1488
1489/// For a VirtReg clone, the \p New register should have the same physreg or
1490/// stackslot as the \p Old register.
1491void HoistSpillHelper::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
1492 if (VRM.hasPhys(Old))
1493 VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1494 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1495 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1496 else
1497    llvm_unreachable("VReg should be assigned either physreg or stackslot");
1498}

/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/MachineDominators.h

1//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
11// but for target-specific code rather than target-independent IR.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
16#define LLVM_CODEGEN_MACHINEDOMINATORS_H
17
18#include "llvm/ADT/SmallSet.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/CodeGen/MachineBasicBlock.h"
21#include "llvm/CodeGen/MachineFunctionPass.h"
22#include "llvm/CodeGen/MachineInstr.h"
23#include "llvm/Support/GenericDomTree.h"
24#include "llvm/Support/GenericDomTreeConstruction.h"
25#include <cassert>
26#include <memory>
27#include <vector>
28
29namespace llvm {
30
31template <>
32inline void DominatorTreeBase<MachineBasicBlock, false>::addRoot(
33 MachineBasicBlock *MBB) {
34 this->Roots.push_back(MBB);
35}
36
37extern template class DomTreeNodeBase<MachineBasicBlock>;
38extern template class DominatorTreeBase<MachineBasicBlock, false>; // DomTree
39extern template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTree
40
41using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
42
43//===-------------------------------------
44/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
45/// compute a normal dominator tree.
46///
47class MachineDominatorTree : public MachineFunctionPass {
48 /// \brief Helper structure used to hold all the basic blocks
49 /// involved in the split of a critical edge.
50 struct CriticalEdge {
51 MachineBasicBlock *FromBB;
52 MachineBasicBlock *ToBB;
53 MachineBasicBlock *NewBB;
54 };
55
56 /// \brief Pile up all the critical edges to be split.
57 /// The splitting of a critical edge is local and thus, it is possible
58 /// to apply several of those changes at the same time.
59 mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
60
61 /// \brief Remember all the basic blocks that are inserted during
62 /// edge splitting.
63 /// Invariant: NewBBs == all the basic blocks contained in the NewBB
64 /// field of all the elements of CriticalEdgesToSplit.
65  /// I.e., forall elt in CriticalEdgesToSplit, there exists BB in NewBBs
66  /// such that BB == elt.NewBB.
67 mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
68
69 /// The DominatorTreeBase that is used to compute a normal dominator tree
70 std::unique_ptr<DomTreeBase<MachineBasicBlock>> DT;
71
72 /// \brief Apply all the recorded critical edges to the DT.
73 /// This updates the underlying DT information in a way that uses
74 /// the fast query path of DT as much as possible.
75 ///
76 /// \post CriticalEdgesToSplit.empty().
77 void applySplitCriticalEdges() const;
78
79public:
80 static char ID; // Pass ID, replacement for typeid
81
82 MachineDominatorTree();
83
84 DomTreeBase<MachineBasicBlock> &getBase() {
85 if (!DT) DT.reset(new DomTreeBase<MachineBasicBlock>());
86 applySplitCriticalEdges();
87 return *DT;
88 }
89
90 void getAnalysisUsage(AnalysisUsage &AU) const override;
91
92 /// getRoots - Return the root blocks of the current CFG. This may include
93 /// multiple blocks if we are computing post dominators. For forward
94 /// dominators, this will always be a single block (the entry node).
95 ///
96 inline const SmallVectorImpl<MachineBasicBlock*> &getRoots() const {
97 applySplitCriticalEdges();
98 return DT->getRoots();
99 }
100
101 inline MachineBasicBlock *getRoot() const {
102 applySplitCriticalEdges();
103 return DT->getRoot();
104 }
105
106 inline MachineDomTreeNode *getRootNode() const {
107 applySplitCriticalEdges();
108 return DT->getRootNode();
109 }
110
111 bool runOnMachineFunction(MachineFunction &F) override;
112
113 inline bool dominates(const MachineDomTreeNode* A,
114 const MachineDomTreeNode* B) const {
115 applySplitCriticalEdges();
116 return DT->dominates(A, B);
117 }
118
119 inline bool dominates(const MachineBasicBlock* A,
120 const MachineBasicBlock* B) const {
121 applySplitCriticalEdges();
122 return DT->dominates(A, B);
123 }
124
125 // dominates - Return true if A dominates B. This performs the
126 // special checks necessary if A and B are in the same basic block.
127 bool dominates(const MachineInstr *A, const MachineInstr *B) const {
128 applySplitCriticalEdges();
129 const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
130 if (BBA != BBB) return DT->dominates(BBA, BBB);
131
132 // Loop through the basic block until we find A or B.
133 MachineBasicBlock::const_iterator I = BBA->begin();
134 for (; &*I != A && &*I != B; ++I)
135 /*empty*/ ;
136
137 //if(!DT.IsPostDominators) {
138 // A dominates B if it is found first in the basic block.
139 return &*I == A;
140 //} else {
141 // // A post-dominates B if B is found first in the basic block.
142 // return &*I == B;
143 //}
144 }
145
146 inline bool properlyDominates(const MachineDomTreeNode* A,
147 const MachineDomTreeNode* B) const {
148 applySplitCriticalEdges();
149 return DT->properlyDominates(A, B);
150 }
151
152 inline bool properlyDominates(const MachineBasicBlock* A,
153 const MachineBasicBlock* B) const {
154 applySplitCriticalEdges();
155 return DT->properlyDominates(A, B);
156 }
157
158 /// findNearestCommonDominator - Find nearest common dominator basic block
159 /// for basic block A and B. If there is no such block then return NULL.
160 inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
161 MachineBasicBlock *B) {
162 applySplitCriticalEdges();
163 return DT->findNearestCommonDominator(A, B);
164 }
165
166 inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
167 applySplitCriticalEdges();
168 return DT->getNode(BB);
8. Calling 'DominatorTreeBase::getNode'
23. Returning from 'DominatorTreeBase::getNode'
169 }
170
171 /// getNode - return the (Post)DominatorTree node for the specified basic
172 /// block. This is the same as using operator[] on this class.
173 ///
174 inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
175 applySplitCriticalEdges();
176 return DT->getNode(BB);
177 }
178
179 /// addNewBlock - Add a new node to the dominator tree information. This
180  /// creates a new node as a child of the DomBB dominator node, linking it into
181 /// the children list of the immediate dominator.
182 inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
183 MachineBasicBlock *DomBB) {
184 applySplitCriticalEdges();
185 return DT->addNewBlock(BB, DomBB);
186 }
187
188 /// changeImmediateDominator - This method is used to update the dominator
189 /// tree information when a node's immediate dominator changes.
190 ///
191 inline void changeImmediateDominator(MachineBasicBlock *N,
192 MachineBasicBlock* NewIDom) {
193 applySplitCriticalEdges();
194 DT->changeImmediateDominator(N, NewIDom);
195 }
196
197 inline void changeImmediateDominator(MachineDomTreeNode *N,
198 MachineDomTreeNode* NewIDom) {
199 applySplitCriticalEdges();
200 DT->changeImmediateDominator(N, NewIDom);
201 }
202
203 /// eraseNode - Removes a node from the dominator tree. Block must not
204 /// dominate any other blocks. Removes node from its immediate dominator's
205 /// children list. Deletes dominator node associated with basic block BB.
206 inline void eraseNode(MachineBasicBlock *BB) {
207 applySplitCriticalEdges();
208 DT->eraseNode(BB);
209 }
210
211 /// splitBlock - BB is split and now it has one successor. Update dominator
212 /// tree to reflect this change.
213 inline void splitBlock(MachineBasicBlock* NewBB) {
214 applySplitCriticalEdges();
215 DT->splitBlock(NewBB);
216 }
217
218 /// isReachableFromEntry - Return true if A is dominated by the entry
219 /// block of the function containing it.
220 bool isReachableFromEntry(const MachineBasicBlock *A) {
221 applySplitCriticalEdges();
222 return DT->isReachableFromEntry(A);
223 }
224
225 void releaseMemory() override;
226
227 void verifyAnalysis() const override;
228
229 void print(raw_ostream &OS, const Module*) const override;
230
231 /// \brief Record that the critical edge (FromBB, ToBB) has been
232 /// split with NewBB.
233  /// It is best to use this method instead of directly updating the
234  /// underlying information, because it helps reduce the number of
235  /// times the DT information is invalidated.
236 ///
237 /// \note Do not use this method with regular edges.
238 ///
239 /// \note To benefit from the compile time improvement incurred by this
240 /// method, the users of this method have to limit the queries to the DT
241 /// interface between two edges splitting. In other words, they have to
242 /// pack the splitting of critical edges as much as possible.
243 void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
244 MachineBasicBlock *ToBB,
245 MachineBasicBlock *NewBB) {
246 bool Inserted = NewBBs.insert(NewBB).second;
247 (void)Inserted;
248    assert(Inserted &&
249           "A basic block inserted via edge splitting cannot appear twice");
250 CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
251 }
252};
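
The batching protocol described in the comments above (record the splits first, then let the next query apply them all at once) can be exercised as follows. This is a hedged usage sketch: SplitRecord and applyBatchedSplits are assumptions invented for illustration; only the MachineDominatorTree calls come from this header.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineDominators.h"
using namespace llvm;

// Hypothetical record of one critical-edge split; not part of the header above.
struct SplitRecord {
  MachineBasicBlock *FromBB, *ToBB, *NewBB;
};

void applyBatchedSplits(MachineDominatorTree &MDT, ArrayRef<SplitRecord> Edges,
                        MachineBasicBlock *A, MachineBasicBlock *B) {
  // Queue every split first; the underlying DT is not updated yet.
  for (const SplitRecord &E : Edges)
    MDT.recordSplitCriticalEdge(E.FromBB, E.ToBB, E.NewBB);
  // The first query triggers applySplitCriticalEdges() once for the whole batch.
  (void)MDT.dominates(A, B);
}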
253
254//===-------------------------------------
255/// DominatorTree GraphTraits specialization so the DominatorTree can be
256/// iterable by generic graph iterators.
257///
258
259template <class Node, class ChildIterator>
260struct MachineDomTreeGraphTraitsBase {
261 using NodeRef = Node *;
262 using ChildIteratorType = ChildIterator;
263
264 static NodeRef getEntryNode(NodeRef N) { return N; }
265 static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
266 static ChildIteratorType child_end(NodeRef N) { return N->end(); }
267};
268
269template <class T> struct GraphTraits;
270
271template <>
272struct GraphTraits<MachineDomTreeNode *>
273 : public MachineDomTreeGraphTraitsBase<MachineDomTreeNode,
274 MachineDomTreeNode::iterator> {};
275
276template <>
277struct GraphTraits<const MachineDomTreeNode *>
278 : public MachineDomTreeGraphTraitsBase<const MachineDomTreeNode,
279 MachineDomTreeNode::const_iterator> {
280};
281
282template <> struct GraphTraits<MachineDominatorTree*>
283 : public GraphTraits<MachineDomTreeNode *> {
284 static NodeRef getEntryNode(MachineDominatorTree *DT) {
285 return DT->getRootNode();
286 }
287};
288
289} // end namespace llvm
290
291#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H

/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/Support/GenericDomTree.h

1//===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9/// \file
10///
11/// This file defines a set of templates that efficiently compute a dominator
12/// tree over a generic graph. This is used typically in LLVM for fast
13/// dominance queries on the CFG, but is fully generic w.r.t. the underlying
14/// graph types.
15///
16/// Unlike ADT/* graph algorithms, generic dominator tree has more requirements
17/// on the graph's NodeRef. The NodeRef should be a pointer and,
18/// NodeRef->getParent() must return the parent node that is also a pointer.
19///
20/// FIXME: Maybe GenericDomTree needs a TreeTraits, instead of GraphTraits.
21///
22//===----------------------------------------------------------------------===//
23
24#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
25#define LLVM_SUPPORT_GENERICDOMTREE_H
26
27#include <algorithm>
28#include <cassert>
29#include <cstddef>
30#include <iterator>
31#include <memory>
32#include <type_traits>
33#include <utility>
34#include <vector>
35#include "llvm/ADT/DenseMap.h"
36#include "llvm/ADT/GraphTraits.h"
37#include "llvm/ADT/PointerIntPair.h"
38#include "llvm/ADT/STLExtras.h"
39#include "llvm/ADT/SmallPtrSet.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Support/raw_ostream.h"
42
43namespace llvm {
44
45template <typename NodeT, bool IsPostDom>
46class DominatorTreeBase;
47
48namespace DomTreeBuilder {
49template <typename DomTreeT>
50struct SemiNCAInfo;
51} // namespace DomTreeBuilder
52
53/// \brief Base class for the actual dominator tree node.
54template <class NodeT> class DomTreeNodeBase {
55 friend class PostDominatorTree;
56 friend class DominatorTreeBase<NodeT, false>;
57 friend class DominatorTreeBase<NodeT, true>;
58 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, false>>;
59 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, true>>;
60
61 NodeT *TheBB;
62 DomTreeNodeBase *IDom;
63 unsigned Level;
64 std::vector<DomTreeNodeBase *> Children;
65 mutable unsigned DFSNumIn = ~0;
66 mutable unsigned DFSNumOut = ~0;
67
68 public:
69 DomTreeNodeBase(NodeT *BB, DomTreeNodeBase *iDom)
70 : TheBB(BB), IDom(iDom), Level(IDom ? IDom->Level + 1 : 0) {}
71
72 using iterator = typename std::vector<DomTreeNodeBase *>::iterator;
73 using const_iterator =
74 typename std::vector<DomTreeNodeBase *>::const_iterator;
75
76 iterator begin() { return Children.begin(); }
77 iterator end() { return Children.end(); }
78 const_iterator begin() const { return Children.begin(); }
79 const_iterator end() const { return Children.end(); }
80
81 NodeT *getBlock() const { return TheBB; }
82 DomTreeNodeBase *getIDom() const { return IDom; }
83 unsigned getLevel() const { return Level; }
84
85 const std::vector<DomTreeNodeBase *> &getChildren() const { return Children; }
86
87 std::unique_ptr<DomTreeNodeBase> addChild(
88 std::unique_ptr<DomTreeNodeBase> C) {
89 Children.push_back(C.get());
90 return C;
91 }
92
93 size_t getNumChildren() const { return Children.size(); }
94
95 void clearAllChildren() { Children.clear(); }
96
97 bool compare(const DomTreeNodeBase *Other) const {
98 if (getNumChildren() != Other->getNumChildren())
99 return true;
100
101 if (Level != Other->Level) return true;
102
103 SmallPtrSet<const NodeT *, 4> OtherChildren;
104 for (const DomTreeNodeBase *I : *Other) {
105 const NodeT *Nd = I->getBlock();
106 OtherChildren.insert(Nd);
107 }
108
109 for (const DomTreeNodeBase *I : *this) {
110 const NodeT *N = I->getBlock();
111 if (OtherChildren.count(N) == 0)
112 return true;
113 }
114 return false;
115 }
116
117 void setIDom(DomTreeNodeBase *NewIDom) {
118    assert(IDom && "No immediate dominator?");
119 if (IDom == NewIDom) return;
120
121 auto I = find(IDom->Children, this);
122    assert(I != IDom->Children.end() &&
123           "Not in immediate dominator children set!");
124 // I am no longer your child...
125 IDom->Children.erase(I);
126
127 // Switch to new dominator
128 IDom = NewIDom;
129 IDom->Children.push_back(this);
130
131 UpdateLevel();
132 }
133
134 /// getDFSNumIn/getDFSNumOut - These return the DFS visitation order for nodes
135 /// in the dominator tree. They are only guaranteed valid if
136 /// updateDFSNumbers() has been called.
137 unsigned getDFSNumIn() const { return DFSNumIn; }
138 unsigned getDFSNumOut() const { return DFSNumOut; }
139
140private:
141 // Return true if this node is dominated by other. Use this only if DFS info
142 // is valid.
143 bool DominatedBy(const DomTreeNodeBase *other) const {
144 return this->DFSNumIn >= other->DFSNumIn &&
145 this->DFSNumOut <= other->DFSNumOut;
146 }
147
148 void UpdateLevel() {
149    assert(IDom);
150 if (Level == IDom->Level + 1) return;
151
152 SmallVector<DomTreeNodeBase *, 64> WorkStack = {this};
153
154 while (!WorkStack.empty()) {
155 DomTreeNodeBase *Current = WorkStack.pop_back_val();
156 Current->Level = Current->IDom->Level + 1;
157
158 for (DomTreeNodeBase *C : *Current) {
159        assert(C->IDom);
160 if (C->Level != C->IDom->Level + 1) WorkStack.push_back(C);
161 }
162 }
163 }
164};
165
166template <class NodeT>
167raw_ostream &operator<<(raw_ostream &O, const DomTreeNodeBase<NodeT> *Node) {
168 if (Node->getBlock())
169 Node->getBlock()->printAsOperand(O, false);
170 else
171 O << " <<exit node>>";
172
173 O << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "} ["
174 << Node->getLevel() << "]\n";
175
176 return O;
177}
178
179template <class NodeT>
180void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &O,
181 unsigned Lev) {
182 O.indent(2 * Lev) << "[" << Lev << "] " << N;
183 for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
184 E = N->end();
185 I != E; ++I)
186 PrintDomTree<NodeT>(*I, O, Lev + 1);
187}
188
189namespace DomTreeBuilder {
190// The routines below are provided in a separate header but referenced here.
191template <typename DomTreeT>
192void Calculate(DomTreeT &DT);
193
194template <typename DomTreeT>
195void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
196 typename DomTreeT::NodePtr To);
197
198template <typename DomTreeT>
199void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
200 typename DomTreeT::NodePtr To);
201
202// UpdateKind and Update are used by the batch update API and it's easiest to
203// define them here.
204enum class UpdateKind : unsigned char { Insert, Delete };
205
206template <typename NodePtr>
207struct Update {
208 using NodeKindPair = PointerIntPair<NodePtr, 1, UpdateKind>;
209
210 NodePtr From;
211 NodeKindPair ToAndKind;
212
213 Update(UpdateKind Kind, NodePtr From, NodePtr To)
214 : From(From), ToAndKind(To, Kind) {}
215
216 UpdateKind getKind() const { return ToAndKind.getInt(); }
217 NodePtr getFrom() const { return From; }
218 NodePtr getTo() const { return ToAndKind.getPointer(); }
219 bool operator==(const Update &RHS) const {
220 return From == RHS.From && ToAndKind == RHS.ToAndKind;
221 }
222
223 friend raw_ostream &operator<<(raw_ostream &OS, const Update &U) {
224 OS << (U.getKind() == UpdateKind::Insert ? "Insert " : "Delete ");
225 U.getFrom()->printAsOperand(OS, false);
226 OS << " -> ";
227 U.getTo()->printAsOperand(OS, false);
228 return OS;
229 }
230};
231
232template <typename DomTreeT>
233void ApplyUpdates(DomTreeT &DT,
234 ArrayRef<typename DomTreeT::UpdateType> Updates);
235
236template <typename DomTreeT>
237bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL);
238} // namespace DomTreeBuilder
239
240/// \brief Core dominator tree base class.
241///
242/// This class is a generic template over graph nodes. It is instantiated for
243/// various graphs in the LLVM IR or in the code generator.
244template <typename NodeT, bool IsPostDom>
245class DominatorTreeBase {
246 public:
247 static_assert(std::is_pointer<typename GraphTraits<NodeT *>::NodeRef>::value,
248 "Currently DominatorTreeBase supports only pointer nodes");
249 using NodeType = NodeT;
250 using NodePtr = NodeT *;
251 using ParentPtr = decltype(std::declval<NodeT *>()->getParent());
252 static_assert(std::is_pointer<ParentPtr>::value,
253 "Currently NodeT's parent must be a pointer type");
254 using ParentType = typename std::remove_pointer<ParentPtr>::type;
255 static constexpr bool IsPostDominator = IsPostDom;
256
257 using UpdateType = DomTreeBuilder::Update<NodePtr>;
258 using UpdateKind = DomTreeBuilder::UpdateKind;
259 static constexpr UpdateKind Insert = UpdateKind::Insert;
260 static constexpr UpdateKind Delete = UpdateKind::Delete;
261
262 enum class VerificationLevel { Fast, Basic, Full };
263
264protected:
265 // Dominators always have a single root, postdominators can have more.
266 SmallVector<NodeT *, IsPostDom ? 4 : 1> Roots;
267
268 using DomTreeNodeMapType =
269 DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>;
270 DomTreeNodeMapType DomTreeNodes;
271 DomTreeNodeBase<NodeT> *RootNode;
272 ParentPtr Parent = nullptr;
273
274 mutable bool DFSInfoValid = false;
275 mutable unsigned int SlowQueries = 0;
276
277 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>;
278
279 public:
280 DominatorTreeBase() {}
281
282 DominatorTreeBase(DominatorTreeBase &&Arg)
283 : Roots(std::move(Arg.Roots)),
284 DomTreeNodes(std::move(Arg.DomTreeNodes)),
285 RootNode(Arg.RootNode),
286 Parent(Arg.Parent),
287 DFSInfoValid(Arg.DFSInfoValid),
288 SlowQueries(Arg.SlowQueries) {
289 Arg.wipe();
290 }
291
292 DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
293 Roots = std::move(RHS.Roots);
294 DomTreeNodes = std::move(RHS.DomTreeNodes);
295 RootNode = RHS.RootNode;
296 Parent = RHS.Parent;
297 DFSInfoValid = RHS.DFSInfoValid;
298 SlowQueries = RHS.SlowQueries;
299 RHS.wipe();
300 return *this;
301 }
302
303 DominatorTreeBase(const DominatorTreeBase &) = delete;
304 DominatorTreeBase &operator=(const DominatorTreeBase &) = delete;
305
306 /// getRoots - Return the root blocks of the current CFG. This may include
307 /// multiple blocks if we are computing post dominators. For forward
308 /// dominators, this will always be a single block (the entry node).
309 ///
310 const SmallVectorImpl<NodeT *> &getRoots() const { return Roots; }
311
312  /// isPostDominator - Returns true if the analysis is based on postdoms.
313 ///
314 bool isPostDominator() const { return IsPostDominator; }
315
316 /// compare - Return false if the other dominator tree base matches this
317 /// dominator tree base. Otherwise return true.
318 bool compare(const DominatorTreeBase &Other) const {
319 if (Parent != Other.Parent) return true;
320
321 if (Roots.size() != Other.Roots.size())
322 return true;
323
324 if (!std::is_permutation(Roots.begin(), Roots.end(), Other.Roots.begin()))
325 return true;
326
327 const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
328 if (DomTreeNodes.size() != OtherDomTreeNodes.size())
329 return true;
330
331 for (const auto &DomTreeNode : DomTreeNodes) {
332 NodeT *BB = DomTreeNode.first;
333 typename DomTreeNodeMapType::const_iterator OI =
334 OtherDomTreeNodes.find(BB);
335 if (OI == OtherDomTreeNodes.end())
336 return true;
337
338 DomTreeNodeBase<NodeT> &MyNd = *DomTreeNode.second;
339 DomTreeNodeBase<NodeT> &OtherNd = *OI->second;
340
341 if (MyNd.compare(&OtherNd))
342 return true;
343 }
344
345 return false;
346 }
347
348 void releaseMemory() { reset(); }
349
350 /// getNode - return the (Post)DominatorTree node for the specified basic
351 /// block. This is the same as using operator[] on this class. The result
352 /// may (but is not required to) be null for a forward (backwards)
353 /// statically unreachable block.
354 DomTreeNodeBase<NodeT> *getNode(NodeT *BB) const {
355 auto I = DomTreeNodes.find(BB);
356 if (I != DomTreeNodes.end())
9. Assuming the condition is true
10. Taking true branch
357 return I->second.get();
11. Calling 'unique_ptr::get'
22. Returning from 'unique_ptr::get'
358 return nullptr;
359 }
360
361 /// See getNode.
362 DomTreeNodeBase<NodeT> *operator[](NodeT *BB) const { return getNode(BB); }
363
364 /// getRootNode - This returns the entry node for the CFG of the function. If
365 /// this tree represents the post-dominance relations for a function, however,
366 /// this root may be a node with the block == NULL. This is the case when
367 /// there are multiple exit nodes from a particular function. Consumers of
368 /// post-dominance information must be capable of dealing with this
369 /// possibility.
370 ///
371 DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; }
372 const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; }
373
374 /// Get all nodes dominated by R, including R itself.
375 void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const {
376 Result.clear();
377 const DomTreeNodeBase<NodeT> *RN = getNode(R);
378 if (!RN)
379 return; // If R is unreachable, it will not be present in the DOM tree.
380 SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL;
381 WL.push_back(RN);
382
383 while (!WL.empty()) {
384 const DomTreeNodeBase<NodeT> *N = WL.pop_back_val();
385 Result.push_back(N->getBlock());
386 WL.append(N->begin(), N->end());
387 }
388 }
389
390 /// properlyDominates - Returns true iff A dominates B and A != B.
391 /// Note that this is not a constant time operation!
392 ///
393 bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
394 const DomTreeNodeBase<NodeT> *B) const {
395 if (!A || !B)
396 return false;
397 if (A == B)
398 return false;
399 return dominates(A, B);
400 }
401
402 bool properlyDominates(const NodeT *A, const NodeT *B) const;
403
404 /// isReachableFromEntry - Return true if A is dominated by the entry
405 /// block of the function containing it.
406 bool isReachableFromEntry(const NodeT *A) const {
407    assert(!this->isPostDominator() &&
408           "This is not implemented for post dominators");
409 return isReachableFromEntry(getNode(const_cast<NodeT *>(A)));
410 }
411
412 bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; }
413
414 /// dominates - Returns true iff A dominates B. Note that this is not a
415 /// constant time operation!
416 ///
417 bool dominates(const DomTreeNodeBase<NodeT> *A,
418 const DomTreeNodeBase<NodeT> *B) const {
419 // A node trivially dominates itself.
420 if (B == A)
421 return true;
422
423 // An unreachable node is dominated by anything.
424 if (!isReachableFromEntry(B))
425 return true;
426
427 // And dominates nothing.
428 if (!isReachableFromEntry(A))
429 return false;
430
431 if (B->getIDom() == A) return true;
432
433 if (A->getIDom() == B) return false;
434
435 // A can only dominate B if it is higher in the tree.
436 if (A->getLevel() >= B->getLevel()) return false;
437
438 // Compare the result of the tree walk and the dfs numbers, if expensive
439 // checks are enabled.
440#ifdef EXPENSIVE_CHECKS
441    assert((!DFSInfoValid ||
442            (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) &&
443           "Tree walk disagrees with dfs numbers!");
444#endif
445
446 if (DFSInfoValid)
447 return B->DominatedBy(A);
448
449 // If we end up with too many slow queries, just update the
450 // DFS numbers on the theory that we are going to keep querying.
451 SlowQueries++;
452 if (SlowQueries > 32) {
453 updateDFSNumbers();
454 return B->DominatedBy(A);
455 }
456
457 return dominatedBySlowTreeWalk(A, B);
458 }
459
460 bool dominates(const NodeT *A, const NodeT *B) const;
461
462 NodeT *getRoot() const {
463 assert(this->Roots.size() == 1 && "Should always have entry node!");
464 return this->Roots[0];
465 }
466
467 /// findNearestCommonDominator - Find the nearest common dominator basic
468 /// block for basic blocks A and B. If there is no such block then return nullptr.
469 NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) const {
470 assert(A && B && "Pointers are not valid");
471 assert(A->getParent() == B->getParent() &&
472 "Two blocks are not in same function");
473
474 // If either A or B is an entry block then it is the nearest common dominator
475 // (for forward-dominators).
476 if (!isPostDominator()) {
477 NodeT &Entry = A->getParent()->front();
478 if (A == &Entry || B == &Entry)
479 return &Entry;
480 }
481
482 DomTreeNodeBase<NodeT> *NodeA = getNode(A);
483 DomTreeNodeBase<NodeT> *NodeB = getNode(B);
484
485 if (!NodeA || !NodeB) return nullptr;
486
487 // Use level information to go up the tree until the levels match. Then
488 // continue going up until we arrive at the same node.
489 while (NodeA && NodeA != NodeB) {
490 if (NodeA->getLevel() < NodeB->getLevel()) std::swap(NodeA, NodeB);
491
492 NodeA = NodeA->IDom;
493 }
494
495 return NodeA ? NodeA->getBlock() : nullptr;
496 }
497
498 const NodeT *findNearestCommonDominator(const NodeT *A,
499 const NodeT *B) const {
500 // Cast away the const qualifiers here. This is ok since
501 // const is re-introduced on the return type.
502 return findNearestCommonDominator(const_cast<NodeT *>(A),
503 const_cast<NodeT *>(B));
504 }
505
506 bool isVirtualRoot(const DomTreeNodeBase<NodeT> *A) const {
507 return isPostDominator() && !A->getBlock();
508 }
509
510 //===--------------------------------------------------------------------===//
511 // API to update (Post)DominatorTree information based on modifications to
512 // the CFG...
513
514 /// Inform the dominator tree about a sequence of CFG edge insertions and
515 /// deletions and perform a batch update on the tree.
516 ///
517 /// This function should be used when there were multiple CFG updates after
518 /// the last dominator tree update. It takes care of performing the updates
519 /// in sync with the CFG and optimizes away the redundant operations that
520 /// cancel each other.
521 /// The function expects the sequence of updates to be balanced. E.g.:
522 /// - {{Insert, A, B}, {Delete, A, B}, {Insert, A, B}} is fine, because
523 /// logically it results in a single insertion.
524 /// - {{Insert, A, B}, {Insert, A, B}} is invalid, because it doesn't make
525 /// sense to insert the same edge twice.
526 ///
527 /// What's more, the function assumes that it's safe to ask every node in the
528 /// CFG about its children and inverse children. This implies that deletions
529 /// of CFG edges must not delete the CFG nodes before calling this function.
530 ///
531 /// Batch updates should be generally faster when performing longer sequences
532 /// of updates than calling insertEdge/deleteEdge manually multiple times, as
533 /// it can reorder the updates and remove redundant ones internally.
534 /// The batch updater is also able to detect sequences of zero and exactly one
535 /// update -- it's optimized to do less work in these cases.
536 ///
537 /// Note that for postdominators it automatically takes care of applying
538 /// updates on reverse edges internally (so there's no need to swap the
539 /// From and To pointers when constructing DominatorTree::UpdateType).
540 /// The type of updates is the same for DomTreeBase<T> and PostDomTreeBase<T>
541 /// with the same template parameter T.
542 ///
543 /// \param Updates An unordered sequence of updates to perform.
544 ///
545 void applyUpdates(ArrayRef<UpdateType> Updates) {
546 DomTreeBuilder::ApplyUpdates(*this, Updates);
547 }
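A sketch of a balanced batch, assuming llvm::DominatorTree over BasicBlock (the helper retargetEdge is hypothetical): the CFG edge A->B was replaced by A->C, and both changes are handed to the tree at once so redundant work can be folded away.

  #include "llvm/IR/Dominators.h"
  using namespace llvm;

  // Hypothetical helper: apply the delete and the insert as one batch.
  static void retargetEdge(DominatorTree &DT, BasicBlock *A, BasicBlock *B,
                           BasicBlock *C) {
    DT.applyUpdates({{DominatorTree::Delete, A, B},
                     {DominatorTree::Insert, A, C}});
  }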
548
549 /// Inform the dominator tree about a CFG edge insertion and update the tree.
550 ///
551 /// This function has to be called just before or just after making the update
552 /// on the actual CFG. There cannot be any other updates that the dominator
553 /// tree doesn't know about.
554 ///
555 /// Note that for postdominators it automatically takes care of inserting
556 /// a reverse edge internally (so there's no need to swap the parameters).
557 ///
558 void insertEdge(NodeT *From, NodeT *To) {
559 assert(From);
560 assert(To);
561 assert(From->getParent() == Parent);
562 assert(To->getParent() == Parent);
563 DomTreeBuilder::InsertEdge(*this, From, To);
564 }
565
566 /// Inform the dominator tree about a CFG edge deletion and update the tree.
567 ///
568 /// This function has to be called just after making the update on the actual
569 /// CFG. An internal function checks, in DEBUG mode, that the edge no longer
570 /// exists in the CFG. There cannot be any other updates that the
571 /// dominator tree doesn't know about.
572 ///
573 /// Note that for postdominators it automatically takes care of deleting
574 /// a reverse edge internally (so there's no need to swap the parameters).
575 ///
576 void deleteEdge(NodeT *From, NodeT *To) {
577 assert(From);
578 assert(To);
579 assert(From->getParent() == Parent);
580 assert(To->getParent() == Parent);
581 DomTreeBuilder::DeleteEdge(*this, From, To);
582 }
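A sketch of the single-edge discipline both functions require, assuming llvm::DominatorTree over BasicBlock (onEdgeAdded is a hypothetical caller): exactly one CFG change may be outstanding per call.

  // Hypothetical caller: a brand-new edge From->To was just created in the
  // IR and nothing else changed, so a single call keeps the tree exact.
  static void onEdgeAdded(DominatorTree &DT, BasicBlock *From, BasicBlock *To) {
    DT.insertEdge(From, To);
  }
  // deleteEdge(From, To) mirrors this for an edge that was just removed.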
583
584 /// Add a new node to the dominator tree information.
585 ///
586 /// This creates a new node as a child of DomBB dominator node, linking it
587 /// into the children list of the immediate dominator.
588 ///
589 /// \param BB New node in CFG.
590 /// \param DomBB CFG node that is dominator for BB.
591 /// \returns New dominator tree node that represents new CFG node.
592 ///
593 DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
594 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
595 DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
596 assert(IDomNode && "Not immediate dominator specified for block!");
597 DFSInfoValid = false;
598 return (DomTreeNodes[BB] = IDomNode->addChild(
599 llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get();
600 }
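For example (a sketch assuming llvm::DominatorTree; registerSplitBlock is illustrative), a block created with a single predecessor is immediately dominated by that predecessor:

  // Illustrative: NewBB was just created and Pred is its only predecessor,
  // so Pred is trivially its immediate dominator.
  static void registerSplitBlock(DominatorTree &DT, BasicBlock *NewBB,
                                 BasicBlock *Pred) {
    DT.addNewBlock(NewBB, Pred);
  }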
601
602 /// Add a new node to the forward dominator tree and make it a new root.
603 ///
604 /// \param BB New node in CFG.
605 /// \returns New dominator tree node that represents new CFG node.
606 ///
607 DomTreeNodeBase<NodeT> *setNewRoot(NodeT *BB) {
608 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
609 assert(!this->isPostDominator() &&
610 "Cannot change root of post-dominator tree");
611 DFSInfoValid = false;
612 DomTreeNodeBase<NodeT> *NewNode = (DomTreeNodes[BB] =
613 llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, nullptr)).get();
614 if (Roots.empty()) {
615 addRoot(BB);
616 } else {
617 assert(Roots.size() == 1);
618 NodeT *OldRoot = Roots.front();
619 auto &OldNode = DomTreeNodes[OldRoot];
620 OldNode = NewNode->addChild(std::move(DomTreeNodes[OldRoot]));
621 OldNode->IDom = NewNode;
622 OldNode->UpdateLevel();
623 Roots[0] = BB;
624 }
625 return RootNode = NewNode;
626 }
627
628 /// changeImmediateDominator - This method is used to update the dominator
629 /// tree information when a node's immediate dominator changes.
630 ///
631 void changeImmediateDominator(DomTreeNodeBase<NodeT> *N,
632 DomTreeNodeBase<NodeT> *NewIDom) {
633 assert(N && NewIDom && "Cannot change null node pointers!");
634 DFSInfoValid = false;
635 N->setIDom(NewIDom);
636 }
637
638 void changeImmediateDominator(NodeT *BB, NodeT *NewBB) {
639 changeImmediateDominator(getNode(BB), getNode(NewBB));
640 }
641
642 /// eraseNode - Removes a node from the dominator tree. Block must not
643 /// dominate any other blocks. Removes node from its immediate dominator's
644 /// children list. Deletes dominator node associated with basic block BB.
645 void eraseNode(NodeT *BB) {
646 DomTreeNodeBase<NodeT> *Node = getNode(BB);
647 assert(Node && "Removing node that isn't in dominator tree.");
648 assert(Node->getChildren().empty() && "Node is not a leaf node.");
649
650 DFSInfoValid = false;
651
652 // Remove node from immediate dominator's children list.
653 DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
654 if (IDom) {
655 const auto I = find(IDom->Children, Node);
656 assert(I != IDom->Children.end() &&
657 "Not in immediate dominator children set!");
658 // I am no longer your child...
659 IDom->Children.erase(I);
660 }
661
662 DomTreeNodes.erase(BB);
663
664 if (!IsPostDom) return;
665
666 // Remember to update PostDominatorTree roots.
667 auto RIt = llvm::find(Roots, BB);
668 if (RIt != Roots.end()) {
669 std::swap(*RIt, Roots.back());
670 Roots.pop_back();
671 }
672 }
673
674 /// splitBlock - BB is split and now it has one successor. Update dominator
675 /// tree to reflect this change.
676 void splitBlock(NodeT *NewBB) {
677 if (IsPostDominator)
678 Split<Inverse<NodeT *>>(NewBB);
679 else
680 Split<NodeT *>(NewBB);
681 }
682
683 /// print - Convert to human readable form
684 ///
685 void print(raw_ostream &O) const {
686 O << "=============================--------------------------------\n";
687 if (IsPostDominator)
688 O << "Inorder PostDominator Tree: ";
689 else
690 O << "Inorder Dominator Tree: ";
691 if (!DFSInfoValid)
692 O << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
693 O << "\n";
694
695 // The postdom tree can have a null root if there are no returns.
696 if (getRootNode()) PrintDomTree<NodeT>(getRootNode(), O, 1);
697 if (IsPostDominator) {
698 O << "Roots: ";
699 for (const NodePtr Block : Roots) {
700 Block->printAsOperand(O, false);
701 O << " ";
702 }
703 O << "\n";
704 }
705 }
706
707public:
708 /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking
709 /// dominator tree in dfs order.
710 void updateDFSNumbers() const {
711 if (DFSInfoValid) {
712 SlowQueries = 0;
713 return;
714 }
715
716 SmallVector<std::pair<const DomTreeNodeBase<NodeT> *,
717 typename DomTreeNodeBase<NodeT>::const_iterator>,
718 32> WorkStack;
719
720 const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode();
721 assert((!Parent || ThisRoot) && "Empty constructed DomTree");
722 if (!ThisRoot)
723 return;
724
725 // Both dominators and postdominators have a single root node. In the
726 // case of PostDominatorTree, this node is a virtual root.
727 WorkStack.push_back({ThisRoot, ThisRoot->begin()});
728
729 unsigned DFSNum = 0;
730 ThisRoot->DFSNumIn = DFSNum++;
731
732 while (!WorkStack.empty()) {
733 const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first;
734 const auto ChildIt = WorkStack.back().second;
735
736 // If we visited all of the children of this node, "recurse" back up the
737 // stack setting the DFSNumOut.
738 if (ChildIt == Node->end()) {
739 Node->DFSNumOut = DFSNum++;
740 WorkStack.pop_back();
741 } else {
742 // Otherwise, recursively visit this child.
743 const DomTreeNodeBase<NodeT> *Child = *ChildIt;
744 ++WorkStack.back().second;
745
746 WorkStack.push_back({Child, Child->begin()});
747 Child->DFSNumIn = DFSNum++;
748 }
749 }
750
751 SlowQueries = 0;
752 DFSInfoValid = true;
753 }
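Once the walk above has assigned DFSNumIn/DFSNumOut, the DominatedBy test used earlier reduces to interval containment; a standalone sketch of that property (editor's illustration, not code from the header):

  // A dominates B exactly when B's DFS interval nests inside A's.
  static bool dominatedByViaDFS(unsigned AIn, unsigned AOut,
                                unsigned BIn, unsigned BOut) {
    return AIn <= BIn && BOut <= AOut;
  }
  // E.g. Entry{0,5} contains Left{1,2} and Right{3,4}, so Entry dominates
  // both; Left does not dominate Right because 4 > 2.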
754
755 /// recalculate - compute a dominator tree for the given function
756 void recalculate(ParentType &Func) {
757 Parent = &Func;
758 DomTreeBuilder::Calculate(*this);
759 }
760
761 /// verify - checks if the tree is correct. There are 3 levels of verification:
762 /// - Full -- verifies if the tree is correct by making sure all the
763 /// properties (including the parent and the sibling property)
764 /// hold.
765 /// Takes O(N^3) time.
766 ///
767 /// - Basic -- checks if the tree is correct, but compares it to a freshly
768 /// constructed tree instead of checking the sibling property.
769 /// Takes O(N^2) time.
770 ///
771 /// - Fast -- checks basic tree structure and compares it with a freshly
772 /// constructed tree.
773 /// Takes O(N^2) time worst case, but is faster in practice (same
774 /// as tree construction).
775 bool verify(VerificationLevel VL = VerificationLevel::Full) const {
776 return DomTreeBuilder::Verify(*this, VL);
777 }
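A typical debug-build guard, assuming the llvm::DominatorTree instantiation (the assert placement is illustrative):

  // Fast keeps the check close to tree-construction cost; Full is usually
  // reserved for expensive-checks builds.
  assert(DT.verify(DominatorTree::VerificationLevel::Fast) &&
         "dominator tree out of sync with the CFG");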
778
779protected:
780 void addRoot(NodeT *BB) { this->Roots.push_back(BB); }
781
782 void reset() {
783 DomTreeNodes.clear();
784 Roots.clear();
785 RootNode = nullptr;
786 Parent = nullptr;
787 DFSInfoValid = false;
788 SlowQueries = 0;
789 }
790
791 // NewBB is split and now it has one successor. Update dominator tree to
792 // reflect this change.
793 template <class N>
794 void Split(typename GraphTraits<N>::NodeRef NewBB) {
795 using GraphT = GraphTraits<N>;
796 using NodeRef = typename GraphT::NodeRef;
797 assert(std::distance(GraphT::child_begin(NewBB),
798 GraphT::child_end(NewBB)) == 1 &&
799 "NewBB should have a single successor!");
800 NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
801
802 std::vector<NodeRef> PredBlocks;
803 for (const auto &Pred : children<Inverse<N>>(NewBB))
804 PredBlocks.push_back(Pred);
805
806 assert(!PredBlocks.empty() && "No predblocks?");
807
808 bool NewBBDominatesNewBBSucc = true;
809 for (const auto &Pred : children<Inverse<N>>(NewBBSucc)) {
810 if (Pred != NewBB && !dominates(NewBBSucc, Pred) &&
811 isReachableFromEntry(Pred)) {
812 NewBBDominatesNewBBSucc = false;
813 break;
814 }
815 }
816
817 // Find NewBB's immediate dominator and create new dominator tree node for
818 // NewBB.
819 NodeT *NewBBIDom = nullptr;
820 unsigned i = 0;
821 for (i = 0; i < PredBlocks.size(); ++i)
822 if (isReachableFromEntry(PredBlocks[i])) {
823 NewBBIDom = PredBlocks[i];
824 break;
825 }
826
827 // It's possible that none of the predecessors of NewBB are reachable;
828 // in that case, NewBB itself is unreachable, so nothing needs to be
829 // changed.
830 if (!NewBBIDom) return;
831
832 for (i = i + 1; i < PredBlocks.size(); ++i) {
833 if (isReachableFromEntry(PredBlocks[i]))
834 NewBBIDom = findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
835 }
836
837 // Create the new dominator tree node... and set the idom of NewBB.
838 DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom);
839
840 // If NewBB strictly dominates other blocks, then it is now the immediate
841 // dominator of NewBBSucc. Update the dominator tree as appropriate.
842 if (NewBBDominatesNewBBSucc) {
843 DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc);
844 changeImmediateDominator(NewBBSuccNode, NewBBNode);
845 }
846 }
847
848 private:
849 bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
850 const DomTreeNodeBase<NodeT> *B) const {
851 assert(A != B);
852 assert(isReachableFromEntry(B));
853 assert(isReachableFromEntry(A));
854
855 const DomTreeNodeBase<NodeT> *IDom;
856 while ((IDom = B->getIDom()) != nullptr && IDom != A && IDom != B)
857 B = IDom; // Walk up the tree
858 return IDom != nullptr;
859 }
860
861 /// \brief Wipe this tree's state without releasing any resources.
862 ///
863 /// This is essentially a post-move helper only. It leaves the object in an
864 /// assignable and destroyable state, but otherwise invalid.
865 void wipe() {
866 DomTreeNodes.clear();
867 RootNode = nullptr;
868 Parent = nullptr;
869 }
870};
871
872template <typename T>
873using DomTreeBase = DominatorTreeBase<T, false>;
874
875template <typename T>
876using PostDomTreeBase = DominatorTreeBase<T, true>;
877
878// These two functions are declared out of line as a workaround for building
879// with old (< r147295) versions of clang because of pr11642.
880template <typename NodeT, bool IsPostDom>
881bool DominatorTreeBase<NodeT, IsPostDom>::dominates(const NodeT *A,
882 const NodeT *B) const {
883 if (A == B)
884 return true;
885
886 // Cast away the const qualifiers here. This is ok since
887 // this function doesn't actually return the values returned
888 // from getNode.
889 return dominates(getNode(const_cast<NodeT *>(A)),
890 getNode(const_cast<NodeT *>(B)));
891}
892template <typename NodeT, bool IsPostDom>
893bool DominatorTreeBase<NodeT, IsPostDom>::properlyDominates(
894 const NodeT *A, const NodeT *B) const {
895 if (A == B)
896 return false;
897
898 // Cast away the const qualifiers here. This is ok since
899 // this function doesn't actually return the values returned
900 // from getNode.
901 return dominates(getNode(const_cast<NodeT *>(A)),
902 getNode(const_cast<NodeT *>(B)));
903}
904
905} // end namespace llvm
906
907#endif // LLVM_SUPPORT_GENERICDOMTREE_H

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/bits/unique_ptr.h

1// unique_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2008-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/unique_ptr.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30#ifndef _UNIQUE_PTR_H
31#define _UNIQUE_PTR_H 1
32
33#include <bits/c++config.h>
34#include <debug/assertions.h>
35#include <type_traits>
36#include <utility>
37#include <tuple>
38#include <bits/stl_function.h>
39#include <bits/functional_hash.h>
40
41namespace std _GLIBCXX_VISIBILITY(default)
42{
43_GLIBCXX_BEGIN_NAMESPACE_VERSION
44
45 /**
46 * @addtogroup pointer_abstractions
47 * @{
48 */
49
50#if _GLIBCXX_USE_DEPRECATED
51 template<typename> class auto_ptr;
52#endif
53
54 /// Primary template of default_delete, used by unique_ptr
55 template<typename _Tp>
56 struct default_delete
57 {
58 /// Default constructor
59 constexpr default_delete() noexcept = default;
60
61 /** @brief Converting constructor.
62 *
63 * Allows conversion from a deleter for arrays of another type, @p _Up,
64 * only if @p _Up* is convertible to @p _Tp*.
65 */
66 template<typename _Up, typename = typename
67 enable_if<is_convertible<_Up*, _Tp*>::value>::type>
68 default_delete(const default_delete<_Up>&) noexcept { }
69
70 /// Calls @c delete @p __ptr
71 void
72 operator()(_Tp* __ptr) const
73 {
74 static_assert(!is_void<_Tp>::value,
75 "can't delete pointer to incomplete type");
76 static_assert(sizeof(_Tp)>0,
77 "can't delete pointer to incomplete type");
78 delete __ptr;
79 }
80 };
81
82 // _GLIBCXX_RESOLVE_LIB_DEFECTS
83 // DR 740 - omit specialization for array objects with a compile time length
84 /// Specialization for arrays, default_delete.
85 template<typename _Tp>
86 struct default_delete<_Tp[]>
87 {
88 public:
89 /// Default constructor
90 constexpr default_delete() noexcept = default;
91
92 /** @brief Converting constructor.
93 *
94 * Allows conversion from a deleter for arrays of another type, such as
95 * a const-qualified version of @p _Tp.
96 *
97 * Conversions from types derived from @c _Tp are not allowed because
98 * it is unsafe to @c delete[] an array of derived types through a
99 * pointer to the base type.
100 */
101 template<typename _Up, typename = typename
102 enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type>
103 default_delete(const default_delete<_Up[]>&) noexcept { }
104
105 /// Calls @c delete[] @p __ptr
106 template<typename _Up>
107 typename enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type
108 operator()(_Up* __ptr) const
109 {
110 static_assert(sizeof(_Tp)>0,
111 "can't delete pointer to incomplete type");
112 delete [] __ptr;
113 }
114 };
115
116 template <typename _Tp, typename _Dp>
117 class __uniq_ptr_impl
118 {
119 template <typename _Up, typename _Ep, typename = void>
120 struct _Ptr
121 {
122 using type = _Up*;
123 };
124
125 template <typename _Up, typename _Ep>
126 struct
127 _Ptr<_Up, _Ep, __void_t<typename remove_reference<_Ep>::type::pointer>>
128 {
129 using type = typename remove_reference<_Ep>::type::pointer;
130 };
131
132 public:
133 using _DeleterConstraint = enable_if<
134 __and_<__not_<is_pointer<_Dp>>,
135 is_default_constructible<_Dp>>::value>;
136
137 using pointer = typename _Ptr<_Tp, _Dp>::type;
138
139 __uniq_ptr_impl() = default;
140 __uniq_ptr_impl(pointer __p) : _M_t() { _M_ptr() = __p; }
141
142 template<typename _Del>
143 __uniq_ptr_impl(pointer __p, _Del&& __d)
144 : _M_t(__p, std::forward<_Del>(__d)) { }
145
146 pointer& _M_ptr() { return std::get<0>(_M_t); }
147 pointer _M_ptr() const { return std::get<0>(_M_t); }
13. Calling 'get'
20. Returning from 'get'
148 _Dp& _M_deleter() { return std::get<1>(_M_t); }
149 const _Dp& _M_deleter() const { return std::get<1>(_M_t); }
150
151 private:
152 tuple<pointer, _Dp> _M_t;
153 };
154
155 /// 20.7.1.2 unique_ptr for single objects.
156 template <typename _Tp, typename _Dp = default_delete<_Tp>>
157 class unique_ptr
158 {
159 template <class _Up>
160 using _DeleterConstraint =
161 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
162
163 __uniq_ptr_impl<_Tp, _Dp> _M_t;
164
165 public:
166 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
167 using element_type = _Tp;
168 using deleter_type = _Dp;
169
170 // helper template for detecting a safe conversion from another
171 // unique_ptr
172 template<typename _Up, typename _Ep>
173 using __safe_conversion_up = __and_<
174 is_convertible<typename unique_ptr<_Up, _Ep>::pointer, pointer>,
175 __not_<is_array<_Up>>,
176 __or_<__and_<is_reference<deleter_type>,
177 is_same<deleter_type, _Ep>>,
178 __and_<__not_<is_reference<deleter_type>>,
179 is_convertible<_Ep, deleter_type>>
180 >
181 >;
182
183 // Constructors.
184
185 /// Default constructor, creates a unique_ptr that owns nothing.
186 template <typename _Up = _Dp,
187 typename = _DeleterConstraint<_Up>>
188 constexpr unique_ptr() noexcept
189 : _M_t()
190 { }
191
192 /** Takes ownership of a pointer.
193 *
194 * @param __p A pointer to an object of @c element_type
195 *
196 * The deleter will be value-initialized.
197 */
198 template <typename _Up = _Dp,
199 typename = _DeleterConstraint<_Up>>
200 explicit
201 unique_ptr(pointer __p) noexcept
202 : _M_t(__p)
203 { }
204
205 /** Takes ownership of a pointer.
206 *
207 * @param __p A pointer to an object of @c element_type
208 * @param __d A reference to a deleter.
209 *
210 * The deleter will be initialized with @p __d
211 */
212 unique_ptr(pointer __p,
213 typename conditional<is_reference<deleter_type>::value,
214 deleter_type, const deleter_type&>::type __d) noexcept
215 : _M_t(__p, __d) { }
216
217 /** Takes ownership of a pointer.
218 *
219 * @param __p A pointer to an object of @c element_type
220 * @param __d An rvalue reference to a deleter.
221 *
222 * The deleter will be initialized with @p std::move(__d)
223 */
224 unique_ptr(pointer __p,
225 typename remove_reference<deleter_type>::type&& __d) noexcept
226 : _M_t(std::move(__p), std::move(__d))
227 { static_assert(!std::is_reference<deleter_type>::value,
228 "rvalue deleter bound to reference"); }
229
230 /// Creates a unique_ptr that owns nothing.
231 template <typename _Up = _Dp,
232 typename = _DeleterConstraint<_Up>>
233 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
234
235 // Move constructors.
236
237 /// Move constructor.
238 unique_ptr(unique_ptr&& __u) noexcept
239 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
240
241 /** @brief Converting constructor from another type
242 *
243 * Requires that the pointer owned by @p __u is convertible to the
244 * type of pointer owned by this object, @p __u does not own an array,
245 * and @p __u has a compatible deleter type.
246 */
247 template<typename _Up, typename _Ep, typename = _Require<
248 __safe_conversion_up<_Up, _Ep>,
249 typename conditional<is_reference<_Dp>::value,
250 is_same<_Ep, _Dp>,
251 is_convertible<_Ep, _Dp>>::type>>
252 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
253 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
254 { }
255
256#if _GLIBCXX_USE_DEPRECATED
257 /// Converting constructor from @c auto_ptr
258 template<typename _Up, typename = _Require<
259 is_convertible<_Up*, _Tp*>, is_same<_Dp, default_delete<_Tp>>>>
260 unique_ptr(auto_ptr<_Up>&& __u) noexcept;
261#endif
262
263 /// Destructor, invokes the deleter if the stored pointer is not null.
264 ~unique_ptr() noexcept
265 {
266 auto& __ptr = _M_t._M_ptr();
267 if (__ptr != nullptr)
268 get_deleter()(__ptr);
269 __ptr = pointer();
270 }
271
272 // Assignment.
273
274 /** @brief Move assignment operator.
275 *
276 * @param __u The object to transfer ownership from.
277 *
278 * Invokes the deleter first if this object owns a pointer.
279 */
280 unique_ptr&
281 operator=(unique_ptr&& __u) noexcept
282 {
283 reset(__u.release());
284 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
285 return *this;
286 }
287
288 /** @brief Assignment from another type.
289 *
290 * @param __u The object to transfer ownership from, which owns a
291 * convertible pointer to a non-array object.
292 *
293 * Invokes the deleter first if this object owns a pointer.
294 */
295 template<typename _Up, typename _Ep>
296 typename enable_if< __and_<
297 __safe_conversion_up<_Up, _Ep>,
298 is_assignable<deleter_type&, _Ep&&>
299 >::value,
300 unique_ptr&>::type
301 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
302 {
303 reset(__u.release());
304 get_deleter() = std::forward<_Ep>(__u.get_deleter());
305 return *this;
306 }
307
308 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
309 unique_ptr&
310 operator=(nullptr_t) noexcept
311 {
312 reset();
313 return *this;
314 }
315
316 // Observers.
317
318 /// Dereference the stored pointer.
319 typename add_lvalue_reference<element_type>::type
320 operator*() const
321 {
322 __glibcxx_assert(get() != pointer());
323 return *get();
324 }
325
326 /// Return the stored pointer.
327 pointer
328 operator->() const noexcept
329 {
330 _GLIBCXX_DEBUG_PEDASSERT(get() != pointer());
331 return get();
332 }
333
334 /// Return the stored pointer.
335 pointer
336 get() const noexcept
337 { return _M_t._M_ptr(); }
12. Calling '__uniq_ptr_impl::_M_ptr'
21. Returning from '__uniq_ptr_impl::_M_ptr'
338
339 /// Return a reference to the stored deleter.
340 deleter_type&
341 get_deleter() noexcept
342 { return _M_t._M_deleter(); }
343
344 /// Return a reference to the stored deleter.
345 const deleter_type&
346 get_deleter() const noexcept
347 { return _M_t._M_deleter(); }
348
349 /// Return @c true if the stored pointer is not null.
350 explicit operator bool() const noexcept
351 { return get() == pointer() ? false : true; }
352
353 // Modifiers.
354
355 /// Release ownership of any stored pointer.
356 pointer
357 release() noexcept
358 {
359 pointer __p = get();
360 _M_t._M_ptr() = pointer();
361 return __p;
362 }
363
364 /** @brief Replace the stored pointer.
365 *
366 * @param __p The new pointer to store.
367 *
368 * The deleter will be invoked if a pointer is already owned.
369 */
370 void
371 reset(pointer __p = pointer()) noexcept
372 {
373 using std::swap;
374 swap(_M_t._M_ptr(), __p);
375 if (__p != pointer())
376 get_deleter()(__p);
377 }
378
379 /// Exchange the pointer and deleter with another object.
380 void
381 swap(unique_ptr& __u) noexcept
382 {
383 using std::swap;
384 swap(_M_t, __u._M_t);
385 }
386
387 // Disable copy from lvalue.
388 unique_ptr(const unique_ptr&) = delete;
389 unique_ptr& operator=(const unique_ptr&) = delete;
390 };
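A self-contained sketch exercising the single-object interface above (values are arbitrary):

  #include <memory>
  #include <utility>

  int main() {
    std::unique_ptr<int> p(new int(42));   // explicit ctor takes ownership
    std::unique_ptr<int> q = std::move(p); // move ctor: p is now empty
    q.reset(new int(7));                   // deleter runs on the old object
    if (q)                                 // operator bool: non-null test
      *q += 1;                             // operator*: dereference
    int *raw = q.release();                // give up ownership, no delete
    delete raw;                            // caller is now responsible
    return 0;
  }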
391
392 /// 20.7.1.3 unique_ptr for array objects with a runtime length
393 // [unique.ptr.runtime]
394 // _GLIBCXX_RESOLVE_LIB_DEFECTS
395 // DR 740 - omit specialization for array objects with a compile time length
396 template<typename _Tp, typename _Dp>
397 class unique_ptr<_Tp[], _Dp>
398 {
399 template <typename _Up>
400 using _DeleterConstraint =
401 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
402
403 __uniq_ptr_impl<_Tp, _Dp> _M_t;
404
405 template<typename _Up>
406 using __remove_cv = typename remove_cv<_Up>::type;
407
408 // like is_base_of<_Tp, _Up> but false if unqualified types are the same
409 template<typename _Up>
410 using __is_derived_Tp
411 = __and_< is_base_of<_Tp, _Up>,
412 __not_<is_same<__remove_cv<_Tp>, __remove_cv<_Up>>> >;
413
414 public:
415 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
416 using element_type = _Tp;
417 using deleter_type = _Dp;
418
419 // helper template for detecting a safe conversion from another
420 // unique_ptr
421 template<typename _Up, typename _Ep,
422 typename _Up_up = unique_ptr<_Up, _Ep>,
423 typename _Up_element_type = typename _Up_up::element_type>
424 using __safe_conversion_up = __and_<
425 is_array<_Up>,
426 is_same<pointer, element_type*>,
427 is_same<typename _Up_up::pointer, _Up_element_type*>,
428 is_convertible<_Up_element_type(*)[], element_type(*)[]>,
429 __or_<__and_<is_reference<deleter_type>, is_same<deleter_type, _Ep>>,
430 __and_<__not_<is_reference<deleter_type>>,
431 is_convertible<_Ep, deleter_type>>>
432 >;
433
434 // helper template for detecting a safe conversion from a raw pointer
435 template<typename _Up>
436 using __safe_conversion_raw = __and_<
437 __or_<__or_<is_same<_Up, pointer>,
438 is_same<_Up, nullptr_t>>,
439 __and_<is_pointer<_Up>,
440 is_same<pointer, element_type*>,
441 is_convertible<
442 typename remove_pointer<_Up>::type(*)[],
443 element_type(*)[]>
444 >
445 >
446 >;
447
448 // Constructors.
449
450 /// Default constructor, creates a unique_ptr that owns nothing.
451 template <typename _Up = _Dp,
452 typename = _DeleterConstraint<_Up>>
453 constexpr unique_ptr() noexcept
454 : _M_t()
455 { }
456
457 /** Takes ownership of a pointer.
458 *
459 * @param __p A pointer to an array of a type safely convertible
460 * to an array of @c element_type
461 *
462 * The deleter will be value-initialized.
463 */
464 template<typename _Up,
465 typename _Vp = _Dp,
466 typename = _DeleterConstraint<_Vp>,
467 typename = typename enable_if<
468 __safe_conversion_raw<_Up>::value, bool>::type>
469 explicit
470 unique_ptr(_Up __p) noexcept
471 : _M_t(__p)
472 { }
473
474 /** Takes ownership of a pointer.
475 *
476 * @param __p A pointer to an array of a type safely convertible
477 * to an array of @c element_type
478 * @param __d A reference to a deleter.
479 *
480 * The deleter will be initialized with @p __d
481 */
482 template<typename _Up,
483 typename = typename enable_if<
484 __safe_conversion_raw<_Up>::value, bool>::type>
485 unique_ptr(_Up __p,
486 typename conditional<is_reference<deleter_type>::value,
487 deleter_type, const deleter_type&>::type __d) noexcept
488 : _M_t(__p, __d) { }
489
490 /** Takes ownership of a pointer.
491 *
492 * @param __p A pointer to an array of a type safely convertible
493 * to an array of @c element_type
494 * @param __d A reference to a deleter.
495 *
496 * The deleter will be initialized with @p std::move(__d)
497 */
498 template<typename _Up,
499 typename = typename enable_if<
500 __safe_conversion_raw<_Up>::value, bool>::type>
501 unique_ptr(_Up __p, typename
502 remove_reference<deleter_type>::type&& __d) noexcept
503 : _M_t(std::move(__p), std::move(__d))
504 { static_assert(!is_reference<deleter_type>::value,
505 "rvalue deleter bound to reference"); }
506
507 /// Move constructor.
508 unique_ptr(unique_ptr&& __u) noexcept
509 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
510
511 /// Creates a unique_ptr that owns nothing.
512 template <typename _Up = _Dp,
513 typename = _DeleterConstraint<_Up>>
514 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
515
516 template<typename _Up, typename _Ep,
517 typename = _Require<__safe_conversion_up<_Up, _Ep>>>
518 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
519 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
520 { }
521
522 /// Destructor, invokes the deleter if the stored pointer is not null.
523 ~unique_ptr()
524 {
525 auto& __ptr = _M_t._M_ptr();
526 if (__ptr != nullptr)
527 get_deleter()(__ptr);
528 __ptr = pointer();
529 }
530
531 // Assignment.
532
533 /** @brief Move assignment operator.
534 *
535 * @param __u The object to transfer ownership from.
536 *
537 * Invokes the deleter first if this object owns a pointer.
538 */
539 unique_ptr&
540 operator=(unique_ptr&& __u) noexcept
541 {
542 reset(__u.release());
543 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
544 return *this;
545 }
546
547 /** @brief Assignment from another type.
548 *
549 * @param __u The object to transfer ownership from, which owns a
550 * convertible pointer to an array object.
551 *
552 * Invokes the deleter first if this object owns a pointer.
553 */
554 template<typename _Up, typename _Ep>
555 typename
556 enable_if<__and_<__safe_conversion_up<_Up, _Ep>,
557 is_assignable<deleter_type&, _Ep&&>
558 >::value,
559 unique_ptr&>::type
560 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
561 {
562 reset(__u.release());
563 get_deleter() = std::forward<_Ep>(__u.get_deleter());
564 return *this;
565 }
566
567 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
568 unique_ptr&
569 operator=(nullptr_t) noexcept
570 {
571 reset();
572 return *this;
573 }
574
575 // Observers.
576
577 /// Access an element of owned array.
578 typename std::add_lvalue_reference<element_type>::type
579 operator[](size_t __i) const
580 {
581 __glibcxx_assert(get() != pointer());
582 return get()[__i];
583 }
584
585 /// Return the stored pointer.
586 pointer
587 get() const noexcept
588 { return _M_t._M_ptr(); }
589
590 /// Return a reference to the stored deleter.
591 deleter_type&
592 get_deleter() noexcept
593 { return _M_t._M_deleter(); }
594
595 /// Return a reference to the stored deleter.
596 const deleter_type&
597 get_deleter() const noexcept
598 { return _M_t._M_deleter(); }
599
600 /// Return @c true if the stored pointer is not null.
601 explicit operator bool() const noexcept
602 { return get() == pointer() ? false : true; }
603
604 // Modifiers.
605
606 /// Release ownership of any stored pointer.
607 pointer
608 release() noexcept
609 {
610 pointer __p = get();
611 _M_t._M_ptr() = pointer();
612 return __p;
613 }
614
615 /** @brief Replace the stored pointer.
616 *
617 * @param __p The new pointer to store.
618 *
619 * The deleter will be invoked if a pointer is already owned.
620 */
621 template <typename _Up,
622 typename = _Require<
623 __or_<is_same<_Up, pointer>,
624 __and_<is_same<pointer, element_type*>,
625 is_pointer<_Up>,
626 is_convertible<
627 typename remove_pointer<_Up>::type(*)[],
628 element_type(*)[]
629 >
630 >
631 >
632 >>
633 void
634 reset(_Up __p) noexcept
635 {
636 pointer __ptr = __p;
637 using std::swap;
638 swap(_M_t._M_ptr(), __ptr);
639 if (__ptr != nullptr)
640 get_deleter()(__ptr);
641 }
642
643 void reset(nullptr_t = nullptr) noexcept
644 {
645 reset(pointer());
646 }
647
648 /// Exchange the pointer and deleter with another object.
649 void
650 swap(unique_ptr& __u) noexcept
651 {
652 using std::swap;
653 swap(_M_t, __u._M_t);
654 }
655
656 // Disable copy from lvalue.
657 unique_ptr(const unique_ptr&) = delete;
658 unique_ptr& operator=(const unique_ptr&) = delete;
659 };
660
661 template<typename _Tp, typename _Dp>
662 inline
663#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
664 // Constrained free swap overload, see p0185r1
665 typename enable_if<__is_swappable<_Dp>::value>::type
666#else
667 void
668#endif
669 swap(unique_ptr<_Tp, _Dp>& __x,
670 unique_ptr<_Tp, _Dp>& __y) noexcept
671 { __x.swap(__y); }
672
673#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
674 template<typename _Tp, typename _Dp>
675 typename enable_if<!__is_swappable<_Dp>::value>::type
676 swap(unique_ptr<_Tp, _Dp>&,
677 unique_ptr<_Tp, _Dp>&) = delete;
678#endif
679
680 template<typename _Tp, typename _Dp,
681 typename _Up, typename _Ep>
682 inline bool
683 operator==(const unique_ptr<_Tp, _Dp>& __x,
684 const unique_ptr<_Up, _Ep>& __y)
685 { return __x.get() == __y.get(); }
686
687 template<typename _Tp, typename _Dp>
688 inline bool
689 operator==(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
690 { return !__x; }
691
692 template<typename _Tp, typename _Dp>
693 inline bool
694 operator==(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
695 { return !__x; }
696
697 template<typename _Tp, typename _Dp,
698 typename _Up, typename _Ep>
699 inline bool
700 operator!=(const unique_ptr<_Tp, _Dp>& __x,
701 const unique_ptr<_Up, _Ep>& __y)
702 { return __x.get() != __y.get(); }
703
704 template<typename _Tp, typename _Dp>
705 inline bool
706 operator!=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
707 { return (bool)__x; }
708
709 template<typename _Tp, typename _Dp>
710 inline bool
711 operator!=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
712 { return (bool)__x; }
713
714 template<typename _Tp, typename _Dp,
715 typename _Up, typename _Ep>
716 inline bool
717 operator<(const unique_ptr<_Tp, _Dp>& __x,
718 const unique_ptr<_Up, _Ep>& __y)
719 {
720 typedef typename
721 std::common_type<typename unique_ptr<_Tp, _Dp>::pointer,
722 typename unique_ptr<_Up, _Ep>::pointer>::type _CT;
723 return std::less<_CT>()(__x.get(), __y.get());
724 }
725
726 template<typename _Tp, typename _Dp>
727 inline bool
728 operator<(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
729 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
730 nullptr); }
731
732 template<typename _Tp, typename _Dp>
733 inline bool
734 operator<(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
735 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
736 __x.get()); }
737
738 template<typename _Tp, typename _Dp,
739 typename _Up, typename _Ep>
740 inline bool
741 operator<=(const unique_ptr<_Tp, _Dp>& __x,
742 const unique_ptr<_Up, _Ep>& __y)
743 { return !(__y < __x); }
744
745 template<typename _Tp, typename _Dp>
746 inline bool
747 operator<=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
748 { return !(nullptr < __x); }
749
750 template<typename _Tp, typename _Dp>
751 inline bool
752 operator<=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
753 { return !(__x < nullptr); }
754
755 template<typename _Tp, typename _Dp,
756 typename _Up, typename _Ep>
757 inline bool
758 operator>(const unique_ptr<_Tp, _Dp>& __x,
759 const unique_ptr<_Up, _Ep>& __y)
760 { return (__y < __x); }
761
762 template<typename _Tp, typename _Dp>
763 inline bool
764 operator>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
765 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
766 __x.get()); }
767
768 template<typename _Tp, typename _Dp>
769 inline bool
770 operator>(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
771 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
772 nullptr); }
773
774 template<typename _Tp, typename _Dp,
775 typename _Up, typename _Ep>
776 inline bool
777 operator>=(const unique_ptr<_Tp, _Dp>& __x,
778 const unique_ptr<_Up, _Ep>& __y)
779 { return !(__x < __y); }
780
781 template<typename _Tp, typename _Dp>
782 inline bool
783 operator>=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
784 { return !(__x < nullptr); }
785
786 template<typename _Tp, typename _Dp>
787 inline bool
788 operator>=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
789 { return !(nullptr < __x); }
790
791 /// std::hash specialization for unique_ptr.
792 template<typename _Tp, typename _Dp>
793 struct hash<unique_ptr<_Tp, _Dp>>
794 : public __hash_base<size_t, unique_ptr<_Tp, _Dp>>,
795 private __poison_hash<typename unique_ptr<_Tp, _Dp>::pointer>
796 {
797 size_t
798 operator()(const unique_ptr<_Tp, _Dp>& __u) const noexcept
799 {
800 typedef unique_ptr<_Tp, _Dp> _UP;
801 return std::hash<typename _UP::pointer>()(__u.get());
802 }
803 };
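Because the specialization simply hashes the owned pointer value, a unique_ptr can serve directly as an unordered-container key; a small sketch (addOne is illustrative):

  #include <memory>
  #include <unordered_set>

  static void addOne(std::unordered_set<std::unique_ptr<int>> &pool) {
    // Elements must be moved in; the set then owns the allocation.
    pool.insert(std::unique_ptr<int>(new int(1)));
  }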
804
805#if __cplusplus > 201103L
806
807#define __cpp_lib_make_unique 201304
808
809 template<typename _Tp>
810 struct _MakeUniq
811 { typedef unique_ptr<_Tp> __single_object; };
812
813 template<typename _Tp>
814 struct _MakeUniq<_Tp[]>
815 { typedef unique_ptr<_Tp[]> __array; };
816
817 template<typename _Tp, size_t _Bound>
818 struct _MakeUniq<_Tp[_Bound]>
819 { struct __invalid_type { }; };
820
821 /// std::make_unique for single objects
822 template<typename _Tp, typename... _Args>
823 inline typename _MakeUniq<_Tp>::__single_object
824 make_unique(_Args&&... __args)
825 { return unique_ptr<_Tp>(new _Tp(std::forward<_Args>(__args)...)); }
826
827 /// std::make_unique for arrays of unknown bound
828 template<typename _Tp>
829 inline typename _MakeUniq<_Tp>::__array
830 make_unique(size_t __num)
831 { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__num]()); }
832
833 /// Disable std::make_unique for arrays of known bound
834 template<typename _Tp, typename... _Args>
835 inline typename _MakeUniq<_Tp>::__invalid_type
836 make_unique(_Args&&...) = delete;
837#endif
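The three overload shapes above in use (requires C++14, where this block is enabled; sketch() is illustrative):

  #include <memory>

  void sketch() {
    auto one = std::make_unique<int>(5);    // single object: new int(5)
    auto many = std::make_unique<int[]>(8); // unknown bound: new int[8]()
    // auto bad = std::make_unique<int[8]>(); // known bound: deleted overload
    (void)one; (void)many;
  }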
838
839 // @} group pointer_abstractions
840
841_GLIBCXX_END_NAMESPACE_VERSION
842} // namespace
843
844#endif /* _UNIQUE_PTR_H */

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/tuple

1// <tuple> -*- C++ -*-
2
3// Copyright (C) 2007-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/tuple
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_TUPLE
30#define _GLIBCXX_TUPLE 1
31
32#pragma GCC system_header
33
34#if __cplusplus < 201103L
35# include <bits/c++0x_warning.h>
36#else
37
38#include <utility>
39#include <array>
40#include <bits/uses_allocator.h>
41#include <bits/invoke.h>
42
43namespace std _GLIBCXX_VISIBILITY(default)
44{
45_GLIBCXX_BEGIN_NAMESPACE_VERSION
46
47 /**
48 * @addtogroup utilities
49 * @{
50 */
51
52 template<typename... _Elements>
53 class tuple;
54
55 template<typename _Tp>
56 struct __is_empty_non_tuple : is_empty<_Tp> { };
57
58 // Using EBO for elements that are tuples causes ambiguous base errors.
59 template<typename _El0, typename... _El>
60 struct __is_empty_non_tuple<tuple<_El0, _El...>> : false_type { };
61
62 // Use the Empty Base-class Optimization for empty, non-final types.
63 template<typename _Tp>
64 using __empty_not_final
65 = typename conditional<__is_final(_Tp), false_type,
66 __is_empty_non_tuple<_Tp>>::type;
67
68 template<std::size_t _Idx, typename _Head,
69 bool = __empty_not_final<_Head>::value>
70 struct _Head_base;
71
72 template<std::size_t _Idx, typename _Head>
73 struct _Head_base<_Idx, _Head, true>
74 : public _Head
75 {
76 constexpr _Head_base()
77 : _Head() { }
78
79 constexpr _Head_base(const _Head& __h)
80 : _Head(__h) { }
81
82 constexpr _Head_base(const _Head_base&) = default;
83 constexpr _Head_base(_Head_base&&) = default;
84
85 template<typename _UHead>
86 constexpr _Head_base(_UHead&& __h)
87 : _Head(std::forward<_UHead>(__h)) { }
88
89 _Head_base(allocator_arg_t, __uses_alloc0)
90 : _Head() { }
91
92 template<typename _Alloc>
93 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
94 : _Head(allocator_arg, *__a._M_a) { }
95
96 template<typename _Alloc>
97 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
98 : _Head(*__a._M_a) { }
99
100 template<typename _UHead>
101 _Head_base(__uses_alloc0, _UHead&& __uhead)
102 : _Head(std::forward<_UHead>(__uhead)) { }
103
104 template<typename _Alloc, typename _UHead>
105 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
106 : _Head(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead)) { }
107
108 template<typename _Alloc, typename _UHead>
109 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
110 : _Head(std::forward<_UHead>(__uhead), *__a._M_a) { }
111
112 static constexpr _Head&
113 _M_head(_Head_base& __b) noexcept { return __b; }
114
115 static constexpr const _Head&
116 _M_head(const _Head_base& __b) noexcept { return __b; }
117 };
118
119 template<std::size_t _Idx, typename _Head>
120 struct _Head_base<_Idx, _Head, false>
121 {
122 constexpr _Head_base()
123 : _M_head_impl() { }
124
125 constexpr _Head_base(const _Head& __h)
126 : _M_head_impl(__h) { }
127
128 constexpr _Head_base(const _Head_base&) = default;
129 constexpr _Head_base(_Head_base&&) = default;
130
131 template<typename _UHead>
132 constexpr _Head_base(_UHead&& __h)
133 : _M_head_impl(std::forward<_UHead>(__h)) { }
134
135 _Head_base(allocator_arg_t, __uses_alloc0)
136 : _M_head_impl() { }
137
138 template<typename _Alloc>
139 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
140 : _M_head_impl(allocator_arg, *__a._M_a) { }
141
142 template<typename _Alloc>
143 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
144 : _M_head_impl(*__a._M_a) { }
145
146 template<typename _UHead>
147 _Head_base(__uses_alloc0, _UHead&& __uhead)
148 : _M_head_impl(std::forward<_UHead>(__uhead)) { }
149
150 template<typename _Alloc, typename _UHead>
151 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
152 : _M_head_impl(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead))
153 { }
154
155 template<typename _Alloc, typename _UHead>
156 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
157 : _M_head_impl(std::forward<_UHead>(__uhead), *__a._M_a) { }
158
159 static constexpr _Head&
160 _M_head(_Head_base& __b) noexcept { return __b._M_head_impl; }
161
162 static constexpr const _Head&
163 _M_head(const _Head_base& __b) noexcept { return __b._M_head_impl; }
164
165 _Head _M_head_impl;
166 };
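The payoff of the EBO machinery above, as a sketch (the static_assert holds for this libstdc++ layout; the standard does not mandate it):

  #include <tuple>

  struct Stateless {}; // empty, non-final: eligible for __empty_not_final
  static_assert(sizeof(std::tuple<int, Stateless>) == sizeof(std::tuple<int>),
                "the empty element contributes no storage");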
167
168 /**
169 * Contains the actual implementation of the @c tuple template, stored
170 * as a recursive inheritance hierarchy from the first element (most
171 * derived class) to the last (least derived class). The @c Idx
172 * parameter gives the 0-based index of the element stored at this
173 * point in the hierarchy; we use it to implement a constant-time
174 * get() operation.
175 */
176 template<std::size_t _Idx, typename... _Elements>
177 struct _Tuple_impl;
178
179 /**
180 * Recursive tuple implementation. Here we store the @c Head element
181 * and derive from a @c Tuple_impl containing the remaining elements
182 * (which contains the @c Tail).
183 */
184 template<std::size_t _Idx, typename _Head, typename... _Tail>
185 struct _Tuple_impl<_Idx, _Head, _Tail...>
186 : public _Tuple_impl<_Idx + 1, _Tail...>,
187 private _Head_base<_Idx, _Head>
188 {
189 template<std::size_t, typename...> friend class _Tuple_impl;
190
191 typedef _Tuple_impl<_Idx + 1, _Tail...> _Inherited;
192 typedef _Head_base<_Idx, _Head> _Base;
193
194 static constexpr _Head&
195 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
196
197 static constexpr const _Head&
198 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
16. Calling '_Head_base::_M_head'
17. Returning from '_Head_base::_M_head'
199
200 static constexpr _Inherited&
201 _M_tail(_Tuple_impl& __t) noexcept { return __t; }
202
203 static constexpr const _Inherited&
204 _M_tail(const _Tuple_impl& __t) noexcept { return __t; }
205
206 constexpr _Tuple_impl()
207 : _Inherited(), _Base() { }
208
209 explicit
210 constexpr _Tuple_impl(const _Head& __head, const _Tail&... __tail)
211 : _Inherited(__tail...), _Base(__head) { }
212
213 template<typename _UHead, typename... _UTail, typename = typename
214 enable_if<sizeof...(_Tail) == sizeof...(_UTail)>::type>
215 explicit
216 constexpr _Tuple_impl(_UHead&& __head, _UTail&&... __tail)
217 : _Inherited(std::forward<_UTail>(__tail)...),
218 _Base(std::forward<_UHead>(__head)) { }
219
220 constexpr _Tuple_impl(const _Tuple_impl&) = default;
221
222 constexpr
223 _Tuple_impl(_Tuple_impl&& __in)
224 noexcept(__and_<is_nothrow_move_constructible<_Head>,
225 is_nothrow_move_constructible<_Inherited>>::value)
226 : _Inherited(std::move(_M_tail(__in))),
227 _Base(std::forward<_Head>(_M_head(__in))) { }
228
229 template<typename... _UElements>
230 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UElements...>& __in)
231 : _Inherited(_Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
232 _Base(_Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
233
234 template<typename _UHead, typename... _UTails>
235 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
236 : _Inherited(std::move
237 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
238 _Base(std::forward<_UHead>
239 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
240
241 template<typename _Alloc>
242 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
243 : _Inherited(__tag, __a),
244 _Base(__tag, __use_alloc<_Head>(__a)) { }
245
246 template<typename _Alloc>
247 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
248 const _Head& __head, const _Tail&... __tail)
249 : _Inherited(__tag, __a, __tail...),
250 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
251
252 template<typename _Alloc, typename _UHead, typename... _UTail,
253 typename = typename enable_if<sizeof...(_Tail)
254 == sizeof...(_UTail)>::type>
255 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
256 _UHead&& __head, _UTail&&... __tail)
257 : _Inherited(__tag, __a, std::forward<_UTail>(__tail)...),
258 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
259 std::forward<_UHead>(__head)) { }
260
261 template<typename _Alloc>
262 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
263 const _Tuple_impl& __in)
264 : _Inherited(__tag, __a, _M_tail(__in)),
265 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
266
267 template<typename _Alloc>
268 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
269 _Tuple_impl&& __in)
270 : _Inherited(__tag, __a, std::move(_M_tail(__in))),
271 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
272 std::forward<_Head>(_M_head(__in))) { }
273
274 template<typename _Alloc, typename... _UElements>
275 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
276 const _Tuple_impl<_Idx, _UElements...>& __in)
277 : _Inherited(__tag, __a,
278 _Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
279 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
280 _Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
281
282 template<typename _Alloc, typename _UHead, typename... _UTails>
283 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
284 _Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
285 : _Inherited(__tag, __a, std::move
286 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
287 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
288 std::forward<_UHead>
289 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
290
291 _Tuple_impl&
292 operator=(const _Tuple_impl& __in)
293 {
294 _M_head(*this) = _M_head(__in);
295 _M_tail(*this) = _M_tail(__in);
296 return *this;
297 }
298
299 _Tuple_impl&
300 operator=(_Tuple_impl&& __in)
301 noexcept(__and_<is_nothrow_move_assignable<_Head>,
302 is_nothrow_move_assignable<_Inherited>>::value)
303 {
304 _M_head(*this) = std::forward<_Head>(_M_head(__in));
305 _M_tail(*this) = std::move(_M_tail(__in));
306 return *this;
307 }
308
309 template<typename... _UElements>
310 _Tuple_impl&
311 operator=(const _Tuple_impl<_Idx, _UElements...>& __in)
312 {
313 _M_head(*this) = _Tuple_impl<_Idx, _UElements...>::_M_head(__in);
314 _M_tail(*this) = _Tuple_impl<_Idx, _UElements...>::_M_tail(__in);
315 return *this;
316 }
317
318 template<typename _UHead, typename... _UTails>
319 _Tuple_impl&
320 operator=(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
321 {
322 _M_head(*this) = std::forward<_UHead>
323 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in));
324 _M_tail(*this) = std::move
325 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in));
326 return *this;
327 }
328
329 protected:
330 void
331 _M_swap(_Tuple_impl& __in)
332 noexcept(__is_nothrow_swappable<_Head>::value
333 && noexcept(_M_tail(__in)._M_swap(_M_tail(__in))))
334 {
335 using std::swap;
336 swap(_M_head(*this), _M_head(__in));
337 _Inherited::_M_swap(_M_tail(__in));
338 }
339 };
340
341 // Basis case of inheritance recursion.
342 template<std::size_t _Idx, typename _Head>
343 struct _Tuple_impl<_Idx, _Head>
344 : private _Head_base<_Idx, _Head>
345 {
346 template<std::size_t, typename...> friend class _Tuple_impl;
347
348 typedef _Head_base<_Idx, _Head> _Base;
349
350 static constexpr _Head&
351 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
352
353 static constexpr const _Head&
354 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
355
356 constexpr _Tuple_impl()
357 : _Base() { }
358
359 explicit
360 constexpr _Tuple_impl(const _Head& __head)
361 : _Base(__head) { }
362
363 template<typename _UHead>
364 explicit
365 constexpr _Tuple_impl(_UHead&& __head)
366 : _Base(std::forward<_UHead>(__head)) { }
367
368 constexpr _Tuple_impl(const _Tuple_impl&) = default;
369
370 constexpr
371 _Tuple_impl(_Tuple_impl&& __in)
372 noexcept(is_nothrow_move_constructible<_Head>::value)
373 : _Base(std::forward<_Head>(_M_head(__in))) { }
374
375 template<typename _UHead>
376 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UHead>& __in)
377 : _Base(_Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
378
379 template<typename _UHead>
380 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead>&& __in)
381 : _Base(std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
382 { }
383
384 template<typename _Alloc>
385 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
386 : _Base(__tag, __use_alloc<_Head>(__a)) { }
387
388 template<typename _Alloc>
389 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
390 const _Head& __head)
391 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
392
393 template<typename _Alloc, typename _UHead>
394 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
395 _UHead&& __head)
396 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
397 std::forward<_UHead>(__head)) { }
398
399 template<typename _Alloc>
400 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
401 const _Tuple_impl& __in)
402 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
403
404 template<typename _Alloc>
405 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
406 _Tuple_impl&& __in)
407 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
408 std::forward<_Head>(_M_head(__in))) { }
409
410 template<typename _Alloc, typename _UHead>
411 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
412 const _Tuple_impl<_Idx, _UHead>& __in)
413 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
414 _Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
415
416 template<typename _Alloc, typename _UHead>
417 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
418 _Tuple_impl<_Idx, _UHead>&& __in)
419 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
420 std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
421 { }
422
423 _Tuple_impl&
424 operator=(const _Tuple_impl& __in)
425 {
426 _M_head(*this) = _M_head(__in);
427 return *this;
428 }
429
430 _Tuple_impl&
431 operator=(_Tuple_impl&& __in)
432 noexcept(is_nothrow_move_assignable<_Head>::value)
433 {
434 _M_head(*this) = std::forward<_Head>(_M_head(__in));
435 return *this;
436 }
437
438 template<typename _UHead>
439 _Tuple_impl&
440 operator=(const _Tuple_impl<_Idx, _UHead>& __in)
441 {
442 _M_head(*this) = _Tuple_impl<_Idx, _UHead>::_M_head(__in);
443 return *this;
444 }
445
446 template<typename _UHead>
447 _Tuple_impl&
448 operator=(_Tuple_impl<_Idx, _UHead>&& __in)
449 {
450 _M_head(*this)
451 = std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in));
452 return *this;
453 }
454
455 protected:
456 void
457 _M_swap(_Tuple_impl& __in)
458 noexcept(__is_nothrow_swappable<_Head>::value)
459 {
460 using std::swap;
461 swap(_M_head(*this), _M_head(__in));
462 }
463 };
464
465 // Concept utility functions, reused in conditionally-explicit
466 // constructors.
467 template<bool, typename... _Elements>
468 struct _TC
469 {
470 template<typename... _UElements>
471 static constexpr bool _ConstructibleTuple()
472 {
473 return __and_<is_constructible<_Elements, const _UElements&>...>::value;
474 }
475
476 template<typename... _UElements>
477 static constexpr bool _ImplicitlyConvertibleTuple()
478 {
479 return __and_<is_convertible<const _UElements&, _Elements>...>::value;
480 }
481
482 template<typename... _UElements>
483 static constexpr bool _MoveConstructibleTuple()
484 {
485 return __and_<is_constructible<_Elements, _UElements&&>...>::value;
486 }
487
488 template<typename... _UElements>
489 static constexpr bool _ImplicitlyMoveConvertibleTuple()
490 {
491 return __and_<is_convertible<_UElements&&, _Elements>...>::value;
492 }
493
494 template<typename _SrcTuple>
495 static constexpr bool _NonNestedTuple()
496 {
497 return __and_<__not_<is_same<tuple<_Elements...>,
498 typename remove_cv<
499 typename remove_reference<_SrcTuple>::type
500 >::type>>,
501 __not_<is_convertible<_SrcTuple, _Elements...>>,
502 __not_<is_constructible<_Elements..., _SrcTuple>>
503 >::value;
504 }
505 template<typename... _UElements>
506 static constexpr bool _NotSameTuple()
507 {
508 return __not_<is_same<tuple<_Elements...>,
509 typename remove_const<
510 typename remove_reference<_UElements...>::type
511 >::type>>::value;
512 }
513 };
514
515 template<typename... _Elements>
516 struct _TC<false, _Elements...>
517 {
518 template<typename... _UElements>
519 static constexpr bool _ConstructibleTuple()
520 {
521 return false;
522 }
523
524 template<typename... _UElements>
525 static constexpr bool _ImplicitlyConvertibleTuple()
526 {
527 return false;
528 }
529
530 template<typename... _UElements>
531 static constexpr bool _MoveConstructibleTuple()
532 {
533 return false;
534 }
535
536 template<typename... _UElements>
537 static constexpr bool _ImplicitlyMoveConvertibleTuple()
538 {
539 return false;
540 }
541
542 template<typename... _UElements>
543 static constexpr bool _NonNestedTuple()
544 {
545 return true;
546 }
547 template<typename... _UElements>
548 static constexpr bool _NotSameTuple()
549 {
550 return true;
551 }
552 };
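
Aside (illustration only): the _TC predicates above gate implicit versus explicit tuple constructors. A small sketch, where the type Expl is hypothetical:

    #include <tuple>

    struct Expl { explicit Expl(int) {} };  // constructible from int, not convertible

    int main() {
      std::tuple<int>  a = {42};  // copy-init OK: _ImplicitlyConvertibleTuple() holds
      std::tuple<Expl> b{42};     // direct-init only: _ConstructibleTuple() holds,
                                  // but the implicit overload SFINAEs away
      // std::tuple<Expl> c = {42};  // would not compile
      (void)a; (void)b;
    }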
553
554 /// Primary class template, tuple
555 template<typename... _Elements>
556 class tuple : public _Tuple_impl<0, _Elements...>
557 {
558 typedef _Tuple_impl<0, _Elements...> _Inherited;
559
560 // Used for constraining the default constructor so
561 // that it becomes dependent on the constraints.
562 template<typename _Dummy>
563 struct _TC2
564 {
565 static constexpr bool _DefaultConstructibleTuple()
566 {
567 return __and_<is_default_constructible<_Elements>...>::value;
568 }
569 static constexpr bool _ImplicitlyDefaultConstructibleTuple()
570 {
571 return __and_<__is_implicitly_default_constructible<_Elements>...>
572 ::value;
573 }
574 };
575
576 public:
577 template<typename _Dummy = void,
578 typename enable_if<_TC2<_Dummy>::
579 _ImplicitlyDefaultConstructibleTuple(),
580 bool>::type = true>
581 constexpr tuple()
582 : _Inherited() { }
583
584 template<typename _Dummy = void,
585 typename enable_if<_TC2<_Dummy>::
586 _DefaultConstructibleTuple()
587 &&
588 !_TC2<_Dummy>::
589 _ImplicitlyDefaultConstructibleTuple(),
590 bool>::type = false>
591 explicit constexpr tuple()
592 : _Inherited() { }
593
594 // Shortcut for the cases where constructors taking _Elements...
595 // need to be constrained.
596 template<typename _Dummy> using _TCC =
597 _TC<is_same<_Dummy, void>::value,
598 _Elements...>;
599
600 template<typename _Dummy = void,
601 typename enable_if<
602 _TCC<_Dummy>::template
603 _ConstructibleTuple<_Elements...>()
604 && _TCC<_Dummy>::template
605 _ImplicitlyConvertibleTuple<_Elements...>()
606 && (sizeof...(_Elements) >= 1),
607 bool>::type=true>
608 constexpr tuple(const _Elements&... __elements)
609 : _Inherited(__elements...) { }
610
611 template<typename _Dummy = void,
612 typename enable_if<
613 _TCC<_Dummy>::template
614 _ConstructibleTuple<_Elements...>()
615 && !_TCC<_Dummy>::template
616 _ImplicitlyConvertibleTuple<_Elements...>()
617 && (sizeof...(_Elements) >= 1),
618 bool>::type=false>
619 explicit constexpr tuple(const _Elements&... __elements)
620 : _Inherited(__elements...) { }
621
622 // Shortcut for the cases where constructors taking _UElements...
623 // need to be constrained.
624 template<typename... _UElements> using _TMC =
625 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
626 && (_TC<(sizeof...(_UElements)==1), _Elements...>::
627 template _NotSameTuple<_UElements...>()),
628 _Elements...>;
629
630 // Shortcut for the cases where constructors taking tuple<_UElements...>
631 // need to be constrained.
632 template<typename... _UElements> using _TMCT =
633 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
634 && !is_same<tuple<_Elements...>,
635 tuple<_UElements...>>::value,
636 _Elements...>;
637
638 template<typename... _UElements, typename
639 enable_if<
640 _TMC<_UElements...>::template
641 _MoveConstructibleTuple<_UElements...>()
642 && _TMC<_UElements...>::template
643 _ImplicitlyMoveConvertibleTuple<_UElements...>()
644 && (sizeof...(_Elements) >= 1),
645 bool>::type=true>
646 constexpr tuple(_UElements&&... __elements)
647 : _Inherited(std::forward<_UElements>(__elements)...) { }
648
649 template<typename... _UElements, typename
650 enable_if<
651 _TMC<_UElements...>::template
652 _MoveConstructibleTuple<_UElements...>()
653 && !_TMC<_UElements...>::template
654 _ImplicitlyMoveConvertibleTuple<_UElements...>()
655 && (sizeof...(_Elements) >= 1),
656 bool>::type=false>
657 explicit constexpr tuple(_UElements&&... __elements)
658 : _Inherited(std::forward<_UElements>(__elements)...) { }
659
660 constexpr tuple(const tuple&) = default;
661
662 constexpr tuple(tuple&&) = default;
663
664 // Shortcut for the cases where constructors taking tuples
665 // must avoid creating temporaries.
666 template<typename _Dummy> using _TNTC =
667 _TC<is_same<_Dummy, void>::value && sizeof...(_Elements) == 1,
668 _Elements...>;
669
670 template<typename... _UElements, typename _Dummy = void, typename
671 enable_if<_TMCT<_UElements...>::template
672 _ConstructibleTuple<_UElements...>()
673 && _TMCT<_UElements...>::template
674 _ImplicitlyConvertibleTuple<_UElements...>()
675 && _TNTC<_Dummy>::template
676 _NonNestedTuple<const tuple<_UElements...>&>(),
677 bool>::type=true>
678 constexpr tuple(const tuple<_UElements...>& __in)
679 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
680 { }
681
682 template<typename... _UElements, typename _Dummy = void, typename
683 enable_if<_TMCT<_UElements...>::template
684 _ConstructibleTuple<_UElements...>()
685 && !_TMCT<_UElements...>::template
686 _ImplicitlyConvertibleTuple<_UElements...>()
687 && _TNTC<_Dummy>::template
688 _NonNestedTuple<const tuple<_UElements...>&>(),
689 bool>::type=false>
690 explicit constexpr tuple(const tuple<_UElements...>& __in)
691 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
692 { }
693
694 template<typename... _UElements, typename _Dummy = void, typename
695 enable_if<_TMCT<_UElements...>::template
696 _MoveConstructibleTuple<_UElements...>()
697 && _TMCT<_UElements...>::template
698 _ImplicitlyMoveConvertibleTuple<_UElements...>()
699 && _TNTC<_Dummy>::template
700 _NonNestedTuple<tuple<_UElements...>&&>(),
701 bool>::type=true>
702 constexpr tuple(tuple<_UElements...>&& __in)
703 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
704
705 template<typename... _UElements, typename _Dummy = void, typename
706 enable_if<_TMCT<_UElements...>::template
707 _MoveConstructibleTuple<_UElements...>()
708 && !_TMCT<_UElements...>::template
709 _ImplicitlyMoveConvertibleTuple<_UElements...>()
710 && _TNTC<_Dummy>::template
711 _NonNestedTuple<tuple<_UElements...>&&>(),
712 bool>::type=false>
713 explicit constexpr tuple(tuple<_UElements...>&& __in)
714 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
715
716 // Allocator-extended constructors.
717
718 template<typename _Alloc>
719 tuple(allocator_arg_t __tag, const _Alloc& __a)
720 : _Inherited(__tag, __a) { }
721
722 template<typename _Alloc, typename _Dummy = void,
723 typename enable_if<
724 _TCC<_Dummy>::template
725 _ConstructibleTuple<_Elements...>()
726 && _TCC<_Dummy>::template
727 _ImplicitlyConvertibleTuple<_Elements...>(),
728 bool>::type=true>
729 tuple(allocator_arg_t __tag, const _Alloc& __a,
730 const _Elements&... __elements)
731 : _Inherited(__tag, __a, __elements...) { }
732
733 template<typename _Alloc, typename _Dummy = void,
734 typename enable_if<
735 _TCC<_Dummy>::template
736 _ConstructibleTuple<_Elements...>()
737 && !_TCC<_Dummy>::template
738 _ImplicitlyConvertibleTuple<_Elements...>(),
739 bool>::type=false>
740 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
741 const _Elements&... __elements)
742 : _Inherited(__tag, __a, __elements...) { }
743
744 template<typename _Alloc, typename... _UElements, typename
745 enable_if<_TMC<_UElements...>::template
746 _MoveConstructibleTuple<_UElements...>()
747 && _TMC<_UElements...>::template
748 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
749 bool>::type=true>
750 tuple(allocator_arg_t __tag, const _Alloc& __a,
751 _UElements&&... __elements)
752 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
753 { }
754
755 template<typename _Alloc, typename... _UElements, typename
756 enable_if<_TMC<_UElements...>::template
757 _MoveConstructibleTuple<_UElements...>()
758 && !_TMC<_UElements...>::template
759 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
760 bool>::type=false>
761 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
762 _UElements&&... __elements)
763 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
764 { }
765
766 template<typename _Alloc>
767 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
768 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
769
770 template<typename _Alloc>
771 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
772 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
773
774 template<typename _Alloc, typename _Dummy = void,
775 typename... _UElements, typename
776 enable_if<_TMCT<_UElements...>::template
777 _ConstructibleTuple<_UElements...>()
778 && _TMCT<_UElements...>::template
779 _ImplicitlyConvertibleTuple<_UElements...>()
780 && _TNTC<_Dummy>::template
781 _NonNestedTuple<tuple<_UElements...>&&>(),
782 bool>::type=true>
783 tuple(allocator_arg_t __tag, const _Alloc& __a,
784 const tuple<_UElements...>& __in)
785 : _Inherited(__tag, __a,
786 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
787 { }
788
789 template<typename _Alloc, typename _Dummy = void,
790 typename... _UElements, typename
791 enable_if<_TMCT<_UElements...>::template
792 _ConstructibleTuple<_UElements...>()
793 && !_TMCT<_UElements...>::template
794 _ImplicitlyConvertibleTuple<_UElements...>()
795 && _TNTC<_Dummy>::template
796 _NonNestedTuple<tuple<_UElements...>&&>(),
797 bool>::type=false>
798 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
799 const tuple<_UElements...>& __in)
800 : _Inherited(__tag, __a,
801 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
802 { }
803
804 template<typename _Alloc, typename _Dummy = void,
805 typename... _UElements, typename
806 enable_if<_TMCT<_UElements...>::template
807 _MoveConstructibleTuple<_UElements...>()
808 && _TMCT<_UElements...>::template
809 _ImplicitlyMoveConvertibleTuple<_UElements...>()
810 && _TNTC<_Dummy>::template
811 _NonNestedTuple<tuple<_UElements...>&&>(),
812 bool>::type=true>
813 tuple(allocator_arg_t __tag, const _Alloc& __a,
814 tuple<_UElements...>&& __in)
815 : _Inherited(__tag, __a,
816 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
817 { }
818
819 template<typename _Alloc, typename _Dummy = void,
820 typename... _UElements, typename
821 enable_if<_TMCT<_UElements...>::template
822 _MoveConstructibleTuple<_UElements...>()
823 && !_TMCT<_UElements...>::template
824 _ImplicitlyMoveConvertibleTuple<_UElements...>()
825 && _TNTC<_Dummy>::template
826 _NonNestedTuple<tuple<_UElements...>&&>(),
827 bool>::type=false>
828 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
829 tuple<_UElements...>&& __in)
830 : _Inherited(__tag, __a,
831 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
832 { }
833
834 tuple&
835 operator=(const tuple& __in)
836 {
837 static_cast<_Inherited&>(*this) = __in;
838 return *this;
839 }
840
841 tuple&
842 operator=(tuple&& __in)
843 noexcept(is_nothrow_move_assignable<_Inherited>::value)
844 {
845 static_cast<_Inherited&>(*this) = std::move(__in);
846 return *this;
847 }
848
849 template<typename... _UElements>
850 typename
851 enable_if<sizeof...(_UElements)
852 == sizeof...(_Elements), tuple&>::type
853 operator=(const tuple<_UElements...>& __in)
854 {
855 static_cast<_Inherited&>(*this) = __in;
856 return *this;
857 }
858
859 template<typename... _UElements>
860 typename
861 enable_if<sizeof...(_UElements)
862 == sizeof...(_Elements), tuple&>::type
863 operator=(tuple<_UElements...>&& __in)
864 {
865 static_cast<_Inherited&>(*this) = std::move(__in);
866 return *this;
867 }
868
869 void
870 swap(tuple& __in)
871 noexcept(noexcept(__in._M_swap(__in)))
872 { _Inherited::_M_swap(__in); }
873 };
874
875#if __cpp_deduction_guides >= 201606
876 template<typename... _UTypes>
877 tuple(_UTypes...) -> tuple<_UTypes...>;
878 template<typename _T1, typename _T2>
879 tuple(pair<_T1, _T2>) -> tuple<_T1, _T2>;
880 template<typename _Alloc, typename... _UTypes>
881 tuple(allocator_arg_t, _Alloc, _UTypes...) -> tuple<_UTypes...>;
882 template<typename _Alloc, typename _T1, typename _T2>
883 tuple(allocator_arg_t, _Alloc, pair<_T1, _T2>) -> tuple<_T1, _T2>;
884 template<typename _Alloc, typename... _UTypes>
885 tuple(allocator_arg_t, _Alloc, tuple<_UTypes...>) -> tuple<_UTypes...>;
886#endif
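
Aside (illustration only; these deduction guides are inactive in this -std=c++11 build and need a C++17 compiler):

    #include <tuple>
    #include <type_traits>
    #include <utility>

    int main() {
      std::tuple t{1, 2.5, 'c'};          // deduced via tuple(_UTypes...)
      static_assert(std::is_same_v<decltype(t), std::tuple<int, double, char>>);

      std::tuple u = std::pair{1, 'a'};   // the pair guide strips the pair
      static_assert(std::is_same_v<decltype(u), std::tuple<int, char>>);
    }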
887
888 // Explicit specialization, zero-element tuple.
889 template<>
890 class tuple<>
891 {
892 public:
893 void swap(tuple&) noexcept { /* no-op */ }
894 // We need the default since we're going to define no-op
895 // allocator constructors.
896 tuple() = default;
897 // No-op allocator constructors.
898 template<typename _Alloc>
899 tuple(allocator_arg_t, const _Alloc&) { }
900 template<typename _Alloc>
901 tuple(allocator_arg_t, const _Alloc&, const tuple&) { }
902 };
903
904 /// Partial specialization, 2-element tuple.
905 /// Includes construction and assignment from a pair.
906 template<typename _T1, typename _T2>
907 class tuple<_T1, _T2> : public _Tuple_impl<0, _T1, _T2>
908 {
909 typedef _Tuple_impl<0, _T1, _T2> _Inherited;
910
911 public:
912 template <typename _U1 = _T1,
913 typename _U2 = _T2,
914 typename enable_if<__and_<
915 __is_implicitly_default_constructible<_U1>,
916 __is_implicitly_default_constructible<_U2>>
917 ::value, bool>::type = true>
918
919 constexpr tuple()
920 : _Inherited() { }
921
922 template <typename _U1 = _T1,
923 typename _U2 = _T2,
924 typename enable_if<
925 __and_<
926 is_default_constructible<_U1>,
927 is_default_constructible<_U2>,
928 __not_<
929 __and_<__is_implicitly_default_constructible<_U1>,
930 __is_implicitly_default_constructible<_U2>>>>
931 ::value, bool>::type = false>
932
933 explicit constexpr tuple()
934 : _Inherited() { }
935
936 // Shortcut for the cases where constructors taking _T1, _T2
937 // need to be constrained.
938 template<typename _Dummy> using _TCC =
939 _TC<is_same<_Dummy, void>::value, _T1, _T2>;
940
941 template<typename _Dummy = void, typename
942 enable_if<_TCC<_Dummy>::template
943 _ConstructibleTuple<_T1, _T2>()
944 && _TCC<_Dummy>::template
945 _ImplicitlyConvertibleTuple<_T1, _T2>(),
946 bool>::type = true>
947 constexpr tuple(const _T1& __a1, const _T2& __a2)
948 : _Inherited(__a1, __a2) { }
949
950 template<typename _Dummy = void, typename
951 enable_if<_TCC<_Dummy>::template
952 _ConstructibleTuple<_T1, _T2>()
953 && !_TCC<_Dummy>::template
954 _ImplicitlyConvertibleTuple<_T1, _T2>(),
955 bool>::type = false>
956 explicit constexpr tuple(const _T1& __a1, const _T2& __a2)
957 : _Inherited(__a1, __a2) { }
958
959 // Shortcut for the cases where constructors taking _U1, _U2
960 // need to be constrained.
961 using _TMC = _TC<true, _T1, _T2>;
962
963 template<typename _U1, typename _U2, typename
964 enable_if<_TMC::template
965 _MoveConstructibleTuple<_U1, _U2>()
966 && _TMC::template
967 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
968 && !is_same<typename decay<_U1>::type,
969 allocator_arg_t>::value,
970 bool>::type = true>
971 constexpr tuple(_U1&& __a1, _U2&& __a2)
972 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
973
974 template<typename _U1, typename _U2, typename
975 enable_if<_TMC::template
976 _MoveConstructibleTuple<_U1, _U2>()
977 && !_TMC::template
978 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
979 && !is_same<typename decay<_U1>::type,
980 allocator_arg_t>::value,
981 bool>::type = false>
982 explicit constexpr tuple(_U1&& __a1, _U2&& __a2)
983 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
984
985 constexpr tuple(const tuple&) = default;
986
987 constexpr tuple(tuple&&) = default;
988
989 template<typename _U1, typename _U2, typename
990 enable_if<_TMC::template
991 _ConstructibleTuple<_U1, _U2>()
992 && _TMC::template
993 _ImplicitlyConvertibleTuple<_U1, _U2>(),
994 bool>::type = true>
995 constexpr tuple(const tuple<_U1, _U2>& __in)
996 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
997
998 template<typename _U1, typename _U2, typename
999 enable_if<_TMC::template
1000 _ConstructibleTuple<_U1, _U2>()
1001 && !_TMC::template
1002 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1003 bool>::type = false>
1004 explicit constexpr tuple(const tuple<_U1, _U2>& __in)
1005 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
1006
1007 template<typename _U1, typename _U2, typename
1008 enable_if<_TMC::template
1009 _MoveConstructibleTuple<_U1, _U2>()
1010 && _TMC::template
1011 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1012 bool>::type = true>
1013 constexpr tuple(tuple<_U1, _U2>&& __in)
1014 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1015
1016 template<typename _U1, typename _U2, typename
1017 enable_if<_TMC::template
1018 _MoveConstructibleTuple<_U1, _U2>()
1019 && !_TMC::template
1020 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1021 bool>::type = false>
1022 explicit constexpr tuple(tuple<_U1, _U2>&& __in)
1023 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1024
1025 template<typename _U1, typename _U2, typename
1026 enable_if<_TMC::template
1027 _ConstructibleTuple<_U1, _U2>()
1028 && _TMC::template
1029 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1030 bool>::type = true>
1031 constexpr tuple(const pair<_U1, _U2>& __in)
1032 : _Inherited(__in.first, __in.second) { }
1033
1034 template<typename _U1, typename _U2, typename
1035 enable_if<_TMC::template
1036 _ConstructibleTuple<_U1, _U2>()
1037 && !_TMC::template
1038 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1039 bool>::type = false>
1040 explicit constexpr tuple(const pair<_U1, _U2>& __in)
1041 : _Inherited(__in.first, __in.second) { }
1042
1043 template<typename _U1, typename _U2, typename
1044 enable_if<_TMC::template
1045 _MoveConstructibleTuple<_U1, _U2>()
1046 && _TMC::template
1047 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1048 bool>::type = true>
1049 constexpr tuple(pair<_U1, _U2>&& __in)
1050 : _Inherited(std::forward<_U1>(__in.first),
1051 std::forward<_U2>(__in.second)) { }
1052
1053 template<typename _U1, typename _U2, typename
1054 enable_if<_TMC::template
1055 _MoveConstructibleTuple<_U1, _U2>()
1056 && !_TMC::template
1057 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1058 bool>::type = false>
1059 explicit constexpr tuple(pair<_U1, _U2>&& __in)
1060 : _Inherited(std::forward<_U1>(__in.first),
1061 std::forward<_U2>(__in.second)) { }
1062
1063 // Allocator-extended constructors.
1064
1065 template<typename _Alloc>
1066 tuple(allocator_arg_t __tag, const _Alloc& __a)
1067 : _Inherited(__tag, __a) { }
1068
1069 template<typename _Alloc, typename _Dummy = void,
1070 typename enable_if<
1071 _TCC<_Dummy>::template
1072 _ConstructibleTuple<_T1, _T2>()
1073 && _TCC<_Dummy>::template
1074 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1075 bool>::type=true>
1076
1077 tuple(allocator_arg_t __tag, const _Alloc& __a,
1078 const _T1& __a1, const _T2& __a2)
1079 : _Inherited(__tag, __a, __a1, __a2) { }
1080
1081 template<typename _Alloc, typename _Dummy = void,
1082 typename enable_if<
1083 _TCC<_Dummy>::template
1084 _ConstructibleTuple<_T1, _T2>()
1085 && !_TCC<_Dummy>::template
1086 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1087 bool>::type=false>
1088
1089 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1090 const _T1& __a1, const _T2& __a2)
1091 : _Inherited(__tag, __a, __a1, __a2) { }
1092
1093 template<typename _Alloc, typename _U1, typename _U2, typename
1094 enable_if<_TMC::template
1095 _MoveConstructibleTuple<_U1, _U2>()
1096 && _TMC::template
1097 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1098 bool>::type = true>
1099 tuple(allocator_arg_t __tag, const _Alloc& __a, _U1&& __a1, _U2&& __a2)
1100 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1101 std::forward<_U2>(__a2)) { }
1102
1103 template<typename _Alloc, typename _U1, typename _U2, typename
1104 enable_if<_TMC::template
1105 _MoveConstructibleTuple<_U1, _U2>()
1106 && !_TMC::template
1107 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1108 bool>::type = false>
1109 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1110 _U1&& __a1, _U2&& __a2)
1111 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1112 std::forward<_U2>(__a2)) { }
1113
1114 template<typename _Alloc>
1115 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
1116 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
1117
1118 template<typename _Alloc>
1119 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
1120 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
1121
1122 template<typename _Alloc, typename _U1, typename _U2, typename
1123 enable_if<_TMC::template
1124 _ConstructibleTuple<_U1, _U2>()
1125 && _TMC::template
1126 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1127 bool>::type = true>
1128 tuple(allocator_arg_t __tag, const _Alloc& __a,
1129 const tuple<_U1, _U2>& __in)
1130 : _Inherited(__tag, __a,
1131 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1132 { }
1133
1134 template<typename _Alloc, typename _U1, typename _U2, typename
1135 enable_if<_TMC::template
1136 _ConstructibleTuple<_U1, _U2>()
1137 && !_TMC::template
1138 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1139 bool>::type = false>
1140 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1141 const tuple<_U1, _U2>& __in)
1142 : _Inherited(__tag, __a,
1143 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1144 { }
1145
1146 template<typename _Alloc, typename _U1, typename _U2, typename
1147 enable_if<_TMC::template
1148 _MoveConstructibleTuple<_U1, _U2>()
1149 && _TMC::template
1150 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1151 bool>::type = true>
1152 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple<_U1, _U2>&& __in)
1153 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1154 { }
1155
1156 template<typename _Alloc, typename _U1, typename _U2, typename
1157 enable_if<_TMC::template
1158 _MoveConstructibleTuple<_U1, _U2>()
1159 && !_TMC::template
1160 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1161 bool>::type = false>
1162 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1163 tuple<_U1, _U2>&& __in)
1164 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1165 { }
1166
1167 template<typename _Alloc, typename _U1, typename _U2, typename
1168 enable_if<_TMC::template
1169 _ConstructibleTuple<_U1, _U2>()
1170 && _TMC::template
1171 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1172 bool>::type = true>
1173 tuple(allocator_arg_t __tag, const _Alloc& __a,
1174 const pair<_U1, _U2>& __in)
1175 : _Inherited(__tag, __a, __in.first, __in.second) { }
1176
1177 template<typename _Alloc, typename _U1, typename _U2, typename
1178 enable_if<_TMC::template
1179 _ConstructibleTuple<_U1, _U2>()
1180 && !_TMC::template
1181 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1182 bool>::type = false>
1183 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1184 const pair<_U1, _U2>& __in)
1185 : _Inherited(__tag, __a, __in.first, __in.second) { }
1186
1187 template<typename _Alloc, typename _U1, typename _U2, typename
1188 enable_if<_TMC::template
1189 _MoveConstructibleTuple<_U1, _U2>()
1190 && _TMC::template
1191 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1192 bool>::type = true>
1193 tuple(allocator_arg_t __tag, const _Alloc& __a, pair<_U1, _U2>&& __in)
1194 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1195 std::forward<_U2>(__in.second)) { }
1196
1197 template<typename _Alloc, typename _U1, typename _U2, typename
1198 enable_if<_TMC::template
1199 _MoveConstructibleTuple<_U1, _U2>()
1200 && !_TMC::template
1201 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1202 bool>::type = false>
1203 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1204 pair<_U1, _U2>&& __in)
1205 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1206 std::forward<_U2>(__in.second)) { }
1207
1208 tuple&
1209 operator=(const tuple& __in)
1210 {
1211 static_cast<_Inherited&>(*this) = __in;
1212 return *this;
1213 }
1214
1215 tuple&
1216 operator=(tuple&& __in)
1217 noexcept(is_nothrow_move_assignable<_Inherited>::value)
1218 {
1219 static_cast<_Inherited&>(*this) = std::move(__in);
1220 return *this;
1221 }
1222
1223 template<typename _U1, typename _U2>
1224 tuple&
1225 operator=(const tuple<_U1, _U2>& __in)
1226 {
1227 static_cast<_Inherited&>(*this) = __in;
1228 return *this;
1229 }
1230
1231 template<typename _U1, typename _U2>
1232 tuple&
1233 operator=(tuple<_U1, _U2>&& __in)
1234 {
1235 static_cast<_Inherited&>(*this) = std::move(__in);
1236 return *this;
1237 }
1238
1239 template<typename _U1, typename _U2>
1240 tuple&
1241 operator=(const pair<_U1, _U2>& __in)
1242 {
1243 this->_M_head(*this) = __in.first;
1244 this->_M_tail(*this)._M_head(*this) = __in.second;
1245 return *this;
1246 }
1247
1248 template<typename _U1, typename _U2>
1249 tuple&
1250 operator=(pair<_U1, _U2>&& __in)
1251 {
1252 this->_M_head(*this) = std::forward<_U1>(__in.first);
1253 this->_M_tail(*this)._M_head(*this) = std::forward<_U2>(__in.second);
1254 return *this;
1255 }
1256
1257 void
1258 swap(tuple& __in)
1259 noexcept(noexcept(__in._M_swap(__in)))
1260 { _Inherited::_M_swap(__in); }
1261 };
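
Aside (illustration only): the pair-aware constructors and assignment operators above let a 2-element tuple and a pair interconvert:

    #include <string>
    #include <tuple>
    #include <utility>

    int main() {
      std::pair<int, std::string> p(1, "one");
      std::tuple<int, std::string> t(p);          // tuple(const pair<_U1,_U2>&)
      t = std::make_pair(2, std::string("two"));  // operator=(pair<_U1,_U2>&&)
      return std::get<0>(t) == 2 ? 0 : 1;
    }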
1262
1263
1264 /// class tuple_size
1265 template<typename... _Elements>
1266 struct tuple_size<tuple<_Elements...>>
1267 : public integral_constant<std::size_t, sizeof...(_Elements)> { };
1268
1269#if __cplusplus > 201402L
1270 template <typename _Tp>
1271 inline constexpr size_t tuple_size_v = tuple_size<_Tp>::value;
1272#endif
1273
1274 /**
1275 * Recursive case for tuple_element: strip off the first element in
1276 * the tuple and retrieve the (i-1)th element of the remaining tuple.
1277 */
1278 template<std::size_t __i, typename _Head, typename... _Tail>
1279 struct tuple_element<__i, tuple<_Head, _Tail...> >
1280 : tuple_element<__i - 1, tuple<_Tail...> > { };
1281
1282 /**
1283 * Basis case for tuple_element: The first element is the one we're seeking.
1284 */
1285 template<typename _Head, typename... _Tail>
1286 struct tuple_element<0, tuple<_Head, _Tail...> >
1287 {
1288 typedef _Head type;
1289 };
1290
1291 /**
1292 * Error case for tuple_element: invalid index.
1293 */
1294 template<size_t __i>
1295 struct tuple_element<__i, tuple<>>
1296 {
1297 static_assert(__i < tuple_size<tuple<>>::value,
1298 "tuple index is in range");
1299 };
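
Aside (illustration only): tracing the recursion above for index 1 of a three-element tuple:

    #include <tuple>
    #include <type_traits>

    // tuple_element<1, tuple<int, char, double>>
    //   -> tuple_element<0, tuple<char, double>>   (recursive case strips int)
    //   -> type = char                             (basis case)
    static_assert(std::is_same<
        std::tuple_element<1, std::tuple<int, char, double>>::type,
        char>::value, "recursion terminates at char");

    int main() {}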
1300
1301 template<std::size_t __i, typename _Head, typename... _Tail>
1302 constexpr _Head&
1303 __get_helper(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1304 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1305
1306 template<std::size_t __i, typename _Head, typename... _Tail>
1307 constexpr const _Head&
1308 __get_helper(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1309 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
15. Calling '_Tuple_impl::_M_head'
18. Returning from '_Tuple_impl::_M_head'
1310
1311 /// Return a reference to the ith element of a tuple.
1312 template<std::size_t __i, typename... _Elements>
1313 constexpr __tuple_element_t<__i, tuple<_Elements...>>&
1314 get(tuple<_Elements...>& __t) noexcept
1315 { return std::__get_helper<__i>(__t); }
1316
1317 /// Return a const reference to the ith element of a const tuple.
1318 template<std::size_t __i, typename... _Elements>
1319 constexpr const __tuple_element_t<__i, tuple<_Elements...>>&
1320 get(const tuple<_Elements...>& __t) noexcept
1321 { return std::__get_helper<__i>(__t); }
14. Calling '__get_helper'
19. Returning from '__get_helper'
1322
1323 /// Return an rvalue reference to the ith element of a tuple rvalue.
1324 template<std::size_t __i, typename... _Elements>
1325 constexpr __tuple_element_t<__i, tuple<_Elements...>>&&
1326 get(tuple<_Elements...>&& __t) noexcept
1327 {
1328 typedef __tuple_element_t<__i, tuple<_Elements...>> __element_type;
1329 return std::forward<__element_type&&>(std::get<__i>(__t));
1330 }
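
Aside (illustration only): the three get() overloads above select on the value category of the tuple:

    #include <tuple>
    #include <utility>

    int main() {
      std::tuple<int, double> t(1, 2.5);
      std::get<0>(t) = 7;                    // lvalue overload: writable reference
      double d = std::get<1>(std::move(t));  // rvalue overload: element moved out
      return (std::get<0>(t) == 7 && d == 2.5) ? 0 : 1;
    }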
1331
1332#if __cplusplus > 201103L
1333
1334#define __cpp_lib_tuples_by_type 201304
1335
1336 template<typename _Head, size_t __i, typename... _Tail>
1337 constexpr _Head&
1338 __get_helper2(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1339 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1340
1341 template<typename _Head, size_t __i, typename... _Tail>
1342 constexpr const _Head&
1343 __get_helper2(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1344 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1345
1346 /// Return a reference to the unique element of type _Tp of a tuple.
1347 template <typename _Tp, typename... _Types>
1348 constexpr _Tp&
1349 get(tuple<_Types...>& __t) noexcept
1350 { return std::__get_helper2<_Tp>(__t); }
1351
1352 /// Return a reference to the unique element of type _Tp of a tuple rvalue.
1353 template <typename _Tp, typename... _Types>
1354 constexpr _Tp&&
1355 get(tuple<_Types...>&& __t) noexcept
1356 { return std::forward<_Tp&&>(std::__get_helper2<_Tp>(__t)); }
1357
1358 /// Return a const reference to the unique element of type _Tp of a tuple.
1359 template <typename _Tp, typename... _Types>
1360 constexpr const _Tp&
1361 get(const tuple<_Types...>& __t) noexcept
1362 { return std::__get_helper2<_Tp>(__t); }
1363#endif
1364
1365 // This class performs the comparison operations on tuples
1366 template<typename _Tp, typename _Up, size_t __i, size_t __size>
1367 struct __tuple_compare
1368 {
1369 static constexpr bool
1370 __eq(const _Tp& __t, const _Up& __u)
1371 {
1372 return bool(std::get<__i>(__t) == std::get<__i>(__u))
1373 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__eq(__t, __u);
1374 }
1375
1376 static constexpr bool
1377 __less(const _Tp& __t, const _Up& __u)
1378 {
1379 return bool(std::get<__i>(__t) < std::get<__i>(__u))
1380 || (!bool(std::get<__i>(__u) < std::get<__i>(__t))
1381 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__less(__t, __u));
1382 }
1383 };
1384
1385 template<typename _Tp, typename _Up, size_t __size>
1386 struct __tuple_compare<_Tp, _Up, __size, __size>
1387 {
1388 static constexpr bool
1389 __eq(const _Tp&, const _Up&) { return true; }
1390
1391 static constexpr bool
1392 __less(const _Tp&, const _Up&) { return false; }
1393 };
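
Aside (illustration only): __tuple_compare recurses element-wise, so ordering is lexicographic:

    #include <tuple>

    int main() {
      std::tuple<int, char> a(1, 'b'), b(1, 'c');
      // Index 0 ties (1 == 1); index 1 decides, since 'b' < 'c'.
      return (a < b && a != b && b >= a) ? 0 : 1;
    }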
1394
1395 template<typename... _TElements, typename... _UElements>
1396 constexpr bool
1397 operator==(const tuple<_TElements...>& __t,
1398 const tuple<_UElements...>& __u)
1399 {
1400 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1401 "tuple objects can only be compared if they have equal sizes.");
1402 using __compare = __tuple_compare<tuple<_TElements...>,
1403 tuple<_UElements...>,
1404 0, sizeof...(_TElements)>;
1405 return __compare::__eq(__t, __u);
1406 }
1407
1408 template<typename... _TElements, typename... _UElements>
1409 constexpr bool
1410 operator<(const tuple<_TElements...>& __t,
1411 const tuple<_UElements...>& __u)
1412 {
1413 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1414 "tuple objects can only be compared if they have equal sizes.");
1415 using __compare = __tuple_compare<tuple<_TElements...>,
1416 tuple<_UElements...>,
1417 0, sizeof...(_TElements)>;
1418 return __compare::__less(__t, __u);
1419 }
1420
1421 template<typename... _TElements, typename... _UElements>
1422 constexpr bool
1423 operator!=(const tuple<_TElements...>& __t,
1424 const tuple<_UElements...>& __u)
1425 { return !(__t == __u); }
1426
1427 template<typename... _TElements, typename... _UElements>
1428 constexpr bool
1429 operator>(const tuple<_TElements...>& __t,
1430 const tuple<_UElements...>& __u)
1431 { return __u < __t; }
1432
1433 template<typename... _TElements, typename... _UElements>
1434 constexpr bool
1435 operator<=(const tuple<_TElements...>& __t,
1436 const tuple<_UElements...>& __u)
1437 { return !(__u < __t); }
1438
1439 template<typename... _TElements, typename... _UElements>
1440 constexpr bool
1441 operator>=(const tuple<_TElements...>& __t,
1442 const tuple<_UElements...>& __u)
1443 { return !(__t < __u); }
1444
1445 // NB: DR 705.
1446 template<typename... _Elements>
1447 constexpr tuple<typename __decay_and_strip<_Elements>::__type...>
1448 make_tuple(_Elements&&... __args)
1449 {
1450 typedef tuple<typename __decay_and_strip<_Elements>::__type...>
1451 __result_type;
1452 return __result_type(std::forward<_Elements>(__args)...);
1453 }
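
Aside (illustration only): make_tuple decays its arguments and unwraps reference_wrapper via __decay_and_strip:

    #include <functional>
    #include <tuple>
    #include <type_traits>

    int main() {
      int i = 0;
      auto t = std::make_tuple(i, "hi", std::ref(i));
      // int stays int, the string literal decays to const char*,
      // and reference_wrapper<int> is stripped to int&.
      static_assert(std::is_same<decltype(t),
                                 std::tuple<int, const char*, int&>>::value, "");
      (void)t;
    }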
1454
1455 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1456 // 2275. Why is forward_as_tuple not constexpr?
1457 template<typename... _Elements>
1458 constexpr tuple<_Elements&&...>
1459 forward_as_tuple(_Elements&&... __args) noexcept
1460 { return tuple<_Elements&&...>(std::forward<_Elements>(__args)...); }
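
Aside (illustration only): unlike make_tuple, forward_as_tuple keeps references; the helper consume below is hypothetical:

    #include <string>
    #include <tuple>

    template<typename Tup>
    void consume(Tup&& t) { std::get<0>(t) += "!"; }  // mutates through the reference

    int main() {
      std::string s = "abc";
      consume(std::forward_as_tuple(s, 42));  // tuple<std::string&, int&&>, no copies
      return s == "abc!" ? 0 : 1;
    }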
1461
1462 template<size_t, typename, typename, size_t>
1463 struct __make_tuple_impl;
1464
1465 template<size_t _Idx, typename _Tuple, typename... _Tp, size_t _Nm>
1466 struct __make_tuple_impl<_Idx, tuple<_Tp...>, _Tuple, _Nm>
1467 : __make_tuple_impl<_Idx + 1,
1468 tuple<_Tp..., __tuple_element_t<_Idx, _Tuple>>,
1469 _Tuple, _Nm>
1470 { };
1471
1472 template<std::size_t _Nm, typename _Tuple, typename... _Tp>
1473 struct __make_tuple_impl<_Nm, tuple<_Tp...>, _Tuple, _Nm>
1474 {
1475 typedef tuple<_Tp...> __type;
1476 };
1477
1478 template<typename _Tuple>
1479 struct __do_make_tuple
1480 : __make_tuple_impl<0, tuple<>, _Tuple, std::tuple_size<_Tuple>::value>
1481 { };
1482
1483 // Returns the std::tuple equivalent of a tuple-like type.
1484 template<typename _Tuple>
1485 struct __make_tuple
1486 : public __do_make_tuple<typename std::remove_cv
1487 <typename std::remove_reference<_Tuple>::type>::type>
1488 { };
1489
1490 // Combines several std::tuple's into a single one.
1491 template<typename...>
1492 struct __combine_tuples;
1493
1494 template<>
1495 struct __combine_tuples<>
1496 {
1497 typedef tuple<> __type;
1498 };
1499
1500 template<typename... _Ts>
1501 struct __combine_tuples<tuple<_Ts...>>
1502 {
1503 typedef tuple<_Ts...> __type;
1504 };
1505
1506 template<typename... _T1s, typename... _T2s, typename... _Rem>
1507 struct __combine_tuples<tuple<_T1s...>, tuple<_T2s...>, _Rem...>
1508 {
1509 typedef typename __combine_tuples<tuple<_T1s..., _T2s...>,
1510 _Rem...>::__type __type;
1511 };
1512
1513 // Computes the result type of tuple_cat given a set of tuple-like types.
1514 template<typename... _Tpls>
1515 struct __tuple_cat_result
1516 {
1517 typedef typename __combine_tuples
1518 <typename __make_tuple<_Tpls>::__type...>::__type __type;
1519 };
1520
1521 // Helper to determine the index set for the first tuple-like
1522 // type of a given set.
1523 template<typename...>
1524 struct __make_1st_indices;
1525
1526 template<>
1527 struct __make_1st_indices<>
1528 {
1529 typedef std::_Index_tuple<> __type;
1530 };
1531
1532 template<typename _Tp, typename... _Tpls>
1533 struct __make_1st_indices<_Tp, _Tpls...>
1534 {
1535 typedef typename std::_Build_index_tuple<std::tuple_size<
1536 typename std::remove_reference<_Tp>::type>::value>::__type __type;
1537 };
1538
1539 // Performs the actual concatenation by step-wise expanding tuple-like
1540 // objects into the elements, which are finally forwarded into the
1541 // result tuple.
1542 template<typename _Ret, typename _Indices, typename... _Tpls>
1543 struct __tuple_concater;
1544
1545 template<typename _Ret, std::size_t... _Is, typename _Tp, typename... _Tpls>
1546 struct __tuple_concater<_Ret, std::_Index_tuple<_Is...>, _Tp, _Tpls...>
1547 {
1548 template<typename... _Us>
1549 static constexpr _Ret
1550 _S_do(_Tp&& __tp, _Tpls&&... __tps, _Us&&... __us)
1551 {
1552 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1553 typedef __tuple_concater<_Ret, __idx, _Tpls...> __next;
1554 return __next::_S_do(std::forward<_Tpls>(__tps)...,
1555 std::forward<_Us>(__us)...,
1556 std::get<_Is>(std::forward<_Tp>(__tp))...);
1557 }
1558 };
1559
1560 template<typename _Ret>
1561 struct __tuple_concater<_Ret, std::_Index_tuple<>>
1562 {
1563 template<typename... _Us>
1564 static constexpr _Ret
1565 _S_do(_Us&&... __us)
1566 {
1567 return _Ret(std::forward<_Us>(__us)...);
1568 }
1569 };
1570
1571 /// tuple_cat
1572 template<typename... _Tpls, typename = typename
1573 enable_if<__and_<__is_tuple_like<_Tpls>...>::value>::type>
1574 constexpr auto
1575 tuple_cat(_Tpls&&... __tpls)
1576 -> typename __tuple_cat_result<_Tpls...>::__type
1577 {
1578 typedef typename __tuple_cat_result<_Tpls...>::__type __ret;
1579 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1580 typedef __tuple_concater<__ret, __idx, _Tpls...> __concater;
1581 return __concater::_S_do(std::forward<_Tpls>(__tpls)...);
1582 }
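
Aside (illustration only): the concatenation machinery above flattens any number of tuple-like arguments:

    #include <tuple>
    #include <type_traits>

    int main() {
      auto t = std::tuple_cat(std::make_tuple(1, 'a'),
                              std::make_tuple(2.5),
                              std::tuple<>());       // empty tuples contribute nothing
      static_assert(std::is_same<decltype(t),
                                 std::tuple<int, char, double>>::value, "");
      return std::get<2>(t) == 2.5 ? 0 : 1;
    }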
1583
1584 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1585 // 2301. Why is tie not constexpr?
1586 /// tie
1587 template<typename... _Elements>
1588 constexpr tuple<_Elements&...>
1589 tie(_Elements&... __args) noexcept
1590 { return tuple<_Elements&...>(__args...); }
1591
1592 /// swap
1593 template<typename... _Elements>
1594 inline
1595#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
1596 // Constrained free swap overload, see p0185r1
1597 typename enable_if<__and_<__is_swappable<_Elements>...>::value
1598 >::type
1599#else
1600 void
1601#endif
1602 swap(tuple<_Elements...>& __x, tuple<_Elements...>& __y)
1603 noexcept(noexcept(__x.swap(__y)))
1604 { __x.swap(__y); }
1605
1606#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
1607 template<typename... _Elements>
1608 typename enable_if<!__and_<__is_swappable<_Elements>...>::value>::type
1609 swap(tuple<_Elements...>&, tuple<_Elements...>&) = delete;
1610#endif
1611
1612 // A class (and instance) which can be used in 'tie' when an element
1613 // of a tuple is not required.
1614 // _GLIBCXX14_CONSTEXPR
1615 // 2933. PR for LWG 2773 could be clearer
1616 struct _Swallow_assign
1617 {
1618 template<class _Tp>
1619 _GLIBCXX14_CONSTEXPR const _Swallow_assign&
1620 operator=(const _Tp&) const
1621 { return *this; }
1622 };
1623
1624 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1625 // 2773. Making std::ignore constexpr
1626 _GLIBCXX17_INLINE constexpr _Swallow_assign ignore{};
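
Aside (illustration only): tie plus ignore, the usual multi-return idiom these definitions enable:

    #include <tuple>

    int main() {
      int q, r;
      // tie builds tuple<int&, int&>; assignment writes through the references,
      // and _Swallow_assign discards the middle element.
      std::tie(q, std::ignore, r) = std::make_tuple(3, -1, 1);
      return (q == 3 && r == 1) ? 0 : 1;
    }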
1627
1628 /// Partial specialization for tuples
1629 template<typename... _Types, typename _Alloc>
1630 struct uses_allocator<tuple<_Types...>, _Alloc> : true_type { };
1631
1632 // See stl_pair.h...
1633 template<class _T1, class _T2>
1634 template<typename... _Args1, typename... _Args2>
1635 inline
1636 pair<_T1, _T2>::
1637 pair(piecewise_construct_t,
1638 tuple<_Args1...> __first, tuple<_Args2...> __second)
1639 : pair(__first, __second,
1640 typename _Build_index_tuple<sizeof...(_Args1)>::__type(),
1641 typename _Build_index_tuple<sizeof...(_Args2)>::__type())
1642 { }
1643
1644 template<class _T1, class _T2>
1645 template<typename... _Args1, std::size_t... _Indexes1,
1646 typename... _Args2, std::size_t... _Indexes2>
1647 inline
1648 pair<_T1, _T2>::
1649 pair(tuple<_Args1...>& __tuple1, tuple<_Args2...>& __tuple2,
1650 _Index_tuple<_Indexes1...>, _Index_tuple<_Indexes2...>)
1651 : first(std::forward<_Args1>(std::get<_Indexes1>(__tuple1))...),
1652 second(std::forward<_Args2>(std::get<_Indexes2>(__tuple2))...)
1653 { }
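
Aside (illustration only): the two pair constructors above implement piecewise construction, unpacking each tuple into one member's constructor:

    #include <string>
    #include <tuple>
    #include <utility>

    int main() {
      std::pair<std::string, std::string> p(std::piecewise_construct,
                                            std::forward_as_tuple(3, 'x'),
                                            std::forward_as_tuple("hi"));
      // first is built as string(3, 'x') == "xxx"; second as string("hi").
      return (p.first == "xxx" && p.second == "hi") ? 0 : 1;
    }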
1654
1655#if __cplusplus > 201402L
1656# define __cpp_lib_apply 201603
1657
1658 template <typename _Fn, typename _Tuple, size_t... _Idx>
1659 constexpr decltype(auto)
1660 __apply_impl(_Fn&& __f, _Tuple&& __t, index_sequence<_Idx...>)
1661 {
1662 return std::__invoke(std::forward<_Fn>(__f),
1663 std::get<_Idx>(std::forward<_Tuple>(__t))...);
1664 }
1665
1666 template <typename _Fn, typename _Tuple>
1667 constexpr decltype(auto)
1668 apply(_Fn&& __f, _Tuple&& __t)
1669 {
1670 using _Indices = make_index_sequence<tuple_size_v<decay_t<_Tuple>>>;
1671 return std::__apply_impl(std::forward<_Fn>(__f),
1672 std::forward<_Tuple>(__t),
1673 _Indices{});
1674 }
1675
1676#define __cpp_lib_make_from_tuple 201606
1677
1678 template <typename _Tp, typename _Tuple, size_t... _Idx>
1679 constexpr _Tp
1680 __make_from_tuple_impl(_Tuple&& __t, index_sequence<_Idx...>)
1681 { return _Tp(std::get<_Idx>(std::forward<_Tuple>(__t))...); }
1682
1683 template <typename _Tp, typename _Tuple>
1684 constexpr _Tp
1685 make_from_tuple(_Tuple&& __t)
1686 {
1687 return __make_from_tuple_impl<_Tp>(
1688 std::forward<_Tuple>(__t),
1689 make_index_sequence<tuple_size_v<decay_t<_Tuple>>>{});
1690 }
1691#endif // C++17
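
Aside (illustration only; the block above is inactive under -std=c++11 and needs C++17): apply unpacks a tuple into a call, make_from_tuple into a constructor. Point and add are hypothetical:

    #include <tuple>

    struct Point { int x, y; Point(int a, int b) : x(a), y(b) {} };
    int add(int a, int b) { return a + b; }

    int main() {
      int s = std::apply(add, std::make_tuple(2, 3));               // add(2, 3)
      Point p = std::make_from_tuple<Point>(std::make_tuple(4, 5)); // Point(4, 5)
      return (s == 5 && p.x == 4 && p.y == 5) ? 0 : 1;
    }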
1692
1693 /// @}
1694
1695_GLIBCXX_END_NAMESPACE_VERSION
1696} // namespace std
1697
1698#endif // C++11
1699
1700#endif // _GLIBCXX_TUPLE