Bug Summary

File: lib/CodeGen/InlineSpiller.cpp
Warning: line 1238, column 14
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InlineSpiller.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/CodeGen -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/InlineSpiller.cpp -faddrsig
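
The line above is the clang -cc1 invocation that scan-build recorded for this
translation unit. As a sketch of how such a report is typically produced,
assuming a configured build tree, scan-build wraps the build command and passes
the same checker list shown above, e.g.:

    scan-build -enable-checker nullability.NullPassedToNonnull \
               -enable-checker nullability.NullReturnedFromNonnull \
               make

(The use of make here is illustrative only; any build command works.)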

/build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/InlineSpiller.cpp

1//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// The inline spiller modifies the machine function directly instead of
11// inserting spills and restores in VirtRegMap.
12//
13//===----------------------------------------------------------------------===//
14
15#include "LiveRangeCalc.h"
16#include "Spiller.h"
17#include "SplitKit.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/SmallPtrSet.h"
25#include "llvm/ADT/SmallVector.h"
26#include "llvm/ADT/Statistic.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/CodeGen/LiveInterval.h"
29#include "llvm/CodeGen/LiveIntervals.h"
30#include "llvm/CodeGen/LiveRangeEdit.h"
31#include "llvm/CodeGen/LiveStacks.h"
32#include "llvm/CodeGen/MachineBasicBlock.h"
33#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
34#include "llvm/CodeGen/MachineDominators.h"
35#include "llvm/CodeGen/MachineFunction.h"
36#include "llvm/CodeGen/MachineFunctionPass.h"
37#include "llvm/CodeGen/MachineInstr.h"
38#include "llvm/CodeGen/MachineInstrBuilder.h"
39#include "llvm/CodeGen/MachineInstrBundle.h"
40#include "llvm/CodeGen/MachineLoopInfo.h"
41#include "llvm/CodeGen/MachineOperand.h"
42#include "llvm/CodeGen/MachineRegisterInfo.h"
43#include "llvm/CodeGen/SlotIndexes.h"
44#include "llvm/CodeGen/TargetInstrInfo.h"
45#include "llvm/CodeGen/TargetOpcodes.h"
46#include "llvm/CodeGen/TargetRegisterInfo.h"
47#include "llvm/CodeGen/TargetSubtargetInfo.h"
48#include "llvm/CodeGen/VirtRegMap.h"
49#include "llvm/Config/llvm-config.h"
50#include "llvm/Support/BlockFrequency.h"
51#include "llvm/Support/BranchProbability.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Compiler.h"
54#include "llvm/Support/Debug.h"
55#include "llvm/Support/ErrorHandling.h"
56#include "llvm/Support/raw_ostream.h"
57#include <cassert>
58#include <iterator>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65#define DEBUG_TYPE "regalloc"
66
67STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
68STATISTIC(NumSnippets, "Number of spilled snippets");
69STATISTIC(NumSpills, "Number of spills inserted");
70STATISTIC(NumSpillsRemoved, "Number of spills removed");
71STATISTIC(NumReloads, "Number of reloads inserted");
72STATISTIC(NumReloadsRemoved, "Number of reloads removed");
73STATISTIC(NumFolded, "Number of folded stack accesses");
74STATISTIC(NumFoldedLoads, "Number of folded loads");
75STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
76
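
Each STATISTIC invocation above declares a static counter in the "regalloc"
group. The analyzer's macro expansion of line 69, for example, reads:

    static llvm::Statistic NumSpills = {"regalloc", "NumSpills",
                                        "Number of spills inserted", {0}, {false}};

These counters are printed when LLVM tools are run with statistics enabled.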
77static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
78 cl::desc("Disable inline spill hoisting"));
79
80namespace {
81
82class HoistSpillHelper : private LiveRangeEdit::Delegate {
83 MachineFunction &MF;
84 LiveIntervals &LIS;
85 LiveStacks &LSS;
86 AliasAnalysis *AA;
87 MachineDominatorTree &MDT;
88 MachineLoopInfo &Loops;
89 VirtRegMap &VRM;
90 MachineRegisterInfo &MRI;
91 const TargetInstrInfo &TII;
92 const TargetRegisterInfo &TRI;
93 const MachineBlockFrequencyInfo &MBFI;
94
95 InsertPointAnalysis IPA;
96
97 // Map from StackSlot to the LiveInterval of the original register.
98 // Note the LiveInterval of the original register may have been deleted
99 // after it is spilled. We keep a copy here to track the range where
100 // spills can be moved.
101 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
102
103 // Map from pair of (StackSlot and Original VNI) to a set of spills which
104 // have the same stackslot and have equal values defined by Original VNI.
105 // These spills are mergeable and are hoist candidates.
106 using MergeableSpillsMap =
107 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
108 MergeableSpillsMap MergeableSpills;
109
110 /// This is the map from original register to a set containing all its
111 /// siblings. To hoist a spill to another BB, we need to find a live
112 /// sibling there and use it as the source of the new spill.
113 DenseMap<unsigned, SmallSetVector<unsigned, 16>> Virt2SiblingsMap;
114
115 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
116 MachineBasicBlock &BB, unsigned &LiveReg);
117
118 void rmRedundantSpills(
119 SmallPtrSet<MachineInstr *, 16> &Spills,
120 SmallVectorImpl<MachineInstr *> &SpillsToRm,
121 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
122
123 void getVisitOrders(
124 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
125 SmallVectorImpl<MachineDomTreeNode *> &Orders,
126 SmallVectorImpl<MachineInstr *> &SpillsToRm,
127 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
128 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
129
130 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
131 SmallPtrSet<MachineInstr *, 16> &Spills,
132 SmallVectorImpl<MachineInstr *> &SpillsToRm,
133 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
134
135public:
136 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
137 VirtRegMap &vrm)
138 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
139 LSS(pass.getAnalysis<LiveStacks>()),
140 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
141 MDT(pass.getAnalysis<MachineDominatorTree>()),
142 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
143 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
144 TRI(*mf.getSubtarget().getRegisterInfo()),
145 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
146 IPA(LIS, mf.getNumBlockIDs()) {}
147
148 void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
149 unsigned Original);
150 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
151 void hoistAllSpills();
152 void LRE_DidCloneVirtReg(unsigned, unsigned) override;
153};
154
155class InlineSpiller : public Spiller {
156 MachineFunction &MF;
157 LiveIntervals &LIS;
158 LiveStacks &LSS;
159 AliasAnalysis *AA;
160 MachineDominatorTree &MDT;
161 MachineLoopInfo &Loops;
162 VirtRegMap &VRM;
163 MachineRegisterInfo &MRI;
164 const TargetInstrInfo &TII;
165 const TargetRegisterInfo &TRI;
166 const MachineBlockFrequencyInfo &MBFI;
167
168 // Variables that are valid during spill(), but used by multiple methods.
169 LiveRangeEdit *Edit;
170 LiveInterval *StackInt;
171 int StackSlot;
172 unsigned Original;
173
174 // All registers to spill to StackSlot, including the main register.
175 SmallVector<unsigned, 8> RegsToSpill;
176
177 // All COPY instructions to/from snippets.
178 // They are ignored since both operands refer to the same stack slot.
179 SmallPtrSet<MachineInstr*, 8> SnippetCopies;
180
181 // Values that failed to remat at some point.
182 SmallPtrSet<VNInfo*, 8> UsedValues;
183
184 // Dead defs generated during spilling.
185 SmallVector<MachineInstr*, 8> DeadDefs;
186
187 // Object records spills information and does the hoisting.
188 HoistSpillHelper HSpiller;
189
190 ~InlineSpiller() override = default;
191
192public:
193 InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
194 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
195 LSS(pass.getAnalysis<LiveStacks>()),
196 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
197 MDT(pass.getAnalysis<MachineDominatorTree>()),
198 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
199 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
200 TRI(*mf.getSubtarget().getRegisterInfo()),
201 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
202 HSpiller(pass, mf, vrm) {}
203
204 void spill(LiveRangeEdit &) override;
205 void postOptimization() override;
206
207private:
208 bool isSnippet(const LiveInterval &SnipLI);
209 void collectRegsToSpill();
210
211 bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }
212
213 bool isSibling(unsigned Reg);
214 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
215 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
216
217 void markValueUsed(LiveInterval*, VNInfo*);
218 bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
219 void reMaterializeAll();
220
221 bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
222 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
223 MachineInstr *LoadMI = nullptr);
224 void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
225 void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
226
227 void spillAroundUses(unsigned Reg);
228 void spillAll();
229};
230
231} // end anonymous namespace
232
233Spiller::~Spiller() = default;
234
235void Spiller::anchor() {}
236
237Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
238 MachineFunction &mf,
239 VirtRegMap &vrm) {
240 return new InlineSpiller(pass, mf, vrm);
241}
242
243//===----------------------------------------------------------------------===//
244// Snippets
245//===----------------------------------------------------------------------===//
246
247// When spilling a virtual register, we also spill any snippets it is connected
248// to. The snippets are small live ranges that only have a single real use,
249// leftovers from live range splitting. Spilling them enables memory operand
250// folding or tightens the live range around the single use.
251//
252// This minimizes register pressure and maximizes the store-to-load distance for
253// spill slots which can be important in tight loops.
254
255/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
256/// otherwise return 0.
257static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
258 if (!MI.isFullCopy())
259 return 0;
260 if (MI.getOperand(0).getReg() == Reg)
261 return MI.getOperand(1).getReg();
262 if (MI.getOperand(1).getReg() == Reg)
263 return MI.getOperand(0).getReg();
264 return 0;
265}
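
Call sites in this file depend on the zero return value acting as a
"not a full copy" sentinel; a sketch mirroring the uses in
eliminateRedundantSpills and spillAroundUses below:

    if (unsigned OtherReg = isFullCopyOf(MI, Reg)) {
      // MI is a full COPY, and OtherReg is the register on the other side.
    }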
266
267/// isSnippet - Identify if a live interval is a snippet that should be spilled.
268/// It is assumed that SnipLI is a virtual register with the same original as
269/// Edit->getReg().
270bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
271 unsigned Reg = Edit->getReg();
272
273 // A snippet is a tiny live range with only a single instruction using it
274 // besides copies to/from Reg or spills/fills. We accept:
275 //
276 // %snip = COPY %Reg / FILL fi#
277 // %snip = USE %snip
278 // %Reg = COPY %snip / SPILL %snip, fi#
279 //
280 if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
281 return false;
282
283 MachineInstr *UseMI = nullptr;
284
285 // Check that all uses satisfy our criteria.
286 for (MachineRegisterInfo::reg_instr_nodbg_iterator
287 RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
288 E = MRI.reg_instr_nodbg_end(); RI != E; ) {
289 MachineInstr &MI = *RI++;
290
291 // Allow copies to/from Reg.
292 if (isFullCopyOf(MI, Reg))
293 continue;
294
295 // Allow stack slot loads.
296 int FI;
297 if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
298 continue;
299
300 // Allow stack slot stores.
301 if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
302 continue;
303
304 // Allow a single additional instruction.
305 if (UseMI && &MI != UseMI)
306 return false;
307 UseMI = &MI;
308 }
309 return true;
310}
311
312/// collectRegsToSpill - Collect live range snippets that only have a single
313/// real use.
314void InlineSpiller::collectRegsToSpill() {
315 unsigned Reg = Edit->getReg();
316
317 // Main register always spills.
318 RegsToSpill.assign(1, Reg);
319 SnippetCopies.clear();
320
321 // Snippets all have the same original, so there can't be any for an original
322 // register.
323 if (Original == Reg)
324 return;
325
326 for (MachineRegisterInfo::reg_instr_iterator
327 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
328 MachineInstr &MI = *RI++;
329 unsigned SnipReg = isFullCopyOf(MI, Reg);
330 if (!isSibling(SnipReg))
331 continue;
332 LiveInterval &SnipLI = LIS.getInterval(SnipReg);
333 if (!isSnippet(SnipLI))
334 continue;
335 SnippetCopies.insert(&MI);
336 if (isRegToSpill(SnipReg))
337 continue;
338 RegsToSpill.push_back(SnipReg);
339 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
340 ++NumSnippets;
341 }
342}
343
344bool InlineSpiller::isSibling(unsigned Reg) {
345 return TargetRegisterInfo::isVirtualRegister(Reg) &&
346 VRM.getOriginal(Reg) == Original;
347}
348
349/// It is beneficial to spill to an earlier place in the same BB in a case
350/// such as the following:
351/// there is an alternative def earlier in the same MBB.
352/// Hoist the spill as far as possible in SpillMBB. This can ease
353/// register pressure:
354///
355/// x = def
356/// y = use x
357/// s = copy x
358///
359/// Hoisting the spill of s to immediately after the def removes the
360/// interference between x and y:
361///
362/// x = def
363/// spill x
364/// y = use killed x
365///
366/// This hoist only helps when the copy kills its source.
367///
368bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
369 MachineInstr &CopyMI) {
370 SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
371#ifndef NDEBUG
372 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
373 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
374#endif
375
376 unsigned SrcReg = CopyMI.getOperand(1).getReg();
377 LiveInterval &SrcLI = LIS.getInterval(SrcReg);
378 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
379 LiveQueryResult SrcQ = SrcLI.Query(Idx);
380 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
381 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
382 return false;
383
384 // Conservatively extend the stack slot range to the range of the original
385 // value. We may be able to do better with stack slot coloring by being more
386 // careful here.
387 assert(StackInt && "No stack slot assigned yet.");
388 LiveInterval &OrigLI = LIS.getInterval(Original);
389 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
390 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
391 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
392 << *StackInt << '\n');
393
394 // We are going to spill SrcVNI immediately after its def, so clear out
395 // any later spills of the same value.
396 eliminateRedundantSpills(SrcLI, SrcVNI);
397
398 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
399 MachineBasicBlock::iterator MII;
400 if (SrcVNI->isPHIDef())
401 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
402 else {
403 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
404 assert(DefMI && "Defining instruction disappeared");
405 MII = DefMI;
406 ++MII;
407 }
408 // Insert spill without kill flag immediately after def.
409 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
410 MRI.getRegClass(SrcReg), &TRI);
411 --MII; // Point to store instruction.
412 LIS.InsertMachineInstrInMaps(*MII);
413 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
414
415 HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
416 ++NumSpills;
417 return true;
418}
419
420/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
421/// redundant spills of this value in SLI.reg and sibling copies.
422void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
423 assert(VNI && "Missing value");
424 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
425 WorkList.push_back(std::make_pair(&SLI, VNI));
426 assert(StackInt && "No stack slot assigned yet.");
427
428 do {
429 LiveInterval *LI;
430 std::tie(LI, VNI) = WorkList.pop_back_val();
431 unsigned Reg = LI->reg;
432 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
433 << VNI->def << " in " << *LI << '\n');
434
435 // Regs to spill are taken care of.
436 if (isRegToSpill(Reg))
437 continue;
438
439 // Add all of VNI's live range to StackInt.
440 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
441 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
442
443 // Find all spills and copies of VNI.
444 for (MachineRegisterInfo::use_instr_nodbg_iterator
445 UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
446 UI != E; ) {
447 MachineInstr &MI = *UI++;
448 if (!MI.isCopy() && !MI.mayStore())
449 continue;
450 SlotIndex Idx = LIS.getInstructionIndex(MI);
451 if (LI->getVNInfoAt(Idx) != VNI)
452 continue;
453
454 // Follow sibling copies down the dominator tree.
455 if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
456 if (isSibling(DstReg)) {
457 LiveInterval &DstLI = LIS.getInterval(DstReg);
458 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
459 assert(DstVNI && "Missing defined value");
460 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
461 WorkList.push_back(std::make_pair(&DstLI, DstVNI));
462 }
463 continue;
464 }
465
466 // Erase spills.
467 int FI;
468 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
469 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
470 // eliminateDeadDefs won't normally remove stores, so switch opcode.
471 MI.setDesc(TII.get(TargetOpcode::KILL));
472 DeadDefs.push_back(&MI);
473 ++NumSpillsRemoved;
474 if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
475 --NumSpills;
476 }
477 }
478 } while (!WorkList.empty());
479}
480
481//===----------------------------------------------------------------------===//
482// Rematerialization
483//===----------------------------------------------------------------------===//
484
485/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
486/// instruction cannot be eliminated. See through snippet copies.
487void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
488 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
489 WorkList.push_back(std::make_pair(LI, VNI));
490 do {
491 std::tie(LI, VNI) = WorkList.pop_back_val();
492 if (!UsedValues.insert(VNI).second)
493 continue;
494
495 if (VNI->isPHIDef()) {
496 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
497 for (MachineBasicBlock *P : MBB->predecessors()) {
498 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
499 if (PVNI)
500 WorkList.push_back(std::make_pair(LI, PVNI));
501 }
502 continue;
503 }
504
505 // Follow snippet copies.
506 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
507 if (!SnippetCopies.count(MI))
508 continue;
509 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
510 assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
511 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
512 assert(SnipVNI && "Snippet undefined before copy");
513 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
514 } while (!WorkList.empty());
515}
516
517/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
518bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
519 // Analyze instruction
520 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
521 MIBundleOperands::VirtRegInfo RI =
522 MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
523
524 if (!RI.Reads)
525 return false;
526
527 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
528 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
529
530 if (!ParentVNI) {
531 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
532 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
533 MachineOperand &MO = MI.getOperand(i);
534 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
535 MO.setIsUndef();
536 }
537 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
538 return true;
539 }
540
541 if (SnippetCopies.count(&MI))
542 return false;
543
544 LiveInterval &OrigLI = LIS.getInterval(Original);
545 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
546 LiveRangeEdit::Remat RM(ParentVNI);
547 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
548
549 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
550 markValueUsed(&VirtReg, ParentVNI);
551 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
552 return false;
553 }
554
555 // If the instruction also writes VirtReg.reg, it had better not require the
556 // same register for uses and defs.
557 if (RI.Tied) {
558 markValueUsed(&VirtReg, ParentVNI);
559 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
560 return false;
561 }
562
563 // Before rematerializing into a register for a single instruction, try to
564 // fold a load into the instruction. That avoids allocating a new register.
565 if (RM.OrigMI->canFoldAsLoad() &&
566 foldMemoryOperand(Ops, RM.OrigMI)) {
567 Edit->markRematerialized(RM.ParentVNI);
568 ++NumFoldedLoads;
569 return true;
570 }
571
572 // Allocate a new register for the remat.
573 unsigned NewVReg = Edit->createFrom(Original);
574
575 // Finally we can rematerialize OrigMI before MI.
576 SlotIndex DefIdx =
577 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
578
579 // We take the DebugLoc from MI, since OrigMI may be attributed to a
580 // different source location.
581 auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
582 NewMI->setDebugLoc(MI.getDebugLoc());
583
584 (void)DefIdx;
585 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
586 << *LIS.getInstructionFromIndex(DefIdx));
587
588 // Replace operands
589 for (const auto &OpPair : Ops) {
590 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
591 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
592 MO.setReg(NewVReg);
593 MO.setIsKill();
594 }
595 }
596 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
597
598 ++NumRemats;
599 return true;
600}
601
602/// reMaterializeAll - Try to rematerialize as many uses as possible,
603/// and trim the live ranges after.
604void InlineSpiller::reMaterializeAll() {
605 if (!Edit->anyRematerializable(AA))
606 return;
607
608 UsedValues.clear();
609
610 // Try to remat before all uses of snippets.
611 bool anyRemat = false;
612 for (unsigned Reg : RegsToSpill) {
613 LiveInterval &LI = LIS.getInterval(Reg);
614 for (MachineRegisterInfo::reg_bundle_iterator
615 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
616 RegI != E; ) {
617 MachineInstr &MI = *RegI++;
618
619 // Debug values are not allowed to affect codegen.
620 if (MI.isDebugValue())
621 continue;
622
623 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
624 "instruction that isn't a DBG_VALUE");
625
626 anyRemat |= reMaterializeFor(LI, MI);
627 }
628 }
629 if (!anyRemat)
630 return;
631
632 // Remove any values that were completely rematted.
633 for (unsigned Reg : RegsToSpill) {
634 LiveInterval &LI = LIS.getInterval(Reg);
635 for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
636 I != E; ++I) {
637 VNInfo *VNI = *I;
638 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
639 continue;
640 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
641 MI->addRegisterDead(Reg, &TRI);
642 if (!MI->allDefsAreDead())
643 continue;
644 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
645 DeadDefs.push_back(MI);
646 }
647 }
648
649 // Eliminate dead code after remat. Note that some snippet copies may be
650 // deleted here.
651 if (DeadDefs.empty())
652 return;
653 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
654 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
655
656 // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
657 // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
658 // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
659 // removed, PHI VNIs are still left in the LiveInterval.
660 // So to get rid of an unused reg, we need to check whether it has non-dbg
661 // references instead of whether it has a non-empty interval.
662 unsigned ResultPos = 0;
663 for (unsigned Reg : RegsToSpill) {
664 if (MRI.reg_nodbg_empty(Reg)) {
665 Edit->eraseVirtReg(Reg);
666 continue;
667 }
668
669 assert(LIS.hasInterval(Reg) &&
670 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
671 "Empty and not used live-range?!");
672
673 RegsToSpill[ResultPos++] = Reg;
674 }
675 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
676 LLVM_DEBUG(dbgs() << RegsToSpill.size()
677 << " registers to spill after remat.\n");
678}
679
680//===----------------------------------------------------------------------===//
681// Spilling
682//===----------------------------------------------------------------------===//
683
684/// If MI is a load or store of StackSlot, it can be removed.
685bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
686 int FI = 0;
687 unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
688 bool IsLoad = InstrReg;
689 if (!IsLoad)
690 InstrReg = TII.isStoreToStackSlot(*MI, FI);
691
692 // We have a stack access. Is it the right register and slot?
693 if (InstrReg != Reg || FI != StackSlot)
694 return false;
695
696 if (!IsLoad)
697 HSpiller.rmFromMergeableSpills(*MI, StackSlot);
698
699 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
700 LIS.RemoveMachineInstrFromMaps(*MI);
701 MI->eraseFromParent();
702
703 if (IsLoad) {
704 ++NumReloadsRemoved;
705 --NumReloads;
706 } else {
707 ++NumSpillsRemoved;
708 --NumSpills;
709 }
710
711 return true;
712}
713
714#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
715LLVM_DUMP_METHOD
716// Dump the range of instructions from B to E with their slot indexes.
717static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
718 MachineBasicBlock::iterator E,
719 LiveIntervals const &LIS,
720 const char *const header,
721 unsigned VReg = 0) {
722 char NextLine = '\n';
723 char SlotIndent = '\t';
724
725 if (std::next(B) == E) {
726 NextLine = ' ';
727 SlotIndent = ' ';
728 }
729
730 dbgs() << '\t' << header << ": " << NextLine;
731
732 for (MachineBasicBlock::iterator I = B; I != E; ++I) {
733 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
734
735 // If a register was passed in and this instruction has it as a
736 // destination that is marked as an early clobber, print the
737 // early-clobber slot index.
738 if (VReg) {
739 MachineOperand *MO = I->findRegisterDefOperand(VReg);
740 if (MO && MO->isEarlyClobber())
741 Idx = Idx.getRegSlot(true);
742 }
743
744 dbgs() << SlotIndent << Idx << '\t' << *I;
745 }
746}
747#endif
748
749/// foldMemoryOperand - Try folding stack slot references in Ops into their
750/// instructions.
751///
752/// @param Ops Operand indices from analyzeVirtReg().
753/// @param LoadMI Load instruction to use instead of stack slot when non-null.
754/// @return True on success.
755bool InlineSpiller::
756foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
757 MachineInstr *LoadMI) {
758 if (Ops.empty())
759 return false;
760 // Don't attempt folding in bundles.
761 MachineInstr *MI = Ops.front().first;
762 if (Ops.back().first != MI || MI->isBundled())
763 return false;
764
765 bool WasCopy = MI->isCopy();
766 unsigned ImpReg = 0;
767
768 // Spill subregs if the target allows it.
769 // We always want to spill subregs for stackmap/patchpoint pseudos.
770 bool SpillSubRegs = TII.isSubregFoldable() ||
771 MI->getOpcode() == TargetOpcode::STATEPOINT ||
772 MI->getOpcode() == TargetOpcode::PATCHPOINT ||
773 MI->getOpcode() == TargetOpcode::STACKMAP;
774
775 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
776 // operands.
777 SmallVector<unsigned, 8> FoldOps;
778 for (const auto &OpPair : Ops) {
779 unsigned Idx = OpPair.second;
780 assert(MI == OpPair.first && "Instruction conflict during operand folding");
781 MachineOperand &MO = MI->getOperand(Idx);
782 if (MO.isImplicit()) {
783 ImpReg = MO.getReg();
784 continue;
785 }
786
787 if (!SpillSubRegs && MO.getSubReg())
788 return false;
789 // We cannot fold a load instruction into a def.
790 if (LoadMI && MO.isDef())
791 return false;
792 // Tied use operands should not be passed to foldMemoryOperand.
793 if (!MI->isRegTiedToDefOperand(Idx))
794 FoldOps.push_back(Idx);
795 }
796
797 // If we only have implicit uses, we won't be able to fold that.
798 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
799 if (FoldOps.empty())
800 return false;
801
802 MachineInstrSpan MIS(MI);
803
804 MachineInstr *FoldMI =
805 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
806 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS);
807 if (!FoldMI)
808 return false;
809
810 // Remove LIS for any dead defs in the original MI not in FoldMI.
811 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
812 if (!MO->isReg())
813 continue;
814 unsigned Reg = MO->getReg();
815 if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
816 MRI.isReserved(Reg)) {
817 continue;
818 }
819 // Skip non-Defs, including undef uses and internal reads.
820 if (MO->isUse())
821 continue;
822 MIBundleOperands::PhysRegInfo RI =
823 MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
824 if (RI.FullyDefined)
825 continue;
826 // FoldMI does not define this physreg. Remove the LI segment.
827 assert(MO->isDead() && "Cannot fold physreg def");
828 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
829 LIS.removePhysRegDefAt(Reg, Idx);
830 }
831
832 int FI;
833 if (TII.isStoreToStackSlot(*MI, FI) &&
834 HSpiller.rmFromMergeableSpills(*MI, FI))
835 --NumSpills;
836 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
837 MI->eraseFromParent();
838
839 // Insert any new instructions other than FoldMI into the LIS maps.
840 assert(!MIS.empty() && "Unexpected empty span of instructions!");
841 for (MachineInstr &MI : MIS)
842 if (&MI != FoldMI)
843 LIS.InsertMachineInstrInMaps(MI);
844
845 // TII.foldMemoryOperand may have left some implicit operands on the
846 // instruction. Strip them.
847 if (ImpReg)
848 for (unsigned i = FoldMI->getNumOperands(); i; --i) {
849 MachineOperand &MO = FoldMI->getOperand(i - 1);
850 if (!MO.isReg() || !MO.isImplicit())
851 break;
852 if (MO.getReg() == ImpReg)
853 FoldMI->RemoveOperand(i - 1);
854 }
855
856 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
857 "folded"));
858
859 if (!WasCopy)
860 ++NumFolded;
861 else if (Ops.front().second == 0) {
862 ++NumSpills;
863 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
864 } else
865 ++NumReloads;
866 return true;
867}
868
869void InlineSpiller::insertReload(unsigned NewVReg,
870 SlotIndex Idx,
871 MachineBasicBlock::iterator MI) {
872 MachineBasicBlock &MBB = *MI->getParent();
873
874 MachineInstrSpan MIS(MI);
875 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
876 MRI.getRegClass(NewVReg), &TRI);
877
878 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
879
880 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
881 NewVReg));
882 ++NumReloads;
883}
884
885/// Check if \p Def fully defines a VReg with an undefined value.
886/// If that's the case, that means the value of VReg is actually
887/// not relevant.
888static bool isFullUndefDef(const MachineInstr &Def) {
889 if (!Def.isImplicitDef())
890 return false;
891 assert(Def.getNumOperands() == 1 &&
892 "Implicit def with more than one definition");
893 // We can say that the VReg defined by Def is undef, only if it is
894 // fully defined by Def. Otherwise, some of the lanes may not be
895 // undef and the value of the VReg matters.
896 return !Def.getOperand(0).getSubReg();
897}
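
An illustration of the subregister caveat, using the pseudo-MIR notation the
comments in this file already use (register and subregister names are made up):

    %v = IMPLICIT_DEF        // fully undef: spilling %v can be skipped
    %v.sub = IMPLICIT_DEF    // partial def: the other lanes of %v still matter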
898
899/// insertSpill - Insert a spill of NewVReg after MI.
900void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
901 MachineBasicBlock::iterator MI) {
902 MachineBasicBlock &MBB = *MI->getParent();
903
904 MachineInstrSpan MIS(MI);
905 bool IsRealSpill = true;
906 if (isFullUndefDef(*MI)) {
907 // Don't spill undef value.
908 // Anything works for undef, in particular keeping the memory
909 // uninitialized is a viable option and it saves code size and
910 // run time.
911 BuildMI(MBB, std::next(MI), MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
912 .addReg(NewVReg, getKillRegState(isKill));
913 IsRealSpill = false;
914 } else
915 TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
916 MRI.getRegClass(NewVReg), &TRI);
917
918 LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
919
920 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
921 "spill"));
922 ++NumSpills;
923 if (IsRealSpill)
924 HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
925}
926
927/// spillAroundUses - insert spill code around each use of Reg.
928void InlineSpiller::spillAroundUses(unsigned Reg) {
929 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
930 LiveInterval &OldLI = LIS.getInterval(Reg);
931
932 // Iterate over instructions using Reg.
933 for (MachineRegisterInfo::reg_bundle_iterator
934 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
935 RegI != E; ) {
936 MachineInstr *MI = &*(RegI++);
937
938 // Debug values are not allowed to affect codegen.
939 if (MI->isDebugValue()) {
940 // Modify DBG_VALUE now that the value is in a spill slot.
941 MachineBasicBlock *MBB = MI->getParent();
942 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
943 buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
944 MBB->erase(MI);
945 continue;
946 }
947
948 assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
949 "instruction that isn't a DBG_VALUE");
950
951 // Ignore copies to/from snippets. We'll delete them.
952 if (SnippetCopies.count(MI))
953 continue;
954
955 // Stack slot accesses may coalesce away.
956 if (coalesceStackAccess(MI, Reg))
957 continue;
958
959 // Analyze instruction.
960 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
961 MIBundleOperands::VirtRegInfo RI =
962 MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);
963
964 // Find the slot index where this instruction reads and writes OldLI.
965 // This is usually the def slot, except for tied early clobbers.
966 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
967 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
968 if (SlotIndex::isSameInstr(Idx, VNI->def))
969 Idx = VNI->def;
970
971 // Check for a sibling copy.
972 unsigned SibReg = isFullCopyOf(*MI, Reg);
973 if (SibReg && isSibling(SibReg)) {
974 // This may actually be a copy between snippets.
975 if (isRegToSpill(SibReg)) {
976 LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
977 SnippetCopies.insert(MI);
978 continue;
979 }
980 if (RI.Writes) {
981 if (hoistSpillInsideBB(OldLI, *MI)) {
982 // This COPY is now dead, the value is already in the stack slot.
983 MI->getOperand(0).setIsDead();
984 DeadDefs.push_back(MI);
985 continue;
986 }
987 } else {
988 // This is a reload for a sib-reg copy. Drop spills downstream.
989 LiveInterval &SibLI = LIS.getInterval(SibReg);
990 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
991 // The COPY will fold to a reload below.
992 }
993 }
994
995 // Attempt to fold memory ops.
996 if (foldMemoryOperand(Ops))
997 continue;
998
999 // Create a new virtual register for spill/fill.
1000 // FIXME: Infer regclass from instruction alone.
1001 unsigned NewVReg = Edit->createFrom(Reg);
1002
1003 if (RI.Reads)
1004 insertReload(NewVReg, Idx, MI);
1005
1006 // Rewrite instruction operands.
1007 bool hasLiveDef = false;
1008 for (const auto &OpPair : Ops) {
1009 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1010 MO.setReg(NewVReg);
1011 if (MO.isUse()) {
1012 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1013 MO.setIsKill();
1014 } else {
1015 if (!MO.isDead())
1016 hasLiveDef = true;
1017 }
1018 }
1019 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
1020
1021 // FIXME: Use a second vreg if instruction has no tied ops.
1022 if (RI.Writes)
1023 if (hasLiveDef)
1024 insertSpill(NewVReg, true, MI);
1025 }
1026}
1027
1028/// spillAll - Spill all registers remaining after rematerialization.
1029void InlineSpiller::spillAll() {
1030 // Update LiveStacks now that we are committed to spilling.
1031 if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1032 StackSlot = VRM.assignVirt2StackSlot(Original);
1033 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1034 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1035 } else
1036 StackInt = &LSS.getInterval(StackSlot);
1037
1038 if (Original != Edit->getReg())
1039 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1040
1041 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1042 for (unsigned Reg : RegsToSpill)
1043 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1044 StackInt->getValNumInfo(0));
1045 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1046
1047 // Spill around uses of all RegsToSpill.
1048 for (unsigned Reg : RegsToSpill)
1049 spillAroundUses(Reg);
1050
1051 // Hoisted spills may cause dead code.
1052 if (!DeadDefs.empty()) {
1053 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1054 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1055 }
1056
1057 // Finally delete the SnippetCopies.
1058 for (unsigned Reg : RegsToSpill) {
1059 for (MachineRegisterInfo::reg_instr_iterator
1060 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
1061 RI != E; ) {
1062 MachineInstr &MI = *(RI++);
1063 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1064 // FIXME: Do this with a LiveRangeEdit callback.
1065 LIS.RemoveMachineInstrFromMaps(MI);
1066 MI.eraseFromParent();
1067 }
1068 }
1069
1070 // Delete all spilled registers.
1071 for (unsigned Reg : RegsToSpill)
1072 Edit->eraseVirtReg(Reg);
1073}
1074
1075void InlineSpiller::spill(LiveRangeEdit &edit) {
1076 ++NumSpilledRanges;
1077 Edit = &edit;
1078 assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
1079 && "Trying to spill a stack slot.");
1080 // Share a stack slot among all descendants of Original.
1081 Original = VRM.getOriginal(edit.getReg());
1082 StackSlot = VRM.getStackSlot(Original);
1083 StackInt = nullptr;
1084
1085 LLVM_DEBUG(dbgs() << "Inline spilling "
1086 << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1087 << ':' << edit.getParent() << "\nFrom original "
1088 << printReg(Original) << '\n');
1089 assert(edit.getParent().isSpillable() &&
1090 "Attempting to spill already spilled value.");
1091 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1092
1093 collectRegsToSpill();
1094 reMaterializeAll();
1095
1096 // Remat may handle everything.
1097 if (!RegsToSpill.empty())
1098 spillAll();
1099
1100 Edit->calculateRegClassAndHint(MF, Loops, MBFI);
1101}
1102
1103/// Optimizations after all the reg selections and spills are done.
1104void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
[Step 1] Calling 'HoistSpillHelper::hoistAllSpills'
1105
1106/// When a spill is inserted, add the spill to the MergeableSpills map.
1107void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1108 unsigned Original) {
1109 BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1110 LiveInterval &OrigLI = LIS.getInterval(Original);
1111 // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
1112 // LiveInterval may be cleared after all its references are spilled.
1113 if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1114 auto LI = llvm::make_unique<LiveInterval>(OrigLI.reg, OrigLI.weight);
1115 LI->assign(OrigLI, Allocator);
1116 StackSlotToOrigLI[StackSlot] = std::move(LI);
1117 }
1118 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1119 VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1120 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1121 MergeableSpills[MIdx].insert(&Spill);
1122}
1123
1124/// When a spill is removed, remove the spill from the MergeableSpills map.
1125/// Return true if the spill is removed successfully.
1126bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1127 int StackSlot) {
1128 auto It = StackSlotToOrigLI.find(StackSlot);
1129 if (It == StackSlotToOrigLI.end())
1130 return false;
1131 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1132 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1133 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1134 return MergeableSpills[MIdx].erase(&Spill);
1135}
1136
1137/// Check BB to see if it is a possible target BB to place a hoisted spill,
1138/// i.e., there should be a live sibling of OrigReg at the insert point.
1139bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1140 MachineBasicBlock &BB, unsigned &LiveReg) {
1141 SlotIndex Idx;
1142 unsigned OrigReg = OrigLI.reg;
1143 MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
1144 if (MI != BB.end())
1145 Idx = LIS.getInstructionIndex(*MI);
1146 else
1147 Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
1148 SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1149 assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1150
1151 for (auto const SibReg : Siblings) {
1152 LiveInterval &LI = LIS.getInterval(SibReg);
1153 VNInfo *VNI = LI.getVNInfoAt(Idx);
1154 if (VNI) {
1155 LiveReg = SibReg;
1156 return true;
1157 }
1158 }
1159 return false;
1160}
1161
1162/// Remove redundant spills in the same BB. Save those redundant spills in
1163/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1164void HoistSpillHelper::rmRedundantSpills(
1165 SmallPtrSet<MachineInstr *, 16> &Spills,
1166 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1167 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1168 // For each spill seen, check SpillBBToSpill[] and see if its BB already has
1169 // another spill inside. If a BB contains more than one spill, only keep the
1170 // earlier spill with smaller SlotIndex.
1171 for (const auto CurrentSpill : Spills) {
1172 MachineBasicBlock *Block = CurrentSpill->getParent();
1173 MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1174 MachineInstr *PrevSpill = SpillBBToSpill[Node];
1175 if (PrevSpill) {
1176 SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1177 SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1178 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1179 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1180 SpillsToRm.push_back(SpillToRm);
1181 SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1182 } else {
1183 SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1184 }
1185 }
1186 for (const auto SpillToRm : SpillsToRm)
1187 Spills.erase(SpillToRm);
1188}
1189
1190/// Starting from \p Root find a top-down traversal order of the dominator
1191/// tree to visit all basic blocks containing the elements of \p Spills.
1192/// Redundant spills will be found and put into \p SpillsToRm at the same
1193/// time. \p SpillBBToSpill will be populated as part of the process and
1194/// maps a basic block to the first store occurring in the basic block.
1195/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1196void HoistSpillHelper::getVisitOrders(
1197 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1198 SmallVectorImpl<MachineDomTreeNode *> &Orders,
1199 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1200 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1201 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1202 // The set contains all the possible BB nodes to which we may hoist
1203 // original spills.
1204 SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1205 // Save the BB nodes on the path from the first BB node containing a
1206 // non-redundant spill to the Root node.
1207 SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1208 // All the spills to be hoisted must originate from a single def instruction
1209 // of OrigReg. This means the def instruction should dominate all the spills
1210 // to be hoisted. We choose the BB where the def instruction is located as
1211 // the Root.
1212 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1213 // For every node on the dominator tree with a spill, walk up the dominator
1214 // tree towards the Root node until it is reached. If there is another node
1215 // containing a spill in the middle of the path, the previously seen spill
1216 // will be redundant and the node containing it will be removed. All the nodes
1217 // on the path, starting from the first node with a non-redundant spill, to the Root
1218 // node will be added to the WorkSet, which will contain all the possible
1219 // locations where spills may be hoisted to after the loop below is done.
1220 for (const auto Spill : Spills) {
1221 MachineBasicBlock *Block = Spill->getParent();
1222 MachineDomTreeNode *Node = MDT[Block];
[Step 7] Calling 'MachineDominatorTree::operator[]'
[Step 31] Returning from 'MachineDominatorTree::operator[]'
[Step 32] 'Node' initialized here
1223 MachineInstr *SpillToRm = nullptr;
1224 while (Node != RootIDomNode) {
[Step 33] Assuming 'Node' is not equal to 'RootIDomNode'
[Step 34] Loop condition is true. Entering loop body
1225 // If Node dominates Block, and it already contains a spill, the spill in
1226 // Block will be redundant.
1227 if (Node != MDT[Block] && SpillBBToSpill[Node]) {
[Step 35] Assuming the condition is false
[Step 36] Assuming pointer value is null
1228 SpillToRm = SpillBBToSpill[MDT[Block]];
1229 break;
1230 /// If we see the Node already in WorkSet, the path from the Node to
1231 /// the Root node must already have been traversed by another spill,
1232 /// so there is no need to repeat it.
1233 } else if (WorkSet.count(Node)) {
[Step 37] Assuming the condition is false
[Step 38] Taking false branch
1234 break;
1235 } else {
1236 NodesOnPath.insert(Node);
1237 }
1238 Node = Node->getIDom();
[Step 39] Called C++ object pointer is null
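// Note (illustrative guard, not the upstream fix): MDT[Block] forwards to
// DominatorTreeBase::getNode(), which may return nullptr for a block that is
// not in the dominator tree, so 'Node' can be null by the time the
// Node->getIDom() call above executes. A minimal defensive sketch, placed
// right after the initialization at line 1222, would be:
//   MachineDomTreeNode *Node = MDT[Block];
//   if (!Node)
//     continue; // No dominator tree node for Block; nothing to walk up from.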
1239 }
1240 if (SpillToRm) {
1241 SpillsToRm.push_back(SpillToRm);
1242 } else {
1243 // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1244 // set the initial status before hoisting starts. The value of BBs
1245 // containing original spills is set to 0, in order to discriminate
1246 // them from BBs containing hoisted spills, which will be inserted into
1247 // SpillsToKeep later during hoisting.
1248 SpillsToKeep[MDT[Block]] = 0;
1249 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1250 }
1251 NodesOnPath.clear();
1252 }
1253
1254 // Sort the nodes in WorkSet in top-down order and save the nodes
1255 // in Orders. Orders will be used for hoisting in runHoistSpills.
1256 unsigned idx = 0;
1257 Orders.push_back(MDT.getBase().getNode(Root));
1258 do {
1259 MachineDomTreeNode *Node = Orders[idx++];
1260 const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
1261 unsigned NumChildren = Children.size();
1262 for (unsigned i = 0; i != NumChildren; ++i) {
1263 MachineDomTreeNode *Child = Children[i];
1264 if (WorkSet.count(Child))
1265 Orders.push_back(Child);
1266 }
1267 } while (idx != Orders.size());
1268 assert(Orders.size() == WorkSet.size() &&
1269        "Orders have different size with WorkSet");
1270
1271#ifndef NDEBUG
1272 LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1273 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1274 for (; RIt != Orders.rend(); RIt++)
1275 LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1276 LLVM_DEBUG(dbgs() << "\n");
1277#endif
1278}
1279
1280/// Try to hoist spills according to BB hotness. The spills to be removed will
1281/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1282/// \p SpillsToIns.
1283void HoistSpillHelper::runHoistSpills(
1284 LiveInterval &OrigLI, VNInfo &OrigVNI,
1285 SmallPtrSet<MachineInstr *, 16> &Spills,
1286 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1287 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1288 // Visit order of dominator tree nodes.
1289 SmallVector<MachineDomTreeNode *, 32> Orders;
1290 // SpillsToKeep contains all the nodes where spills are to be inserted
1291 // during hoisting. If the spill to be inserted is an original spill
1292 // (not a hoisted one), the value of the map entry is 0. If the spill
1293 // is a hoisted spill, the value of the map entry is the VReg to be used
1294 // as the source of the spill.
1295 DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1296 // Map from BB to the first spill inside of it.
1297 DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1298
1299 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1300
1301 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1302 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
[Step 6] Calling 'HoistSpillHelper::getVisitOrders'
1303 SpillBBToSpill);
1304
1305 // SpillsInSubTreeMap maps a dom tree node to a pair of a node set and
1306 // the cost of all the spills inside those nodes.
1307 // The node set contains the locations where spills are to be inserted
1308 // in the subtree of the current node.
1309 using NodesCostPair =
1310 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1311 DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1312
1313 // Iterate the Orders set in reverse order, which will be a bottom-up order
1314 // in the dominator tree. Once we visit a dom tree node, we know its
1315 // children have already been visited and the spill locations in the
1316 // subtrees of all the children have been determined.
1317 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1318 for (; RIt != Orders.rend(); RIt++) {
1319 MachineBasicBlock *Block = (*RIt)->getBlock();
1320
1321 // If Block contains an original spill, simply continue.
1322 if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1323 SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1324 // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1325 SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1326 continue;
1327 }
1328
1329 // Collect spills in subtree of current node (*RIt) to
1330 // SpillsInSubTreeMap[*RIt].first.
1331 const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();
1332 unsigned NumChildren = Children.size();
1333 for (unsigned i = 0; i != NumChildren; ++i) {
1334 MachineDomTreeNode *Child = Children[i];
1335 if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1336 continue;
1337 // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1338 // should be placed before getting the begin and end iterators of
1339 // SpillsInSubTreeMap[Child].first, or else the iterators may be
1340 // invalidated when SpillsInSubTreeMap[*RIt] is seen for the first time:
1341 // the map may grow, and the original buckets in the map may then move.
1342 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1343 SpillsInSubTreeMap[*RIt].first;
1344 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1345 SubTreeCost += SpillsInSubTreeMap[Child].second;
1346 auto BI = SpillsInSubTreeMap[Child].first.begin();
1347 auto EI = SpillsInSubTreeMap[Child].first.end();
1348 SpillsInSubTree.insert(BI, EI);
1349 SpillsInSubTreeMap.erase(Child);
1350 }
1351
1352 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1353 SpillsInSubTreeMap[*RIt].first;
1354 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1355 // No spills in subtree, simply continue.
1356 if (SpillsInSubTree.empty())
1357 continue;
1358
1359 // Check whether Block is a possible candidate to insert a spill.
1360 unsigned LiveReg = 0;
1361 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1362 continue;
1363
1364 // If there are multiple spills that could be merged, bias a little
1365 // to hoist the spill.
1366 BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1367 ? BranchProbability(9, 10)
1368 : BranchProbability(1, 1);
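// Note (illustrative arithmetic, not in the original source): with more than
// one mergeable spill, MarginProb is 9/10, so the hoist below is accepted
// whenever SubTreeCost > 0.9 * freq(Block). For example, with SubTreeCost ==
// 100 and getBlockFreq(Block) == 105, the threshold is 105 * 9/10 = 94.5, and
// 100 > 94.5, so the spills are hoisted into the slightly hotter block in
// exchange for merging them into a single spill.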
1369 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1370 // Hoist: Move spills to current Block.
1371 for (const auto SpillBB : SpillsInSubTree) {
1372 // When SpillBB is a BB containing an original spill, insert the spill
1373 // into SpillsToRm.
1374 if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1375 !SpillsToKeep[SpillBB]) {
1376 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1377 SpillsToRm.push_back(SpillToRm);
1378 }
1379 // SpillBB will not contain a spill anymore; remove it from SpillsToKeep.
1380 SpillsToKeep.erase(SpillBB);
1381 }
1382 // Current Block is the BB containing the new hoisted spill. Add it to
1383 // SpillsToKeep. LiveReg is the source of the new spill.
1384 SpillsToKeep[*RIt] = LiveReg;
1385 LLVM_DEBUG({
1386   dbgs() << "spills in BB: ";
1387   for (const auto Rspill : SpillsInSubTree)
1388     dbgs() << Rspill->getBlock()->getNumber() << " ";
1389   dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1390          << "\n";
1391 });
1392 SpillsInSubTree.clear();
1393 SpillsInSubTree.insert(*RIt);
1394 SubTreeCost = MBFI.getBlockFreq(Block);
1395 }
1396 }
1397 // For spills in SpillsToKeep with LiveReg set (i.e., not an original spill),
1398 // save them to SpillsToIns.
1399 for (const auto Ent : SpillsToKeep) {
1400 if (Ent.second)
1401 SpillsToIns[Ent.first->getBlock()] = Ent.second;
1402 }
1403}
1404
1405/// For spills with equal values, remove redundant spills and hoist the
1406/// remaining ones to less hot spots.
1407///
1408/// Spills with equal values will be collected into the same set in
1409/// MergeableSpills when a spill is inserted. These equal spills originate
1410/// from the same defining instruction and are dominated by it.
1411/// Before hoisting all the equal spills, redundant spills inside the same
1412/// BB are first marked for deletion. Then, starting from the remaining spills,
1413/// walk up the dominator tree towards the Root node where the defining
1414/// instruction is located, mark the dominated spills for deletion along the way,
1415/// and collect the BB nodes on the path from non-dominated spills to the
1416/// defining instruction into a WorkSet. The nodes in WorkSet are the candidate
1417/// places where we consider hoisting the spills. We iterate the WorkSet in
1418/// bottom-up order, and for each node, we decide whether to hoist the spills
1419/// inside its subtree to that node. In this way, we can obtain a local benefit
1420/// even if hoisting all the equal spills to one cold place is impossible.
1421void HoistSpillHelper::hoistAllSpills() {
1422 SmallVector<unsigned, 4> NewVRegs;
1423 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1424
1425 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
[Step 2] Assuming 'i' is equal to 'e'
[Step 3] Loop condition is false. Execution continues on line 1433
1426 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
1427 unsigned Original = VRM.getPreSplitReg(Reg);
1428 if (!MRI.def_empty(Reg))
1429 Virt2SiblingsMap[Original].insert(Reg);
1430 }
1431
1432 // Each entry in MergeableSpills contains a spill set with equal values.
1433 for (auto &Ent : MergeableSpills) {
1434 int Slot = Ent.first.first;
1435 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1436 VNInfo *OrigVNI = Ent.first.second;
1437 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1438 if (Ent.second.empty())
[Step 4] Taking false branch
1439 continue;
1440
1441 LLVM_DEBUG({
1442   dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1443          << "Equal spills in BB: ";
1444   for (const auto spill : EqValSpills)
1445     dbgs() << spill->getParent()->getNumber() << " ";
1446   dbgs() << "\n";
1447 });
1448
1449 // SpillsToRm is the spill set to be removed from EqValSpills.
1450 SmallVector<MachineInstr *, 16> SpillsToRm;
1451 // SpillsToIns is the spill set to be newly inserted after hoisting.
1452 DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1453
1454 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
[Step 5] Calling 'HoistSpillHelper::runHoistSpills'
1455
1456 LLVM_DEBUG({
1457   dbgs() << "Finally inserted spills in BB: ";
1458   for (const auto Ispill : SpillsToIns)
1459     dbgs() << Ispill.first->getNumber() << " ";
1460   dbgs() << "\nFinally removed spills in BB: ";
1461   for (const auto Rspill : SpillsToRm)
1462     dbgs() << Rspill->getParent()->getNumber() << " ";
1463   dbgs() << "\n";
1464 });
1465
1466 // Stack live range update.
1467 LiveInterval &StackIntvl = LSS.getInterval(Slot);
1468 if (!SpillsToIns.empty() || !SpillsToRm.empty())
1469 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1470 StackIntvl.getValNumInfo(0));
1471
1472 // Insert hoisted spills.
1473 for (auto const Insert : SpillsToIns) {
1474 MachineBasicBlock *BB = Insert.first;
1475 unsigned LiveReg = Insert.second;
1476 MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
1477 TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
1478 MRI.getRegClass(LiveReg), &TRI);
1479 LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
1480 ++NumSpills;
1481 }
1482
1483 // Remove redundant spills or change them to dead instructions.
1484 NumSpills -= SpillsToRm.size();
1485 for (auto const RMEnt : SpillsToRm) {
1486 RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1487 for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1488 MachineOperand &MO = RMEnt->getOperand(i - 1);
1489 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1490 RMEnt->RemoveOperand(i - 1);
1491 }
1492 }
1493 Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1494 }
1495}
1496
1497/// For a VirtReg clone, the \p New register should have the same physreg or
1498/// stack slot as the \p Old register.
1499void HoistSpillHelper::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
1500 if (VRM.hasPhys(Old))
1501 VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1502 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1503 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1504 else
1505 llvm_unreachable("VReg should be assigned either physreg or stackslot");
1506}

/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/CodeGen/MachineDominators.h

1//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
11// but for target-specific code rather than target-independent IR.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
16#define LLVM_CODEGEN_MACHINEDOMINATORS_H
17
18#include "llvm/ADT/SmallSet.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/CodeGen/MachineBasicBlock.h"
21#include "llvm/CodeGen/MachineFunctionPass.h"
22#include "llvm/CodeGen/MachineInstr.h"
23#include "llvm/Support/GenericDomTree.h"
24#include "llvm/Support/GenericDomTreeConstruction.h"
25#include <cassert>
26#include <memory>
27#include <vector>
28
29namespace llvm {
30
31template <>
32inline void DominatorTreeBase<MachineBasicBlock, false>::addRoot(
33 MachineBasicBlock *MBB) {
34 this->Roots.push_back(MBB);
35}
36
37extern template class DomTreeNodeBase<MachineBasicBlock>;
38extern template class DominatorTreeBase<MachineBasicBlock, false>; // DomTree
39extern template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTree
40
41using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
42
43//===-------------------------------------
44/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
45/// compute a normal dominator tree.
46///
47class MachineDominatorTree : public MachineFunctionPass {
48 /// Helper structure used to hold all the basic blocks
49 /// involved in the split of a critical edge.
50 struct CriticalEdge {
51 MachineBasicBlock *FromBB;
52 MachineBasicBlock *ToBB;
53 MachineBasicBlock *NewBB;
54 };
55
56 /// Pile up all the critical edges to be split.
57 /// The splitting of a critical edge is local, and thus it is possible
58 /// to apply several of those changes at the same time.
59 mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
60
61 /// Remember all the basic blocks that are inserted during
62 /// edge splitting.
63 /// Invariant: NewBBs == all the basic blocks contained in the NewBB
64 /// field of all the elements of CriticalEdgesToSplit.
65 /// I.e., for all elt in CriticalEdgesToSplit, there exists a BB in NewBBs
66 /// such that BB == elt.NewBB.
67 mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
68
69 /// The DominatorTreeBase that is used to compute a normal dominator tree
70 std::unique_ptr<DomTreeBase<MachineBasicBlock>> DT;
71
72 /// Apply all the recorded critical edges to the DT.
73 /// This updates the underlying DT information in a way that uses
74 /// the fast query path of DT as much as possible.
75 ///
76 /// \post CriticalEdgesToSplit.empty().
77 void applySplitCriticalEdges() const;
78
79public:
80 static char ID; // Pass ID, replacement for typeid
81
82 MachineDominatorTree();
83
84 DomTreeBase<MachineBasicBlock> &getBase() {
85 if (!DT) DT.reset(new DomTreeBase<MachineBasicBlock>());
86 applySplitCriticalEdges();
87 return *DT;
88 }
89
90 void getAnalysisUsage(AnalysisUsage &AU) const override;
91
92 /// getRoots - Return the root blocks of the current CFG. This may include
93 /// multiple blocks if we are computing post dominators. For forward
94 /// dominators, this will always be a single block (the entry node).
95 ///
96 inline const SmallVectorImpl<MachineBasicBlock*> &getRoots() const {
97 applySplitCriticalEdges();
98 return DT->getRoots();
99 }
100
101 inline MachineBasicBlock *getRoot() const {
102 applySplitCriticalEdges();
103 return DT->getRoot();
104 }
105
106 inline MachineDomTreeNode *getRootNode() const {
107 applySplitCriticalEdges();
108 return DT->getRootNode();
109 }
110
111 bool runOnMachineFunction(MachineFunction &F) override;
112
113 inline bool dominates(const MachineDomTreeNode* A,
114 const MachineDomTreeNode* B) const {
115 applySplitCriticalEdges();
116 return DT->dominates(A, B);
117 }
118
119 inline bool dominates(const MachineBasicBlock* A,
120 const MachineBasicBlock* B) const {
121 applySplitCriticalEdges();
122 return DT->dominates(A, B);
123 }
124
125 // dominates - Return true if A dominates B. This performs the
126 // special checks necessary if A and B are in the same basic block.
127 bool dominates(const MachineInstr *A, const MachineInstr *B) const {
128 applySplitCriticalEdges();
129 const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
130 if (BBA != BBB) return DT->dominates(BBA, BBB);
131
132 // Loop through the basic block until we find A or B.
133 MachineBasicBlock::const_iterator I = BBA->begin();
134 for (; &*I != A && &*I != B; ++I)
135 /*empty*/ ;
136
137 //if(!DT.IsPostDominators) {
138 // A dominates B if it is found first in the basic block.
139 return &*I == A;
140 //} else {
141 // // A post-dominates B if B is found first in the basic block.
142 // return &*I == B;
143 //}
144 }
145
146 inline bool properlyDominates(const MachineDomTreeNode* A,
147 const MachineDomTreeNode* B) const {
148 applySplitCriticalEdges();
149 return DT->properlyDominates(A, B);
150 }
151
152 inline bool properlyDominates(const MachineBasicBlock* A,
153 const MachineBasicBlock* B) const {
154 applySplitCriticalEdges();
155 return DT->properlyDominates(A, B);
156 }
157
158 /// findNearestCommonDominator - Find nearest common dominator basic block
159 /// for basic block A and B. If there is no such block then return NULL.
160 inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
161 MachineBasicBlock *B) {
162 applySplitCriticalEdges();
163 return DT->findNearestCommonDominator(A, B);
164 }
165
166 inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
167 applySplitCriticalEdges();
168 return DT->getNode(BB);
[Step 8] Calling 'DominatorTreeBase::getNode'
[Step 29] Returning from 'DominatorTreeBase::getNode'
[Step 30] Returning pointer
169 }
170
171 /// getNode - return the (Post)DominatorTree node for the specified basic
172 /// block. This is the same as using operator[] on this class.
173 ///
174 inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
175 applySplitCriticalEdges();
176 return DT->getNode(BB);
177 }
178
179 /// addNewBlock - Add a new node to the dominator tree information. This
180 /// creates a new node as a child of the DomBB dominator node, linking it into
181 /// the children list of the immediate dominator.
182 inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
183 MachineBasicBlock *DomBB) {
184 applySplitCriticalEdges();
185 return DT->addNewBlock(BB, DomBB);
186 }
187
188 /// changeImmediateDominator - This method is used to update the dominator
189 /// tree information when a node's immediate dominator changes.
190 ///
191 inline void changeImmediateDominator(MachineBasicBlock *N,
192 MachineBasicBlock* NewIDom) {
193 applySplitCriticalEdges();
194 DT->changeImmediateDominator(N, NewIDom);
195 }
196
197 inline void changeImmediateDominator(MachineDomTreeNode *N,
198 MachineDomTreeNode* NewIDom) {
199 applySplitCriticalEdges();
200 DT->changeImmediateDominator(N, NewIDom);
201 }
202
203 /// eraseNode - Removes a node from the dominator tree. Block must not
204 /// dominate any other blocks. Removes node from its immediate dominator's
205 /// children list. Deletes dominator node associated with basic block BB.
206 inline void eraseNode(MachineBasicBlock *BB) {
207 applySplitCriticalEdges();
208 DT->eraseNode(BB);
209 }
210
211 /// splitBlock - BB is split and now it has one successor. Update dominator
212 /// tree to reflect this change.
213 inline void splitBlock(MachineBasicBlock* NewBB) {
214 applySplitCriticalEdges();
215 DT->splitBlock(NewBB);
216 }
217
218 /// isReachableFromEntry - Return true if A is dominated by the entry
219 /// block of the function containing it.
220 bool isReachableFromEntry(const MachineBasicBlock *A) {
221 applySplitCriticalEdges();
222 return DT->isReachableFromEntry(A);
223 }
224
225 void releaseMemory() override;
226
227 void verifyAnalysis() const override;
228
229 void print(raw_ostream &OS, const Module*) const override;
230
231 /// Record that the critical edge (FromBB, ToBB) has been
232 /// split with NewBB.
233 /// It is best to use this method instead of directly updating the
234 /// underlying information, because this helps reduce the
235 /// number of times the DT information is invalidated.
236 ///
237 /// \note Do not use this method with regular edges.
238 ///
239 /// \note To benefit from the compile time improvement incurred by this
240 /// method, the users of this method have to limit the queries to the DT
241 /// interface between two edges splitting. In other words, they have to
242 /// pack the splitting of critical edges as much as possible.
243 void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
244 MachineBasicBlock *ToBB,
245 MachineBasicBlock *NewBB) {
246 bool Inserted = NewBBs.insert(NewBB).second;
247 (void)Inserted;
248 assert(Inserted &&
249        "A basic block inserted via edge splitting cannot appear twice");
250 CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
251 }
252};
253
254//===-------------------------------------
255/// DominatorTree GraphTraits specialization so the DominatorTree can be
256/// iterable by generic graph iterators.
257///
258
259template <class Node, class ChildIterator>
260struct MachineDomTreeGraphTraitsBase {
261 using NodeRef = Node *;
262 using ChildIteratorType = ChildIterator;
263
264 static NodeRef getEntryNode(NodeRef N) { return N; }
265 static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
266 static ChildIteratorType child_end(NodeRef N) { return N->end(); }
267};
268
269template <class T> struct GraphTraits;
270
271template <>
272struct GraphTraits<MachineDomTreeNode *>
273 : public MachineDomTreeGraphTraitsBase<MachineDomTreeNode,
274 MachineDomTreeNode::iterator> {};
275
276template <>
277struct GraphTraits<const MachineDomTreeNode *>
278 : public MachineDomTreeGraphTraitsBase<const MachineDomTreeNode,
279 MachineDomTreeNode::const_iterator> {
280};
281
282template <> struct GraphTraits<MachineDominatorTree*>
283 : public GraphTraits<MachineDomTreeNode *> {
284 static NodeRef getEntryNode(MachineDominatorTree *DT) {
285 return DT->getRootNode();
286 }
287};
288
289} // end namespace llvm
290
291#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H

/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/Support/GenericDomTree.h

1//===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9/// \file
10///
11/// This file defines a set of templates that efficiently compute a dominator
12/// tree over a generic graph. This is used typically in LLVM for fast
13/// dominance queries on the CFG, but is fully generic w.r.t. the underlying
14/// graph types.
15///
16/// Unlike ADT/* graph algorithms, generic dominator tree has more requirements
17/// on the graph's NodeRef. The NodeRef should be a pointer, and
18/// NodeRef->getParent() must return the parent node, which is also a pointer.
19///
20/// FIXME: Maybe GenericDomTree needs a TreeTraits, instead of GraphTraits.
21///
22//===----------------------------------------------------------------------===//
23
24#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
25#define LLVM_SUPPORT_GENERICDOMTREE_H
26
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/GraphTraits.h"
29#include "llvm/ADT/PointerIntPair.h"
30#include "llvm/ADT/STLExtras.h"
31#include "llvm/ADT/SmallPtrSet.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/Support/CFGUpdate.h"
34#include "llvm/Support/raw_ostream.h"
35#include <algorithm>
36#include <cassert>
37#include <cstddef>
38#include <iterator>
39#include <memory>
40#include <type_traits>
41#include <utility>
42#include <vector>
43
44namespace llvm {
45
46template <typename NodeT, bool IsPostDom>
47class DominatorTreeBase;
48
49namespace DomTreeBuilder {
50template <typename DomTreeT>
51struct SemiNCAInfo;
52} // namespace DomTreeBuilder
53
54/// Base class for the actual dominator tree node.
55template <class NodeT> class DomTreeNodeBase {
56 friend class PostDominatorTree;
57 friend class DominatorTreeBase<NodeT, false>;
58 friend class DominatorTreeBase<NodeT, true>;
59 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, false>>;
60 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, true>>;
61
62 NodeT *TheBB;
63 DomTreeNodeBase *IDom;
64 unsigned Level;
65 std::vector<DomTreeNodeBase *> Children;
66 mutable unsigned DFSNumIn = ~0;
67 mutable unsigned DFSNumOut = ~0;
68
69 public:
70 DomTreeNodeBase(NodeT *BB, DomTreeNodeBase *iDom)
71 : TheBB(BB), IDom(iDom), Level(IDom ? IDom->Level + 1 : 0) {}
72
73 using iterator = typename std::vector<DomTreeNodeBase *>::iterator;
74 using const_iterator =
75 typename std::vector<DomTreeNodeBase *>::const_iterator;
76
77 iterator begin() { return Children.begin(); }
78 iterator end() { return Children.end(); }
79 const_iterator begin() const { return Children.begin(); }
80 const_iterator end() const { return Children.end(); }
81
82 NodeT *getBlock() const { return TheBB; }
83 DomTreeNodeBase *getIDom() const { return IDom; }
84 unsigned getLevel() const { return Level; }
85
86 const std::vector<DomTreeNodeBase *> &getChildren() const { return Children; }
87
88 std::unique_ptr<DomTreeNodeBase> addChild(
89 std::unique_ptr<DomTreeNodeBase> C) {
90 Children.push_back(C.get());
91 return C;
92 }
93
94 size_t getNumChildren() const { return Children.size(); }
95
96 void clearAllChildren() { Children.clear(); }
97
98 bool compare(const DomTreeNodeBase *Other) const {
99 if (getNumChildren() != Other->getNumChildren())
100 return true;
101
102 if (Level != Other->Level) return true;
103
104 SmallPtrSet<const NodeT *, 4> OtherChildren;
105 for (const DomTreeNodeBase *I : *Other) {
106 const NodeT *Nd = I->getBlock();
107 OtherChildren.insert(Nd);
108 }
109
110 for (const DomTreeNodeBase *I : *this) {
111 const NodeT *N = I->getBlock();
112 if (OtherChildren.count(N) == 0)
113 return true;
114 }
115 return false;
116 }
117
118 void setIDom(DomTreeNodeBase *NewIDom) {
119 assert(IDom && "No immediate dominator?");
120 if (IDom == NewIDom) return;
121
122 auto I = find(IDom->Children, this);
123 assert(I != IDom->Children.end() &&
124        "Not in immediate dominator children set!");
125 // I am no longer your child...
126 IDom->Children.erase(I);
127
128 // Switch to new dominator
129 IDom = NewIDom;
130 IDom->Children.push_back(this);
131
132 UpdateLevel();
133 }
134
135 /// getDFSNumIn/getDFSNumOut - These return the DFS visitation order for nodes
136 /// in the dominator tree. They are only guaranteed valid if
137 /// updateDFSNumbers() has been called.
138 unsigned getDFSNumIn() const { return DFSNumIn; }
139 unsigned getDFSNumOut() const { return DFSNumOut; }
140
141private:
142 // Return true if this node is dominated by other. Use this only if DFS info
143 // is valid.
144 bool DominatedBy(const DomTreeNodeBase *other) const {
145 return this->DFSNumIn >= other->DFSNumIn &&
146 this->DFSNumOut <= other->DFSNumOut;
147 }
148
149 void UpdateLevel() {
150 assert(IDom);
151 if (Level == IDom->Level + 1) return;
152
153 SmallVector<DomTreeNodeBase *, 64> WorkStack = {this};
154
155 while (!WorkStack.empty()) {
156 DomTreeNodeBase *Current = WorkStack.pop_back_val();
157 Current->Level = Current->IDom->Level + 1;
158
159 for (DomTreeNodeBase *C : *Current) {
160 assert(C->IDom);
161 if (C->Level != C->IDom->Level + 1) WorkStack.push_back(C);
162 }
163 }
164 }
165};
166
167template <class NodeT>
168raw_ostream &operator<<(raw_ostream &O, const DomTreeNodeBase<NodeT> *Node) {
169 if (Node->getBlock())
170 Node->getBlock()->printAsOperand(O, false);
171 else
172 O << " <<exit node>>";
173
174 O << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "} ["
175 << Node->getLevel() << "]\n";
176
177 return O;
178}
179
180template <class NodeT>
181void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &O,
182 unsigned Lev) {
183 O.indent(2 * Lev) << "[" << Lev << "] " << N;
184 for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
185 E = N->end();
186 I != E; ++I)
187 PrintDomTree<NodeT>(*I, O, Lev + 1);
188}
189
190namespace DomTreeBuilder {
191// The routines below are provided in a separate header but referenced here.
192template <typename DomTreeT>
193void Calculate(DomTreeT &DT);
194
195template <typename DomTreeT>
196void CalculateWithUpdates(DomTreeT &DT,
197 ArrayRef<typename DomTreeT::UpdateType> Updates);
198
199template <typename DomTreeT>
200void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
201 typename DomTreeT::NodePtr To);
202
203template <typename DomTreeT>
204void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
205 typename DomTreeT::NodePtr To);
206
207template <typename DomTreeT>
208void ApplyUpdates(DomTreeT &DT,
209 ArrayRef<typename DomTreeT::UpdateType> Updates);
210
211template <typename DomTreeT>
212bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL);
213} // namespace DomTreeBuilder
214
215/// Core dominator tree base class.
216///
217/// This class is a generic template over graph nodes. It is instantiated for
218/// various graphs in the LLVM IR or in the code generator.
219template <typename NodeT, bool IsPostDom>
220class DominatorTreeBase {
221 public:
222 static_assert(std::is_pointer<typename GraphTraits<NodeT *>::NodeRef>::value,
223 "Currently DominatorTreeBase supports only pointer nodes");
224 using NodeType = NodeT;
225 using NodePtr = NodeT *;
226 using ParentPtr = decltype(std::declval<NodeT *>()->getParent());
227 static_assert(std::is_pointer<ParentPtr>::value,
228 "Currently NodeT's parent must be a pointer type");
229 using ParentType = typename std::remove_pointer<ParentPtr>::type;
230 static constexpr bool IsPostDominator = IsPostDom;
231
232 using UpdateType = cfg::Update<NodePtr>;
233 using UpdateKind = cfg::UpdateKind;
234 static constexpr UpdateKind Insert = UpdateKind::Insert;
235 static constexpr UpdateKind Delete = UpdateKind::Delete;
236
237 enum class VerificationLevel { Fast, Basic, Full };
238
239protected:
240 // Dominators always have a single root; postdominators can have more.
241 SmallVector<NodeT *, IsPostDom ? 4 : 1> Roots;
242
243 using DomTreeNodeMapType =
244 DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>;
245 DomTreeNodeMapType DomTreeNodes;
246 DomTreeNodeBase<NodeT> *RootNode;
247 ParentPtr Parent = nullptr;
248
249 mutable bool DFSInfoValid = false;
250 mutable unsigned int SlowQueries = 0;
251
252 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>;
253
254 public:
255 DominatorTreeBase() {}
256
257 DominatorTreeBase(DominatorTreeBase &&Arg)
258 : Roots(std::move(Arg.Roots)),
259 DomTreeNodes(std::move(Arg.DomTreeNodes)),
260 RootNode(Arg.RootNode),
261 Parent(Arg.Parent),
262 DFSInfoValid(Arg.DFSInfoValid),
263 SlowQueries(Arg.SlowQueries) {
264 Arg.wipe();
265 }
266
267 DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
268 Roots = std::move(RHS.Roots);
269 DomTreeNodes = std::move(RHS.DomTreeNodes);
270 RootNode = RHS.RootNode;
271 Parent = RHS.Parent;
272 DFSInfoValid = RHS.DFSInfoValid;
273 SlowQueries = RHS.SlowQueries;
274 RHS.wipe();
275 return *this;
276 }
277
278 DominatorTreeBase(const DominatorTreeBase &) = delete;
279 DominatorTreeBase &operator=(const DominatorTreeBase &) = delete;
280
281 /// getRoots - Return the root blocks of the current CFG. This may include
282 /// multiple blocks if we are computing post dominators. For forward
283 /// dominators, this will always be a single block (the entry node).
284 ///
285 const SmallVectorImpl<NodeT *> &getRoots() const { return Roots; }
286
287 /// isPostDominator - Returns true if this analysis is based on postdominators.
288 ///
289 bool isPostDominator() const { return IsPostDominator; }
290
291 /// compare - Return false if the other dominator tree base matches this
292 /// dominator tree base. Otherwise return true.
293 bool compare(const DominatorTreeBase &Other) const {
294 if (Parent != Other.Parent) return true;
295
296 if (Roots.size() != Other.Roots.size())
297 return true;
298
299 if (!std::is_permutation(Roots.begin(), Roots.end(), Other.Roots.begin()))
300 return true;
301
302 const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
303 if (DomTreeNodes.size() != OtherDomTreeNodes.size())
304 return true;
305
306 for (const auto &DomTreeNode : DomTreeNodes) {
307 NodeT *BB = DomTreeNode.first;
308 typename DomTreeNodeMapType::const_iterator OI =
309 OtherDomTreeNodes.find(BB);
310 if (OI == OtherDomTreeNodes.end())
311 return true;
312
313 DomTreeNodeBase<NodeT> &MyNd = *DomTreeNode.second;
314 DomTreeNodeBase<NodeT> &OtherNd = *OI->second;
315
316 if (MyNd.compare(&OtherNd))
317 return true;
318 }
319
320 return false;
321 }
322
323 void releaseMemory() { reset(); }
324
325 /// getNode - return the (Post)DominatorTree node for the specified basic
326 /// block. This is the same as using operator[] on this class. The result
327 /// may (but is not required to) be null for a forward (backwards)
328 /// statically unreachable block.
329 DomTreeNodeBase<NodeT> *getNode(const NodeT *BB) const {
330 auto I = DomTreeNodes.find(BB);
331 if (I != DomTreeNodes.end())
[Step 9] Assuming the condition is true
[Step 10] Taking true branch
332 return I->second.get();
[Step 11] Calling 'unique_ptr::get'
[Step 26] Returning from 'unique_ptr::get'
[Step 27] Returning pointer
333 return nullptr;
334 }
[Step 28] Assigning value
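// Note (hedged caller-side sketch; 'Use' is a hypothetical consumer, not part
// of this header): getNode() and operator[] may return nullptr -- either via
// the explicit 'return nullptr' above or, as the analyzer assumes on this
// path, via a stored pointer whose value is unconstrained -- so callers must
// test the result before dereferencing it:
//   if (DomTreeNodeBase<NodeT> *N = DT.getNode(BB))
//     Use(N); // Safe: N is known non-null here.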
335
336 /// See getNode.
337 DomTreeNodeBase<NodeT> *operator[](const NodeT *BB) const {
338 return getNode(BB);
339 }
340
341 /// getRootNode - This returns the entry node for the CFG of the function. If
342 /// this tree represents the post-dominance relations for a function, however,
343 /// this root may be a node with the block == NULL. This is the case when
344 /// there are multiple exit nodes from a particular function. Consumers of
345 /// post-dominance information must be capable of dealing with this
346 /// possibility.
347 ///
348 DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; }
349 const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; }
350
351 /// Get all nodes dominated by R, including R itself.
352 void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const {
353 Result.clear();
354 const DomTreeNodeBase<NodeT> *RN = getNode(R);
355 if (!RN)
356 return; // If R is unreachable, it will not be present in the DOM tree.
357 SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL;
358 WL.push_back(RN);
359
360 while (!WL.empty()) {
361 const DomTreeNodeBase<NodeT> *N = WL.pop_back_val();
362 Result.push_back(N->getBlock());
363 WL.append(N->begin(), N->end());
364 }
365 }
366
367 /// properlyDominates - Returns true iff A dominates B and A != B.
368 /// Note that this is not a constant time operation!
369 ///
370 bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
371 const DomTreeNodeBase<NodeT> *B) const {
372 if (!A || !B)
373 return false;
374 if (A == B)
375 return false;
376 return dominates(A, B);
377 }
378
379 bool properlyDominates(const NodeT *A, const NodeT *B) const;
380
381 /// isReachableFromEntry - Return true if A is dominated by the entry
382 /// block of the function containing it.
383 bool isReachableFromEntry(const NodeT *A) const {
384 assert(!this->isPostDominator() &&
385        "This is not implemented for post dominators");
386 return isReachableFromEntry(getNode(const_cast<NodeT *>(A)));
387 }
388
389 bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; }
390
391 /// dominates - Returns true iff A dominates B. Note that this is not a
392 /// constant time operation!
393 ///
394 bool dominates(const DomTreeNodeBase<NodeT> *A,
395 const DomTreeNodeBase<NodeT> *B) const {
396 // A node trivially dominates itself.
397 if (B == A)
398 return true;
399
400 // An unreachable node is dominated by anything.
401 if (!isReachableFromEntry(B))
402 return true;
403
404 // And dominates nothing.
405 if (!isReachableFromEntry(A))
406 return false;
407
408 if (B->getIDom() == A) return true;
409
410 if (A->getIDom() == B) return false;
411
412 // A can only dominate B if it is higher in the tree.
413 if (A->getLevel() >= B->getLevel()) return false;
414
415 // Compare the result of the tree walk and the dfs numbers, if expensive
416 // checks are enabled.
417#ifdef EXPENSIVE_CHECKS
418 assert((!DFSInfoValid ||
419         (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) &&
420        "Tree walk disagrees with dfs numbers!");
421#endif
422
423 if (DFSInfoValid)
424 return B->DominatedBy(A);
425
426 // If we end up with too many slow queries, just update the
427 // DFS numbers on the theory that we are going to keep querying.
428 SlowQueries++;
429 if (SlowQueries > 32) {
430 updateDFSNumbers();
431 return B->DominatedBy(A);
432 }
433
434 return dominatedBySlowTreeWalk(A, B);
435 }
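
The fast path above (B->DominatedBy(A)) relies on the DFS-interval encoding maintained by updateDFSNumbers(): for reachable nodes with valid numbers, A dominates B exactly when B's in/out numbers nest inside A's. A hedged sketch of that containment test:

// Sketch, assuming AIn/AOut and BIn/BOut are the DFSNumIn/DFSNumOut values
// assigned by updateDFSNumbers(); this mirrors what DominatedBy checks.
bool dominatedByInterval(unsigned AIn, unsigned AOut,
                         unsigned BIn, unsigned BOut) {
  return AIn <= BIn && BOut <= AOut; // B's interval lies inside A's
}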
436
437 bool dominates(const NodeT *A, const NodeT *B) const;
438
439 NodeT *getRoot() const {
440 assert(this->Roots.size() == 1 && "Should always have entry node!");
441 return this->Roots[0];
442 }
443
444 /// findNearestCommonDominator - Find nearest common dominator basic block
445 /// for basic blocks A and B. If there is no such block then return nullptr.
446 NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) const {
447 assert(A && B && "Pointers are not valid");
448 assert(A->getParent() == B->getParent() &&
449 "Two blocks are not in same function");
450
451 // If either A or B is the entry block, then it is the nearest common dominator
452 // (for forward-dominators).
453 if (!isPostDominator()) {
454 NodeT &Entry = A->getParent()->front();
455 if (A == &Entry || B == &Entry)
456 return &Entry;
457 }
458
459 DomTreeNodeBase<NodeT> *NodeA = getNode(A);
460 DomTreeNodeBase<NodeT> *NodeB = getNode(B);
461
462 if (!NodeA || !NodeB) return nullptr;
463
464 // Use level information to go up the tree until the levels match. Then
465 // continue going up until we arrive at the same node.
466 while (NodeA && NodeA != NodeB) {
467 if (NodeA->getLevel() < NodeB->getLevel()) std::swap(NodeA, NodeB);
468
469 NodeA = NodeA->IDom;
470 }
471
472 return NodeA ? NodeA->getBlock() : nullptr;
473 }
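
A typical use of the level-guided walk above (a sketch; names and the wrapper are assumptions, not part of the header):

template <typename NodeT>
NodeT *hoistPoint(const llvm::DomTreeBase<NodeT> &DT, NodeT *BB1, NodeT *BB2) {
  // The returned block, if any, dominates both inputs, so computation
  // shared by BB1 and BB2 may be placed there; nullptr means none exists.
  return DT.findNearestCommonDominator(BB1, BB2);
}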
474
475 const NodeT *findNearestCommonDominator(const NodeT *A,
476 const NodeT *B) const {
477 // Cast away the const qualifiers here. This is ok since
478 // const is re-introduced on the return type.
479 return findNearestCommonDominator(const_cast<NodeT *>(A),
480 const_cast<NodeT *>(B));
481 }
482
483 bool isVirtualRoot(const DomTreeNodeBase<NodeT> *A) const {
484 return isPostDominator() && !A->getBlock();
485 }
486
487 //===--------------------------------------------------------------------===//
488 // API to update (Post)DominatorTree information based on modifications to
489 // the CFG...
490
491 /// Inform the dominator tree about a sequence of CFG edge insertions and
492 /// deletions and perform a batch update on the tree.
493 ///
494 /// This function should be used when there were multiple CFG updates after
495 /// the last dominator tree update. It takes care of performing the updates
496 /// in sync with the CFG and optimizes away the redundant operations that
497 /// cancel each other.
498 /// The function expects the sequence of updates to be balanced. E.g.:
499 /// - {{Insert, A, B}, {Delete, A, B}, {Insert, A, B}} is fine, because
500 /// logically it results in a single insertion.
501 /// - {{Insert, A, B}, {Insert, A, B}} is invalid, because it doesn't make
502 /// sense to insert the same edge twice.
503 ///
504 /// What's more, the function assumes that it's safe to ask every node in the
505 /// CFG about its children and inverse children. This implies that deletions
506 /// of CFG edges must not delete the CFG nodes before calling this function.
507 ///
508 /// The applyUpdates function can reorder the updates and remove redundant
509 /// ones internally. The batch updater is also able to detect sequences of
510 /// zero and exactly one update -- it's optimized to do less work in these
511 /// cases.
512 ///
513 /// Note that for postdominators it automatically takes care of applying
514 /// updates on reverse edges internally (so there's no need to swap the
515 /// From and To pointers when constructing DominatorTree::UpdateType).
516 /// The type of updates is the same for DomTreeBase<T> and PostDomTreeBase<T>
517 /// with the same template parameter T.
518 ///
519 /// \param Updates An unordered sequence of updates to perform.
520 ///
521 void applyUpdates(ArrayRef<UpdateType> Updates) {
522 DomTreeBuilder::ApplyUpdates(*this, Updates);
523 }
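
A usage sketch for the batch API above, assuming an IR-level DominatorTree DT and blocks A, B, C, where the CFG itself has already been edited to drop the edge A->B and add A->C:

SmallVector<DominatorTree::UpdateType, 2> Updates;
Updates.push_back({DominatorTree::Delete, A, B});
Updates.push_back({DominatorTree::Insert, A, C});
DT.applyUpdates(Updates); // one batch; mutually cancelling pairs are dropped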
524
525 /// Inform the dominator tree about a CFG edge insertion and update the tree.
526 ///
527 /// This function has to be called just before or just after making the update
528 /// on the actual CFG. There cannot be any other updates that the dominator
529 /// tree doesn't know about.
530 ///
531 /// Note that for postdominators it automatically takes care of inserting
532 /// a reverse edge internally (so there's no need to swap the parameters).
533 ///
534 void insertEdge(NodeT *From, NodeT *To) {
535 assert(From);
536 assert(To);
537 assert(From->getParent() == Parent);
538 assert(To->getParent() == Parent);
539 DomTreeBuilder::InsertEdge(*this, From, To);
540 }
541
542 /// Inform the dominator tree about a CFG edge deletion and update the tree.
543 ///
544 /// This function has to be called just after making the update on the actual
545 /// CFG. An internal function checks, in DEBUG mode, that the edge no longer
546 /// exists in the CFG. There cannot be any other updates that the
547 /// dominator tree doesn't know about.
548 ///
549 /// Note that for postdominators it automatically takes care of deleting
550 /// a reverse edge internally (so there's no need to swap the parameters).
551 ///
552 void deleteEdge(NodeT *From, NodeT *To) {
553 assert(From);
554 assert(To);
555 assert(From->getParent() == Parent);
556 assert(To->getParent() == Parent);
557 DomTreeBuilder::DeleteEdge(*this, From, To);
558 }
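
A sketch of the single-update path (the wrapper and names are assumptions; the CFG edit and the tree update must stay adjacent, with no other unreported CFG changes in between):

template <typename NodeT>
void retargetBranch(llvm::DomTreeBase<NodeT> &DT, NodeT *From,
                    NodeT *OldSucc, NodeT *NewSucc) {
  // Assumes the CFG was just rewired from From->OldSucc to From->NewSucc.
  DT.deleteEdge(From, OldSucc); // called just after the CFG change
  DT.insertEdge(From, NewSucc); // postdominators reverse the edge internally
}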
559
560 /// Add a new node to the dominator tree information.
561 ///
562 /// This creates a new node as a child of DomBB dominator node, linking it
563 /// into the children list of the immediate dominator.
564 ///
565 /// \param BB New node in CFG.
566 /// \param DomBB CFG node that is dominator for BB.
567 /// \returns New dominator tree node that represents new CFG node.
568 ///
569 DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
570 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
571 DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
572 assert(IDomNode && "Not immediate dominator specified for block!");
573 DFSInfoValid = false;
574 return (DomTreeNodes[BB] = IDomNode->addChild(
575 llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get();
576 }
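
A usage sketch for addNewBlock (hedged; the wrapper and names are assumptions, with NewBB freshly created so that Pred is its sole predecessor and dominator):

template <typename NodeT>
void registerNewBlock(llvm::DomTreeBase<NodeT> &DT, NodeT *NewBB, NodeT *Pred) {
  llvm::DomTreeNodeBase<NodeT> *N = DT.addNewBlock(NewBB, Pred);
  assert(N->getIDom() == DT.getNode(Pred) && "linked under Pred in the tree");
}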
577
578 /// Add a new node to the forward dominator tree and make it a new root.
579 ///
580 /// \param BB New node in CFG.
581 /// \returns New dominator tree node that represents new CFG node.
582 ///
583 DomTreeNodeBase<NodeT> *setNewRoot(NodeT *BB) {
584 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
585 assert(!this->isPostDominator() &&
586 "Cannot change root of post-dominator tree");
587 DFSInfoValid = false;
588 DomTreeNodeBase<NodeT> *NewNode = (DomTreeNodes[BB] =
589 llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, nullptr)).get();
590 if (Roots.empty()) {
591 addRoot(BB);
592 } else {
593 assert(Roots.size() == 1);
594 NodeT *OldRoot = Roots.front();
595 auto &OldNode = DomTreeNodes[OldRoot];
596 OldNode = NewNode->addChild(std::move(DomTreeNodes[OldRoot]));
597 OldNode->IDom = NewNode;
598 OldNode->UpdateLevel();
599 Roots[0] = BB;
600 }
601 return RootNode = NewNode;
602 }
603
604 /// changeImmediateDominator - This method is used to update the dominator
605 /// tree information when a node's immediate dominator changes.
606 ///
607 void changeImmediateDominator(DomTreeNodeBase<NodeT> *N,
608 DomTreeNodeBase<NodeT> *NewIDom) {
609 assert(N && NewIDom && "Cannot change null node pointers!");
610 DFSInfoValid = false;
611 N->setIDom(NewIDom);
612 }
613
614 void changeImmediateDominator(NodeT *BB, NodeT *NewBB) {
615 changeImmediateDominator(getNode(BB), getNode(NewBB));
616 }
617
618 /// eraseNode - Removes a node from the dominator tree. Block must not
619 /// dominate any other blocks. Removes node from its immediate dominator's
620 /// children list. Deletes dominator node associated with basic block BB.
621 void eraseNode(NodeT *BB) {
622 DomTreeNodeBase<NodeT> *Node = getNode(BB);
623 assert(Node && "Removing node that isn't in dominator tree.");
624 assert(Node->getChildren().empty() && "Node is not a leaf node.");
625
626 DFSInfoValid = false;
627
628 // Remove node from immediate dominator's children list.
629 DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
630 if (IDom) {
631 const auto I = find(IDom->Children, Node);
632 assert(I != IDom->Children.end() &&
633 "Not in immediate dominator children set!");
634 // I am no longer your child...
635 IDom->Children.erase(I);
636 }
637
638 DomTreeNodes.erase(BB);
639
640 if (!IsPostDom) return;
641
642 // Remember to update PostDominatorTree roots.
643 auto RIt = llvm::find(Roots, BB);
644 if (RIt != Roots.end()) {
645 std::swap(*RIt, Roots.back());
646 Roots.pop_back();
647 }
648 }
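
A sketch of the intended call sequence (the wrapper and names are assumptions): the block must be a dominator-tree leaf before it can be erased, and the CFG node may be deleted only afterwards, since eraseNode still reads it.

template <typename NodeT>
void removeDeadBlock(llvm::DomTreeBase<NodeT> &DT, NodeT *BB) {
  assert(DT.getNode(BB)->getChildren().empty() && "must not dominate others");
  DT.eraseNode(BB); // unlink from the IDom's children, drop the tree node
  // Only after this may the CFG node BB itself be deleted.
}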
649
650 /// splitBlock - BB is split and now it has one successor. Update dominator
651 /// tree to reflect this change.
652 void splitBlock(NodeT *NewBB) {
653 if (IsPostDominator)
654 Split<Inverse<NodeT *>>(NewBB);
655 else
656 Split<NodeT *>(NewBB);
657 }
658
659 /// print - Convert to human readable form
660 ///
661 void print(raw_ostream &O) const {
662 O << "=============================--------------------------------\n";
663 if (IsPostDominator)
664 O << "Inorder PostDominator Tree: ";
665 else
666 O << "Inorder Dominator Tree: ";
667 if (!DFSInfoValid)
668 O << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
669 O << "\n";
670
671 // The postdom tree can have a null root if there are no returns.
672 if (getRootNode()) PrintDomTree<NodeT>(getRootNode(), O, 1);
673 if (IsPostDominator) {
674 O << "Roots: ";
675 for (const NodePtr Block : Roots) {
676 Block->printAsOperand(O, false);
677 O << " ";
678 }
679 O << "\n";
680 }
681 }
682
683public:
684 /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking
685 /// dominator tree in dfs order.
686 void updateDFSNumbers() const {
687 if (DFSInfoValid) {
688 SlowQueries = 0;
689 return;
690 }
691
692 SmallVector<std::pair<const DomTreeNodeBase<NodeT> *,
693 typename DomTreeNodeBase<NodeT>::const_iterator>,
694 32> WorkStack;
695
696 const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode();
697 assert((!Parent || ThisRoot) && "Empty constructed DomTree");
698 if (!ThisRoot)
699 return;
700
701 // Both dominators and postdominators have a single root node. In the
702 // case of PostDominatorTree, this node is a virtual root.
703 WorkStack.push_back({ThisRoot, ThisRoot->begin()});
704
705 unsigned DFSNum = 0;
706 ThisRoot->DFSNumIn = DFSNum++;
707
708 while (!WorkStack.empty()) {
709 const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first;
710 const auto ChildIt = WorkStack.back().second;
711
712 // If we visited all of the children of this node, "recurse" back up the
713 // stack setting the DFSNumOut.
714 if (ChildIt == Node->end()) {
715 Node->DFSNumOut = DFSNum++;
716 WorkStack.pop_back();
717 } else {
718 // Otherwise, recursively visit this child.
719 const DomTreeNodeBase<NodeT> *Child = *ChildIt;
720 ++WorkStack.back().second;
721
722 WorkStack.push_back({Child, Child->begin()});
723 Child->DFSNumIn = DFSNum++;
724 }
725 }
726
727 SlowQueries = 0;
728 DFSInfoValid = true;
729 }
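
The numbering above gives each node an interval [DFSNumIn, DFSNumOut] such that ancestor intervals enclose descendant intervals, which is what makes the O(1) DominatedBy query valid. A small worked example (tree shape assumed):

//   A          walk order: A in=0, B in=1/out=2, C in=3/out=4, A out=5
//  / \         A dominates B since 0 <= 1 and 2 <= 5;
// B   C        B does not dominate C: [1,2] and [3,4] are disjoint.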
730
731 /// recalculate - compute a dominator tree for the given function
732 void recalculate(ParentType &Func) {
733 Parent = &Func;
734 DomTreeBuilder::Calculate(*this);
735 }
736
737 void recalculate(ParentType &Func, ArrayRef<UpdateType> Updates) {
738 Parent = &Func;
739 DomTreeBuilder::CalculateWithUpdates(*this, Updates);
740 }
741
742 /// verify - checks if the tree is correct. There are 3 levels of verification:
743 /// - Full -- verifies if the tree is correct by making sure all the
744 /// properties (including the parent and the sibling property)
745 /// hold.
746 /// Takes O(N^3) time.
747 ///
748 /// - Basic -- checks if the tree is correct, but compares it to a freshly
749 /// constructed tree instead of checking the sibling property.
750 /// Takes O(N^2) time.
751 ///
752 /// - Fast -- checks basic tree structure and compares it with a freshly
753 /// constructed tree.
754 /// Takes O(N^2) time worst case, but is faster in practice (same
755 /// as tree construction).
756 bool verify(VerificationLevel VL = VerificationLevel::Full) const {
757 return DomTreeBuilder::Verify(*this, VL);
758 }
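
A sketch of typical use under expensive checks (assumed context; DT is some computed dominator tree):

assert(DT.verify() && "dominator tree out of sync"); // Full check by default
// Passing VerificationLevel::Fast instead trades the sibling-property check
// for a comparison against a freshly constructed tree, as documented above.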
759
760protected:
761 void addRoot(NodeT *BB) { this->Roots.push_back(BB); }
762
763 void reset() {
764 DomTreeNodes.clear();
765 Roots.clear();
766 RootNode = nullptr;
767 Parent = nullptr;
768 DFSInfoValid = false;
769 SlowQueries = 0;
770 }
771
772 // NewBB is split and now it has one successor. Update dominator tree to
773 // reflect this change.
774 template <class N>
775 void Split(typename GraphTraits<N>::NodeRef NewBB) {
776 using GraphT = GraphTraits<N>;
777 using NodeRef = typename GraphT::NodeRef;
778 assert(std::distance(GraphT::child_begin(NewBB),
779 GraphT::child_end(NewBB)) == 1 &&
780 "NewBB should have a single successor!");
781 NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
782
783 std::vector<NodeRef> PredBlocks;
784 for (const auto &Pred : children<Inverse<N>>(NewBB))
785 PredBlocks.push_back(Pred);
786
787 assert(!PredBlocks.empty() && "No predblocks?");
788
789 bool NewBBDominatesNewBBSucc = true;
790 for (const auto &Pred : children<Inverse<N>>(NewBBSucc)) {
791 if (Pred != NewBB && !dominates(NewBBSucc, Pred) &&
792 isReachableFromEntry(Pred)) {
793 NewBBDominatesNewBBSucc = false;
794 break;
795 }
796 }
797
798 // Find NewBB's immediate dominator and create new dominator tree node for
799 // NewBB.
800 NodeT *NewBBIDom = nullptr;
801 unsigned i = 0;
802 for (i = 0; i < PredBlocks.size(); ++i)
803 if (isReachableFromEntry(PredBlocks[i])) {
804 NewBBIDom = PredBlocks[i];
805 break;
806 }
807
808 // It's possible that none of the predecessors of NewBB are reachable;
809 // in that case, NewBB itself is unreachable, so nothing needs to be
810 // changed.
811 if (!NewBBIDom) return;
812
813 for (i = i + 1; i < PredBlocks.size(); ++i) {
814 if (isReachableFromEntry(PredBlocks[i]))
815 NewBBIDom = findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
816 }
817
818 // Create the new dominator tree node... and set the idom of NewBB.
819 DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom);
820
821 // If NewBB strictly dominates other blocks, then it is now the immediate
822 // dominator of NewBBSucc. Update the dominator tree as appropriate.
823 if (NewBBDominatesNewBBSucc) {
824 DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc);
825 changeImmediateDominator(NewBBSuccNode, NewBBNode);
826 }
827 }
828
829 private:
830 bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
831 const DomTreeNodeBase<NodeT> *B) const {
832 assert(A != B);
833 assert(isReachableFromEntry(B));
834 assert(isReachableFromEntry(A));
835
836 const unsigned ALevel = A->getLevel();
837 const DomTreeNodeBase<NodeT> *IDom;
838
839 // Don't walk nodes above A's subtree. When we reach A's level, we must
840 // either find A or be in some other subtree not dominated by A.
841 while ((IDom = B->getIDom()) != nullptr && IDom->getLevel() >= ALevel)
842 B = IDom; // Walk up the tree
843
844 return B == A;
845 }
846
847 /// Wipe this tree's state without releasing any resources.
848 ///
849 /// This is essentially a post-move helper only. It leaves the object in an
850 /// assignable and destroyable state, but otherwise invalid.
851 void wipe() {
852 DomTreeNodes.clear();
853 RootNode = nullptr;
854 Parent = nullptr;
855 }
856};
857
858template <typename T>
859using DomTreeBase = DominatorTreeBase<T, false>;
860
861template <typename T>
862using PostDomTreeBase = DominatorTreeBase<T, true>;
863
864// These two functions are declared out of line as a workaround for building
865// with old (< r147295) versions of clang because of pr11642.
866template <typename NodeT, bool IsPostDom>
867bool DominatorTreeBase<NodeT, IsPostDom>::dominates(const NodeT *A,
868 const NodeT *B) const {
869 if (A == B)
870 return true;
871
872 // Cast away the const qualifiers here. This is ok since
873 // this function doesn't actually return the values returned
874 // from getNode.
875 return dominates(getNode(const_cast<NodeT *>(A)),
876 getNode(const_cast<NodeT *>(B)));
877}
878template <typename NodeT, bool IsPostDom>
879bool DominatorTreeBase<NodeT, IsPostDom>::properlyDominates(
880 const NodeT *A, const NodeT *B) const {
881 if (A == B)
882 return false;
883
884 // Cast away the const qualifiers here. This is ok since
885 // this function doesn't actually return the values returned
886 // from getNode.
887 return dominates(getNode(const_cast<NodeT *>(A)),
888 getNode(const_cast<NodeT *>(B)));
889}
890
891} // end namespace llvm
892
893#endif // LLVM_SUPPORT_GENERICDOMTREE_H

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/bits/unique_ptr.h

1// unique_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2008-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/unique_ptr.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30 #ifndef _UNIQUE_PTR_H
31 #define _UNIQUE_PTR_H 1
32
33#include <bits/c++config.h>
34#include <debug/assertions.h>
35#include <type_traits>
36#include <utility>
37#include <tuple>
38
39 namespace std _GLIBCXX_VISIBILITY(default)
40{
41_GLIBCXX_BEGIN_NAMESPACE_VERSION
42
43 /**
44 * @addtogroup pointer_abstractions
45 * @{
46 */
47
48 #if _GLIBCXX_USE_DEPRECATED
49 template<typename> class auto_ptr;
50#endif
51
52 /// Primary template of default_delete, used by unique_ptr
53 template<typename _Tp>
54 struct default_delete
55 {
56 /// Default constructor
57 constexpr default_delete() noexcept = default;
58
59 /** @brief Converting constructor.
60 *
61 * Allows conversion from a deleter for arrays of another type, @p _Up,
62 * only if @p _Up* is convertible to @p _Tp*.
63 */
64 template<typename _Up, typename = typename
65 enable_if<is_convertible<_Up*, _Tp*>::value>::type>
66 default_delete(const default_delete<_Up>&) noexcept { }
67
68 /// Calls @c delete @p __ptr
69 void
70 operator()(_Tp* __ptr) const
71 {
72 static_assert(!is_void<_Tp>::value,
73 "can't delete pointer to incomplete type");
74 static_assert(sizeof(_Tp)>0,
75 "can't delete pointer to incomplete type");
76 delete __ptr;
77 }
78 };
79
80 // _GLIBCXX_RESOLVE_LIB_DEFECTS
81 // DR 740 - omit specialization for array objects with a compile time length
82 /// Specialization for arrays, default_delete.
83 template<typename _Tp>
84 struct default_delete<_Tp[]>
85 {
86 public:
87 /// Default constructor
88 constexpr default_delete() noexcept = default;
89
90 /** @brief Converting constructor.
91 *
92 * Allows conversion from a deleter for arrays of another type, such as
93 * a const-qualified version of @p _Tp.
94 *
95 * Conversions from types derived from @c _Tp are not allowed because
96 * it is unsafe to @c delete[] an array of derived types through a
97 * pointer to the base type.
98 */
99 template<typename _Up, typename = typename
100 enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type>
101 default_delete(const default_delete<_Up[]>&) noexcept { }
102
103 /// Calls @c delete[] @p __ptr
104 template<typename _Up>
105 typename enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type
106 operator()(_Up* __ptr) const
107 {
108 static_assert(sizeof(_Tp)>0,
109 "can't delete pointer to incomplete type");
110 delete [] __ptr;
111 }
112 };
113
114 /// 20.7.1.2 unique_ptr for single objects.
115 template <typename _Tp, typename _Dp = default_delete<_Tp> >
116 class unique_ptr
117 {
118 // use SFINAE to determine whether _Del::pointer exists
119 class _Pointer
120 {
121 template<typename _Up>
122 static typename _Up::pointer __test(typename _Up::pointer*);
123
124 template<typename _Up>
125 static _Tp* __test(...);
126
127 typedef typename remove_reference<_Dp>::type _Del;
128
129 public:
130 typedef decltype(__test<_Del>(0)) type;
131 };
132
133 typedef std::tuple<typename _Pointer::type, _Dp> __tuple_type;
134 __tuple_type _M_t;
135
136 public:
137 typedef typename _Pointer::type pointer;
138 typedef _Tp element_type;
139 typedef _Dp deleter_type;
140
141
142 // helper template for detecting a safe conversion from another
143 // unique_ptr
144 template<typename _Up, typename _Ep>
145 using __safe_conversion_up = __and_<
146 is_convertible<typename unique_ptr<_Up, _Ep>::pointer, pointer>,
147 __not_<is_array<_Up>>,
148 __or_<__and_<is_reference<deleter_type>,
149 is_same<deleter_type, _Ep>>,
150 __and_<__not_<is_reference<deleter_type>>,
151 is_convertible<_Ep, deleter_type>>
152 >
153 >;
154
155 // Constructors.
156
157 /// Default constructor, creates a unique_ptr that owns nothing.
158 constexpr unique_ptr() noexcept
159 : _M_t()
160 { static_assert(!is_pointer<deleter_type>::value,
161 "constructed with null function pointer deleter"); }
162
163 /** Takes ownership of a pointer.
164 *
165 * @param __p A pointer to an object of @c element_type
166 *
167 * The deleter will be value-initialized.
168 */
169 explicit
170 unique_ptr(pointer __p) noexcept
171 : _M_t()
172 {
173 std::get<0>(_M_t) = __p;
174 static_assert(!is_pointer<deleter_type>::value,
175 "constructed with null function pointer deleter");
176 }
177
178 /** Takes ownership of a pointer.
179 *
180 * @param __p A pointer to an object of @c element_type
181 * @param __d A reference to a deleter.
182 *
183 * The deleter will be initialized with @p __d
184 */
185 unique_ptr(pointer __p,
186 typename conditional<is_reference<deleter_type>::value,
187 deleter_type, const deleter_type&>::type __d) noexcept
188 : _M_t(__p, __d) { }
189
190 /** Takes ownership of a pointer.
191 *
192 * @param __p A pointer to an object of @c element_type
193 * @param __d An rvalue reference to a deleter.
194 *
195 * The deleter will be initialized with @p std::move(__d)
196 */
197 unique_ptr(pointer __p,
198 typename remove_reference<deleter_type>::type&& __d) noexcept
199 : _M_t(std::move(__p), std::move(__d))
200 { static_assert(!std::is_reference<deleter_type>::value,
201 "rvalue deleter bound to reference"); }
202
203 /// Creates a unique_ptr that owns nothing.
204 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
205
206 // Move constructors.
207
208 /// Move constructor.
209 unique_ptr(unique_ptr&& __u) noexcept
210 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
211
212 /** @brief Converting constructor from another type
213 *
214 * Requires that the pointer owned by @p __u is convertible to the
215 * type of pointer owned by this object, @p __u does not own an array,
216 * and @p __u has a compatible deleter type.
217 */
218 template<typename _Up, typename _Ep, typename = _Require<
219 __safe_conversion_up<_Up, _Ep>,
220 typename conditional<is_reference<_Dp>::value,
221 is_same<_Ep, _Dp>,
222 is_convertible<_Ep, _Dp>>::type>>
223 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
224 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
225 { }
226
227 #if _GLIBCXX_USE_DEPRECATED
228 /// Converting constructor from @c auto_ptr
229 template<typename _Up, typename = _Require<
230 is_convertible<_Up*, _Tp*>, is_same<_Dp, default_delete<_Tp>>>>
231 unique_ptr(auto_ptr<_Up>&& __u) noexcept;
232#endif
233
234 /// Destructor, invokes the deleter if the stored pointer is not null.
235 ~unique_ptr() noexcept
236 {
237 auto& __ptr = std::get<0>(_M_t);
238 if (__ptr != nullptr)
239 get_deleter()(__ptr);
240 __ptr = pointer();
241 }
242
243 // Assignment.
244
245 /** @brief Move assignment operator.
246 *
247 * @param __u The object to transfer ownership from.
248 *
249 * Invokes the deleter first if this object owns a pointer.
250 */
251 unique_ptr&
252 operator=(unique_ptr&& __u) noexcept
253 {
254 reset(__u.release());
255 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
256 return *this;
257 }
258
259 /** @brief Assignment from another type.
260 *
261 * @param __u The object to transfer ownership from, which owns a
262 * convertible pointer to a non-array object.
263 *
264 * Invokes the deleter first if this object owns a pointer.
265 */
266 template<typename _Up, typename _Ep>
267 typename enable_if< __and_<
268 __safe_conversion_up<_Up, _Ep>,
269 is_assignable<deleter_type&, _Ep&&>
270 >::value,
271 unique_ptr&>::type
272 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
273 {
274 reset(__u.release());
275 get_deleter() = std::forward<_Ep>(__u.get_deleter());
276 return *this;
277 }
278
279 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
280 unique_ptr&
281 operator=(nullptr_t) noexcept
282 {
283 reset();
284 return *this;
285 }
286
287 // Observers.
288
289 /// Dereference the stored pointer.
290 typename add_lvalue_reference<element_type>::type
291 operator*() const
292 {
293 __glibcxx_assert(get() != pointer());
294 return *get();
295 }
296
297 /// Return the stored pointer.
298 pointer
299 operator->() const noexcept
300 {
301 _GLIBCXX_DEBUG_PEDASSERT(get() != pointer());
302 return get();
303 }
304
305 /// Return the stored pointer.
306 pointer
307 get() const noexcept
308 { return std::get<0>(_M_t); }
Step 12: Calling 'get<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
Step 23: Returning from 'get<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
Step 24: Returning pointer
Step 25: Assigning value
309
310 /// Return a reference to the stored deleter.
311 deleter_type&
312 get_deleter() noexcept
313 { return std::get<1>(_M_t); }
314
315 /// Return a reference to the stored deleter.
316 const deleter_type&
317 get_deleter() const noexcept
318 { return std::get<1>(_M_t); }
319
320 /// Return @c true if the stored pointer is not null.
321 explicit operator bool() const noexcept
322 { return get() == pointer() ? false : true; }
323
324 // Modifiers.
325
326 /// Release ownership of any stored pointer.
327 pointer
328 release() noexcept
329 {
330 pointer __p = get();
331 std::get<0>(_M_t) = pointer();
332 return __p;
333 }
334
335 /** @brief Replace the stored pointer.
336 *
337 * @param __p The new pointer to store.
338 *
339 * The deleter will be invoked if a pointer is already owned.
340 */
341 void
342 reset(pointer __p = pointer()) noexcept
343 {
344 using std::swap;
345 swap(std::get<0>(_M_t), __p);
346 if (__p != pointer())
347 get_deleter()(__p);
348 }
349
350 /// Exchange the pointer and deleter with another object.
351 void
352 swap(unique_ptr& __u) noexcept
353 {
354 using std::swap;
355 swap(_M_t, __u._M_t);
356 }
357
358 // Disable copy from lvalue.
359 unique_ptr(const unique_ptr&) = delete;
360 unique_ptr& operator=(const unique_ptr&) = delete;
361 };
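
A short sketch (not part of the header) of why callers must null-check: get() returns whatever pointer is stored, with no diagnostic of its own. This is the get() the analyzer's step notes above pass through on the way to the null dereference reported in InlineSpiller.cpp.

#include <memory>
void sketch() {
  std::unique_ptr<int> P; // owns nothing
  int *Raw = P.get();     // Raw == nullptr; get() performs no check
  if (Raw)                // the caller-side null check is mandatory
    *Raw = 42;            // dereferencing without it would be UB
}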
362
363 /// 20.7.1.3 unique_ptr for array objects with a runtime length
364 // [unique.ptr.runtime]
365 // _GLIBCXX_RESOLVE_LIB_DEFECTS
366 // DR 740 - omit specialization for array objects with a compile time length
367 template<typename _Tp, typename _Dp>
368 class unique_ptr<_Tp[], _Dp>
369 {
370 // use SFINAE to determine whether _Del::pointer exists
371 class _Pointer
372 {
373 template<typename _Up>
374 static typename _Up::pointer __test(typename _Up::pointer*);
375
376 template<typename _Up>
377 static _Tp* __test(...);
378
379 typedef typename remove_reference<_Dp>::type _Del;
380
381 public:
382 typedef decltype(__test<_Del>(0)) type;
383 };
384
385 typedef std::tuple<typename _Pointer::type, _Dp> __tuple_type;
386 __tuple_type _M_t;
387
388 template<typename _Up>
389 using __remove_cv = typename remove_cv<_Up>::type;
390
391 // like is_base_of<_Tp, _Up> but false if unqualified types are the same
392 template<typename _Up>
393 using __is_derived_Tp
394 = __and_< is_base_of<_Tp, _Up>,
395 __not_<is_same<__remove_cv<_Tp>, __remove_cv<_Up>>> >;
396
397
398 public:
399 typedef typename _Pointer::type pointer;
400 typedef _Tp element_type;
401 typedef _Dp deleter_type;
402
403 // helper template for detecting a safe conversion from another
404 // unique_ptr
405 template<typename _Up, typename _Ep,
406 typename _Up_up = unique_ptr<_Up, _Ep>,
407 typename _Up_element_type = typename _Up_up::element_type>
408 using __safe_conversion_up = __and_<
409 is_array<_Up>,
410 is_same<pointer, element_type*>,
411 is_same<typename _Up_up::pointer, _Up_element_type*>,
412 is_convertible<_Up_element_type(*)[], element_type(*)[]>,
413 __or_<__and_<is_reference<deleter_type>, is_same<deleter_type, _Ep>>,
414 __and_<__not_<is_reference<deleter_type>>,
415 is_convertible<_Ep, deleter_type>>>
416 >;
417
418 // helper template for detecting a safe conversion from a raw pointer
419 template<typename _Up>
420 using __safe_conversion_raw = __and_<
421 __or_<__or_<is_same<_Up, pointer>,
422 is_same<_Up, nullptr_t>>,
423 __and_<is_pointer<_Up>,
424 is_same<pointer, element_type*>,
425 is_convertible<
426 typename remove_pointer<_Up>::type(*)[],
427 element_type(*)[]>
428 >
429 >
430 >;
431
432 // Constructors.
433
434 /// Default constructor, creates a unique_ptr that owns nothing.
435 constexpr unique_ptr() noexcept
436 : _M_t()
437 { static_assert(!std::is_pointer<deleter_type>::value,
438 "constructed with null function pointer deleter"); }
439
440 /** Takes ownership of a pointer.
441 *
442 * @param __p A pointer to an array of a type safely convertible
443 * to an array of @c element_type
444 *
445 * The deleter will be value-initialized.
446 */
447 template<typename _Up,
448 typename = typename enable_if<
449 __safe_conversion_raw<_Up>::value, bool>::type>
450 explicit
451 unique_ptr(_Up __p) noexcept
452 : _M_t(__p, deleter_type())
453 { static_assert(!is_pointer<deleter_type>::value,
454 "constructed with null function pointer deleter"); }
455
456 /** Takes ownership of a pointer.
457 *
458 * @param __p A pointer to an array of a type safely convertible
459 * to an array of @c element_type
460 * @param __d A reference to a deleter.
461 *
462 * The deleter will be initialized with @p __d
463 */
464 template<typename _Up,
465 typename = typename enable_if<
466 __safe_conversion_raw<_Up>::value, bool>::type>
467 unique_ptr(_Up __p,
468 typename conditional<is_reference<deleter_type>::value,
469 deleter_type, const deleter_type&>::type __d) noexcept
470 : _M_t(__p, __d) { }
471
472 /** Takes ownership of a pointer.
473 *
474 * @param __p A pointer to an array of a type safely convertible
475 * to an array of @c element_type
476 * @param __d An rvalue reference to a deleter.
477 *
478 * The deleter will be initialized with @p std::move(__d)
479 */
480 template<typename _Up,
481 typename = typename enable_if<
482 __safe_conversion_raw<_Up>::value, bool>::type>
483 unique_ptr(_Up __p, typename
484 remove_reference<deleter_type>::type&& __d) noexcept
485 : _M_t(std::move(__p), std::move(__d))
486 { static_assert(!is_reference<deleter_type>::value,
487 "rvalue deleter bound to reference"); }
488
489 /// Move constructor.
490 unique_ptr(unique_ptr&& __u) noexcept
491 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
492
493 /// Creates a unique_ptr that owns nothing.
494 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
495
496 template<typename _Up, typename _Ep,
497 typename = _Require<__safe_conversion_up<_Up, _Ep>>>
498 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
499 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
500 { }
501
502 /// Destructor, invokes the deleter if the stored pointer is not null.
503 ~unique_ptr()
504 {
505 auto& __ptr = std::get<0>(_M_t);
506 if (__ptr != nullptr)
507 get_deleter()(__ptr);
508 __ptr = pointer();
509 }
510
511 // Assignment.
512
513 /** @brief Move assignment operator.
514 *
515 * @param __u The object to transfer ownership from.
516 *
517 * Invokes the deleter first if this object owns a pointer.
518 */
519 unique_ptr&
520 operator=(unique_ptr&& __u) noexcept
521 {
522 reset(__u.release());
523 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
524 return *this;
525 }
526
527 /** @brief Assignment from another type.
528 *
529 * @param __u The object to transfer ownership from, which owns a
530 * convertible pointer to an array object.
531 *
532 * Invokes the deleter first if this object owns a pointer.
533 */
534 template<typename _Up, typename _Ep>
535 typename
536 enable_if<__and_<__safe_conversion_up<_Up, _Ep>,
537 is_assignable<deleter_type&, _Ep&&>
538 >::value,
539 unique_ptr&>::type
540 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
541 {
542 reset(__u.release());
543 get_deleter() = std::forward<_Ep>(__u.get_deleter());
544 return *this;
545 }
546
547 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
548 unique_ptr&
549 operator=(nullptr_t) noexcept
550 {
551 reset();
552 return *this;
553 }
554
555 // Observers.
556
557 /// Access an element of owned array.
558 typename std::add_lvalue_reference<element_type>::type
559 operator[](size_t __i) const
560 {
561 __glibcxx_assert(get() != pointer());
562 return get()[__i];
563 }
564
565 /// Return the stored pointer.
566 pointer
567 get() const noexcept
568 { return std::get<0>(_M_t); }
569
570 /// Return a reference to the stored deleter.
571 deleter_type&
572 get_deleter() noexcept
573 { return std::get<1>(_M_t); }
574
575 /// Return a reference to the stored deleter.
576 const deleter_type&
577 get_deleter() const noexcept
578 { return std::get<1>(_M_t); }
579
580 /// Return @c true if the stored pointer is not null.
581 explicit operator bool() const noexcept
582 { return get() == pointer() ? false : true; }
583
584 // Modifiers.
585
586 /// Release ownership of any stored pointer.
587 pointer
588 release() noexcept
589 {
590 pointer __p = get();
591 std::get<0>(_M_t) = pointer();
592 return __p;
593 }
594
595 /** @brief Replace the stored pointer.
596 *
597 * @param __p The new pointer to store.
598 *
599 * The deleter will be invoked if a pointer is already owned.
600 */
601 template <typename _Up,
602 typename = _Require<
603 __or_<is_same<_Up, pointer>,
604 __and_<is_same<pointer, element_type*>,
605 is_pointer<_Up>,
606 is_convertible<
607 typename remove_pointer<_Up>::type(*)[],
608 element_type(*)[]
609 >
610 >
611 >
612 >>
613 void
614 reset(_Up __p) noexcept
615 {
616 pointer __ptr = __p;
617 using std::swap;
618 swap(std::get<0>(_M_t), __ptr);
619 if (__ptr != nullptr)
620 get_deleter()(__ptr);
621 }
622
623 void reset(nullptr_t = nullptr) noexcept
624 {
625 reset(pointer());
626 }
627
628 /// Exchange the pointer and deleter with another object.
629 void
630 swap(unique_ptr& __u) noexcept
631 {
632 using std::swap;
633 swap(_M_t, __u._M_t);
634 }
635
636 // Disable copy from lvalue.
637 unique_ptr(const unique_ptr&) = delete;
638 unique_ptr& operator=(const unique_ptr&) = delete;
639 };
640
641 template<typename _Tp, typename _Dp>
642 inline void
643 swap(unique_ptr<_Tp, _Dp>& __x,
644 unique_ptr<_Tp, _Dp>& __y) noexcept
645 { __x.swap(__y); }
646
647 template<typename _Tp, typename _Dp,
648 typename _Up, typename _Ep>
649 inline bool
650 operator==(const unique_ptr<_Tp, _Dp>& __x,
651 const unique_ptr<_Up, _Ep>& __y)
652 { return __x.get() == __y.get(); }
653
654 template<typename _Tp, typename _Dp>
655 inline bool
656 operator==(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
657 { return !__x; }
658
659 template<typename _Tp, typename _Dp>
660 inline bool
661 operator==(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
662 { return !__x; }
663
664 template<typename _Tp, typename _Dp,
665 typename _Up, typename _Ep>
666 inline bool
667 operator!=(const unique_ptr<_Tp, _Dp>& __x,
668 const unique_ptr<_Up, _Ep>& __y)
669 { return __x.get() != __y.get(); }
670
671 template<typename _Tp, typename _Dp>
672 inline bool
673 operator!=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
674 { return (bool)__x; }
675
676 template<typename _Tp, typename _Dp>
677 inline bool
678 operator!=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
679 { return (bool)__x; }
680
681 template<typename _Tp, typename _Dp,
682 typename _Up, typename _Ep>
683 inline bool
684 operator<(const unique_ptr<_Tp, _Dp>& __x,
685 const unique_ptr<_Up, _Ep>& __y)
686 {
687 typedef typename
688 std::common_type<typename unique_ptr<_Tp, _Dp>::pointer,
689 typename unique_ptr<_Up, _Ep>::pointer>::type _CT;
690 return std::less<_CT>()(__x.get(), __y.get());
691 }
692
693 template<typename _Tp, typename _Dp>
694 inline bool
695 operator<(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
696 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
697 nullptr); }
698
699 template<typename _Tp, typename _Dp>
700 inline bool
701 operator<(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
702 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
703 __x.get()); }
704
705 template<typename _Tp, typename _Dp,
706 typename _Up, typename _Ep>
707 inline bool
708 operator<=(const unique_ptr<_Tp, _Dp>& __x,
709 const unique_ptr<_Up, _Ep>& __y)
710 { return !(__y < __x); }
711
712 template<typename _Tp, typename _Dp>
713 inline bool
714 operator<=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
715 { return !(nullptr < __x); }
716
717 template<typename _Tp, typename _Dp>
718 inline bool
719 operator<=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
720 { return !(__x < nullptr); }
721
722 template<typename _Tp, typename _Dp,
723 typename _Up, typename _Ep>
724 inline bool
725 operator>(const unique_ptr<_Tp, _Dp>& __x,
726 const unique_ptr<_Up, _Ep>& __y)
727 { return (__y < __x); }
728
729 template<typename _Tp, typename _Dp>
730 inline bool
731 operator>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
732 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
733 __x.get()); }
734
735 template<typename _Tp, typename _Dp>
736 inline bool
737 operator>(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
738 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
739 nullptr); }
740
741 template<typename _Tp, typename _Dp,
742 typename _Up, typename _Ep>
743 inline bool
744 operator>=(const unique_ptr<_Tp, _Dp>& __x,
745 const unique_ptr<_Up, _Ep>& __y)
746 { return !(__x < __y); }
747
748 template<typename _Tp, typename _Dp>
749 inline bool
750 operator>=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
751 { return !(__x < nullptr); }
752
753 template<typename _Tp, typename _Dp>
754 inline bool
755 operator>=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
756 { return !(nullptr < __x); }
757
758 /// std::hash specialization for unique_ptr.
759 template<typename _Tp, typename _Dp>
760 struct hash<unique_ptr<_Tp, _Dp>>
761 : public __hash_base<size_t, unique_ptr<_Tp, _Dp>>
762 {
763 size_t
764 operator()(const unique_ptr<_Tp, _Dp>& __u) const noexcept
765 {
766 typedef unique_ptr<_Tp, _Dp> _UP;
767 return std::hash<typename _UP::pointer>()(__u.get());
768 }
769 };
770
771 #if __cplusplus > 201103L
772
773#define __cpp_lib_make_unique 201304
774
775 template<typename _Tp>
776 struct _MakeUniq
777 { typedef unique_ptr<_Tp> __single_object; };
778
779 template<typename _Tp>
780 struct _MakeUniq<_Tp[]>
781 { typedef unique_ptr<_Tp[]> __array; };
782
783 template<typename _Tp, size_t _Bound>
784 struct _MakeUniq<_Tp[_Bound]>
785 { struct __invalid_type { }; };
786
787 /// std::make_unique for single objects
788 template<typename _Tp, typename... _Args>
789 inline typename _MakeUniq<_Tp>::__single_object
790 make_unique(_Args&&... __args)
791 { return unique_ptr<_Tp>(new _Tp(std::forward<_Args>(__args)...)); }
792
793 /// std::make_unique for arrays of unknown bound
794 template<typename _Tp>
795 inline typename _MakeUniq<_Tp>::__array
796 make_unique(size_t __num)
797 { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__num]()); }
798
799 /// Disable std::make_unique for arrays of known bound
800 template<typename _Tp, typename... _Args>
801 inline typename _MakeUniq<_Tp>::__invalid_type
802 make_unique(_Args&&...) = delete;
803#endif
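
A usage sketch for the helpers above (C++14 and later; names are assumptions):

auto One = std::make_unique<std::pair<int, int>>(1, 2); // single object
auto Arr = std::make_unique<int[]>(16);                 // value-initialized
// std::make_unique<int[16]>() is rejected by the deleted overload above.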
804
805 // @} group pointer_abstractions
806
807_GLIBCXX_END_NAMESPACE_VERSION
808} // namespace
809
810#endif /* _UNIQUE_PTR_H */

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/tuple

1// <tuple> -*- C++ -*-
2
3// Copyright (C) 2007-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/tuple
26 * This is a Standard C++ Library header.
27 */
28
29 #ifndef _GLIBCXX_TUPLE
30 #define _GLIBCXX_TUPLE 1
31
32#pragma GCC system_header
33
34 #if __cplusplus < 201103L
35# include <bits/c++0x_warning.h>
36#else
37
38#include <utility>
39#include <array>
40#include <bits/uses_allocator.h>
41
42 namespace std _GLIBCXX_VISIBILITY(default)
43{
44_GLIBCXX_BEGIN_NAMESPACE_VERSION
45
46 /**
47 * @addtogroup utilities
48 * @{
49 */
50
51 template<std::size_t _Idx, typename _Head, bool _IsEmptyNotFinal>
52 struct _Head_base;
53
54 template<std::size_t _Idx, typename _Head>
55 struct _Head_base<_Idx, _Head, true>
56 : public _Head
57 {
58 constexpr _Head_base()
59 : _Head() { }
60
61 constexpr _Head_base(const _Head& __h)
62 : _Head(__h) { }
63
64 constexpr _Head_base(const _Head_base&) = default;
65 constexpr _Head_base(_Head_base&&) = default;
66
67 template<typename _UHead>
68 constexpr _Head_base(_UHead&& __h)
69 : _Head(std::forward<_UHead>(__h)) { }
70
71 _Head_base(allocator_arg_t, __uses_alloc0)
72 : _Head() { }
73
74 template<typename _Alloc>
75 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
76 : _Head(allocator_arg, *__a._M_a) { }
77
78 template<typename _Alloc>
79 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
80 : _Head(*__a._M_a) { }
81
82 template<typename _UHead>
83 _Head_base(__uses_alloc0, _UHead&& __uhead)
84 : _Head(std::forward<_UHead>(__uhead)) { }
85
86 template<typename _Alloc, typename _UHead>
87 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
88 : _Head(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead)) { }
89
90 template<typename _Alloc, typename _UHead>
91 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
92 : _Head(std::forward<_UHead>(__uhead), *__a._M_a) { }
93
94 static constexpr _Head&
95 _M_head(_Head_base& __b) noexcept { return __b; }
96
97 static constexpr const _Head&
98 _M_head(const _Head_base& __b) noexcept { return __b; }
99 };
100
101 template<std::size_t _Idx, typename _Head>
102 struct _Head_base<_Idx, _Head, false>
103 {
104 constexpr _Head_base()
105 : _M_head_impl() { }
106
107 constexpr _Head_base(const _Head& __h)
108 : _M_head_impl(__h) { }
109
110 constexpr _Head_base(const _Head_base&) = default;
111 constexpr _Head_base(_Head_base&&) = default;
112
113 template<typename _UHead>
114 constexpr _Head_base(_UHead&& __h)
115 : _M_head_impl(std::forward<_UHead>(__h)) { }
116
117 _Head_base(allocator_arg_t, __uses_alloc0)
118 : _M_head_impl() { }
119
120 template<typename _Alloc>
121 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
122 : _M_head_impl(allocator_arg, *__a._M_a) { }
123
124 template<typename _Alloc>
125 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
126 : _M_head_impl(*__a._M_a) { }
127
128 template<typename _UHead>
129 _Head_base(__uses_alloc0, _UHead&& __uhead)
130 : _M_head_impl(std::forward<_UHead>(__uhead)) { }
131
132 template<typename _Alloc, typename _UHead>
133 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
134 : _M_head_impl(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead))
135 { }
136
137 template<typename _Alloc, typename _UHead>
138 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
139 : _M_head_impl(std::forward<_UHead>(__uhead), *__a._M_a) { }
140
141 static constexpr _Head&
142 _M_head(_Head_base& __b) noexcept { return __b._M_head_impl; }
143
144 static constexpr const _Head&
145 _M_head(const _Head_base& __b) noexcept { return __b._M_head_impl; }
Step 16: Returning pointer (reference to field '_M_head_impl')
146
147 _Head _M_head_impl;
148 };
149
150 /**
151 * Contains the actual implementation of the @c tuple template, stored
152 * as a recursive inheritance hierarchy from the first element (most
153 * derived class) to the last (least derived class). The @c Idx
154 * parameter gives the 0-based index of the element stored at this
155 * point in the hierarchy; we use it to implement a constant-time
156 * get() operation.
157 */
158 template<std::size_t _Idx, typename... _Elements>
159 struct _Tuple_impl;
160
161 template<typename _Tp>
162 struct __is_empty_non_tuple : is_empty<_Tp> { };
163
164 // Using EBO for elements that are tuples causes ambiguous base errors.
165 template<typename _El0, typename... _El>
166 struct __is_empty_non_tuple<tuple<_El0, _El...>> : false_type { };
167
168 // Use the Empty Base-class Optimization for empty, non-final types.
169 template<typename _Tp>
170 using __empty_not_final
171 = typename conditional<__is_final(_Tp), false_type,
172 __is_empty_non_tuple<_Tp>>::type;
173
174 /**
175 * Recursive tuple implementation. Here we store the @c Head element
176 * and derive from a @c Tuple_impl containing the remaining elements
177 * (which contains the @c Tail).
178 */
179 template<std::size_t _Idx, typename _Head, typename... _Tail>
180 struct _Tuple_impl<_Idx, _Head, _Tail...>
181 : public _Tuple_impl<_Idx + 1, _Tail...>,
182 private _Head_base<_Idx, _Head, __empty_not_final<_Head>::value>
183 {
184 template<std::size_t, typename...> friend class _Tuple_impl;
185
186 typedef _Tuple_impl<_Idx + 1, _Tail...> _Inherited;
187 typedef _Head_base<_Idx, _Head, __empty_not_final<_Head>::value> _Base;
188
189 static constexpr _Head&
190 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
191
192 static constexpr const _Head&
193 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
Step 15: Calling '_Head_base::_M_head'
Step 17: Returning from '_Head_base::_M_head'
Step 18: Returning pointer (reference to field '_M_head_impl')
194
195 static constexpr _Inherited&
196 _M_tail(_Tuple_impl& __t) noexcept { return __t; }
197
198 static constexpr const _Inherited&
199 _M_tail(const _Tuple_impl& __t) noexcept { return __t; }
200
201 constexpr _Tuple_impl()
202 : _Inherited(), _Base() { }
203
204 explicit
205 constexpr _Tuple_impl(const _Head& __head, const _Tail&... __tail)
206 : _Inherited(__tail...), _Base(__head) { }
207
208 template<typename _UHead, typename... _UTail, typename = typename
209 enable_if<sizeof...(_Tail) == sizeof...(_UTail)>::type>
210 explicit
211 constexpr _Tuple_impl(_UHead&& __head, _UTail&&... __tail)
212 : _Inherited(std::forward<_UTail>(__tail)...),
213 _Base(std::forward<_UHead>(__head)) { }
214
215 constexpr _Tuple_impl(const _Tuple_impl&) = default;
216
217 constexpr
218 _Tuple_impl(_Tuple_impl&& __in)
219 noexcept(__and_<is_nothrow_move_constructible<_Head>,
220 is_nothrow_move_constructible<_Inherited>>::value)
221 : _Inherited(std::move(_M_tail(__in))),
222 _Base(std::forward<_Head>(_M_head(__in))) { }
223
224 template<typename... _UElements>
225 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UElements...>& __in)
226 : _Inherited(_Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
227 _Base(_Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
228
229 template<typename _UHead, typename... _UTails>
230 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
231 : _Inherited(std::move
232 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
233 _Base(std::forward<_UHead>
234 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
235
236 template<typename _Alloc>
237 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
238 : _Inherited(__tag, __a),
239 _Base(__tag, __use_alloc<_Head>(__a)) { }
240
241 template<typename _Alloc>
242 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
243 const _Head& __head, const _Tail&... __tail)
244 : _Inherited(__tag, __a, __tail...),
245 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
246
247 template<typename _Alloc, typename _UHead, typename... _UTail,
248 typename = typename enable_if<sizeof...(_Tail)
249 == sizeof...(_UTail)>::type>
250 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
251 _UHead&& __head, _UTail&&... __tail)
252 : _Inherited(__tag, __a, std::forward<_UTail>(__tail)...),
253 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
254 std::forward<_UHead>(__head)) { }
255
256 template<typename _Alloc>
257 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
258 const _Tuple_impl& __in)
259 : _Inherited(__tag, __a, _M_tail(__in)),
260 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
261
262 template<typename _Alloc>
263 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
264 _Tuple_impl&& __in)
265 : _Inherited(__tag, __a, std::move(_M_tail(__in))),
266 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
267 std::forward<_Head>(_M_head(__in))) { }
268
269 template<typename _Alloc, typename... _UElements>
270 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
271 const _Tuple_impl<_Idx, _UElements...>& __in)
272 : _Inherited(__tag, __a,
273 _Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
274 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
275 _Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
276
277 template<typename _Alloc, typename _UHead, typename... _UTails>
278 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
279 _Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
280 : _Inherited(__tag, __a, std::move
281 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
282 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
283 std::forward<_UHead>
284 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
285
286 _Tuple_impl&
287 operator=(const _Tuple_impl& __in)
288 {
289 _M_head(*this) = _M_head(__in);
290 _M_tail(*this) = _M_tail(__in);
291 return *this;
292 }
293
294 _Tuple_impl&
295 operator=(_Tuple_impl&& __in)
296 noexcept(__and_<is_nothrow_move_assignable<_Head>,
297 is_nothrow_move_assignable<_Inherited>>::value)
298 {
299 _M_head(*this) = std::forward<_Head>(_M_head(__in));
300 _M_tail(*this) = std::move(_M_tail(__in));
301 return *this;
302 }
303
304 template<typename... _UElements>
305 _Tuple_impl&
306 operator=(const _Tuple_impl<_Idx, _UElements...>& __in)
307 {
308 _M_head(*this) = _Tuple_impl<_Idx, _UElements...>::_M_head(__in);
309 _M_tail(*this) = _Tuple_impl<_Idx, _UElements...>::_M_tail(__in);
310 return *this;
311 }
312
313 template<typename _UHead, typename... _UTails>
314 _Tuple_impl&
315 operator=(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
316 {
317 _M_head(*this) = std::forward<_UHead>
318 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in));
319 _M_tail(*this) = std::move
320 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in));
321 return *this;
322 }
323
324 protected:
325 void
326 _M_swap(_Tuple_impl& __in)
327 noexcept(__is_nothrow_swappable<_Head>::value
328 && noexcept(_M_tail(__in)._M_swap(_M_tail(__in))))
329 {
330 using std::swap;
331 swap(_M_head(*this), _M_head(__in));
332 _Inherited::_M_swap(_M_tail(__in));
333 }
334 };
335
336 // Basis case of inheritance recursion.
337 template<std::size_t _Idx, typename _Head>
338 struct _Tuple_impl<_Idx, _Head>
339 : private _Head_base<_Idx, _Head, __empty_not_final<_Head>::value>
340 {
341 template<std::size_t, typename...> friend class _Tuple_impl;
342
343 typedef _Head_base<_Idx, _Head, __empty_not_final<_Head>::value> _Base;
344
345 static constexpr _Head&
346 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
347
348 static constexpr const _Head&
349 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
350
351 constexpr _Tuple_impl()
352 : _Base() { }
353
354 explicit
355 constexpr _Tuple_impl(const _Head& __head)
356 : _Base(__head) { }
357
358 template<typename _UHead>
359 explicit
360 constexpr _Tuple_impl(_UHead&& __head)
361 : _Base(std::forward<_UHead>(__head)) { }
362
363 constexpr _Tuple_impl(const _Tuple_impl&) = default;
364
365 constexpr
366 _Tuple_impl(_Tuple_impl&& __in)
367 noexcept(is_nothrow_move_constructible<_Head>::value)
368 : _Base(std::forward<_Head>(_M_head(__in))) { }
369
370 template<typename _UHead>
371 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UHead>& __in)
372 : _Base(_Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
373
374 template<typename _UHead>
375 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead>&& __in)
376 : _Base(std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
377 { }
378
379 template<typename _Alloc>
380 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
381 : _Base(__tag, __use_alloc<_Head>(__a)) { }
382
383 template<typename _Alloc>
384 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
385 const _Head& __head)
386 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
387
388 template<typename _Alloc, typename _UHead>
389 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
390 _UHead&& __head)
391 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
392 std::forward<_UHead>(__head)) { }
393
394 template<typename _Alloc>
395 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
396 const _Tuple_impl& __in)
397 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
398
399 template<typename _Alloc>
400 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
401 _Tuple_impl&& __in)
402 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
403 std::forward<_Head>(_M_head(__in))) { }
404
405 template<typename _Alloc, typename _UHead>
406 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
407 const _Tuple_impl<_Idx, _UHead>& __in)
408 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
409 _Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
410
411 template<typename _Alloc, typename _UHead>
412 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
413 _Tuple_impl<_Idx, _UHead>&& __in)
414 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
415 std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
416 { }
417
418 _Tuple_impl&
419 operator=(const _Tuple_impl& __in)
420 {
421 _M_head(*this) = _M_head(__in);
422 return *this;
423 }
424
425 _Tuple_impl&
426 operator=(_Tuple_impl&& __in)
427 noexcept(is_nothrow_move_assignable<_Head>::value)
428 {
429 _M_head(*this) = std::forward<_Head>(_M_head(__in));
430 return *this;
431 }
432
433 template<typename _UHead>
434 _Tuple_impl&
435 operator=(const _Tuple_impl<_Idx, _UHead>& __in)
436 {
437 _M_head(*this) = _Tuple_impl<_Idx, _UHead>::_M_head(__in);
438 return *this;
439 }
440
441 template<typename _UHead>
442 _Tuple_impl&
443 operator=(_Tuple_impl<_Idx, _UHead>&& __in)
444 {
445 _M_head(*this)
446 = std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in));
447 return *this;
448 }
449
450 protected:
451 void
452 _M_swap(_Tuple_impl& __in)
453 noexcept(__is_nothrow_swappable<_Head>::value)
454 {
455 using std::swap;
456 swap(_M_head(*this), _M_head(__in));
457 }
458 };
459
460 template<typename... _Elements>
461 class tuple;
462
463 // Concept utility functions, reused in conditionally-explicit
464 // constructors.
465 template<bool, typename... _Elements>
466 struct _TC
467 {
468 template<typename... _UElements>
469 static constexpr bool _ConstructibleTuple()
470 {
471 return __and_<is_constructible<_Elements, const _UElements&>...>::value;
472 }
473
474 template<typename... _UElements>
475 static constexpr bool _ImplicitlyConvertibleTuple()
476 {
477 return __and_<is_convertible<const _UElements&, _Elements>...>::value;
478 }
479
480 template<typename... _UElements>
481 static constexpr bool _MoveConstructibleTuple()
482 {
483 return __and_<is_constructible<_Elements, _UElements&&>...>::value;
484 }
485
486 template<typename... _UElements>
487 static constexpr bool _ImplicitlyMoveConvertibleTuple()
488 {
489 return __and_<is_convertible<_UElements&&, _Elements>...>::value;
490 }
491
492 template<typename _SrcTuple>
493 static constexpr bool _NonNestedTuple()
494 {
495 return __and_<__not_<is_same<tuple<_Elements...>,
496 typename remove_cv<
497 typename remove_reference<_SrcTuple>::type
498 >::type>>,
499 __not_<is_convertible<_SrcTuple, _Elements...>>,
500 __not_<is_constructible<_Elements..., _SrcTuple>>
501 >::value;
502 }
503 template<typename... _UElements>
504 static constexpr bool _NotSameTuple()
505 {
506 return __not_<is_same<tuple<_Elements...>,
507 typename remove_const<
508 typename remove_reference<_UElements...>::type
509 >::type>>::value;
510 }
511 };
512
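
These predicates drive the conditionally-explicit constructors below: each converting constructor exists in an implicit flavor (enabled when every element conversion is implicit) and an explicit flavor (enabled otherwise). A sketch of the visible difference (FromIntExplicit is a hypothetical type):

    #include <tuple>
    #include <type_traits>

    struct FromIntExplicit {
        explicit FromIntExplicit(int) {}
    };

    std::tuple<int, long> f() {
        return {1, 2L};   // OK: both element conversions are implicit
    }

    int main() {
        // _ImplicitlyMoveConvertibleTuple() fails for FromIntExplicit, so
        // only the 'explicit' overload participates: direct-initialization
        // compiles, copy-list-initialization would not.
        static_assert(!std::is_convertible<int, FromIntExplicit>::value, "");
        std::tuple<FromIntExplicit> t(1);         // OK: explicit construction
        // std::tuple<FromIntExplicit> t2 = {1};  // would be ill-formed
        auto u = f();
        (void)t; (void)u;
        return 0;
    }
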
513 template<typename... _Elements>
514 struct _TC<false, _Elements...>
515 {
516 template<typename... _UElements>
517 static constexpr bool _ConstructibleTuple()
518 {
519 return false;
520 }
521
522 template<typename... _UElements>
523 static constexpr bool _ImplicitlyConvertibleTuple()
524 {
525 return false;
526 }
527
528 template<typename... _UElements>
529 static constexpr bool _MoveConstructibleTuple()
530 {
531 return false;
532 }
533
534 template<typename... _UElements>
535 static constexpr bool _ImplicitlyMoveConvertibleTuple()
536 {
537 return false;
538 }
539
540 template<typename... _UElements>
541 static constexpr bool _NonNestedTuple()
542 {
543 return true;
544 }
545 template<typename... _UElements>
546 static constexpr bool _NotSameTuple()
547 {
548 return true;
549 }
550 };
551
552 /// Primary class template, tuple
553 template<typename... _Elements>
554 class tuple : public _Tuple_impl<0, _Elements...>
555 {
556 typedef _Tuple_impl<0, _Elements...> _Inherited;
557
558 // Used for constraining the default constructor so
559 // that it becomes dependent on the constraints.
560 template<typename _Dummy>
561 struct _TC2
562 {
563 static constexpr bool _DefaultConstructibleTuple()
564 {
565 return __and_<is_default_constructible<_Elements>...>::value;
566 }
567 static constexpr bool _ImplicitlyDefaultConstructibleTuple()
568 {
569 return __and_<__is_implicitly_default_constructible<_Elements>...>
570 ::value;
571 }
572 };
573
574 public:
575 template<typename _Dummy = void,
576 typename enable_if<_TC2<_Dummy>::
577 _ImplicitlyDefaultConstructibleTuple(),
578 bool>::type = true>
579 constexpr tuple()
580 : _Inherited() { }
581
582 template<typename _Dummy = void,
583 typename enable_if<_TC2<_Dummy>::
584 _DefaultConstructibleTuple()
585 &&
586 !_TC2<_Dummy>::
587 _ImplicitlyDefaultConstructibleTuple(),
588 bool>::type = false>
589 explicit constexpr tuple()
590 : _Inherited() { }
591
592 // Shortcut for the cases where constructors taking _Elements...
593 // need to be constrained.
594 template<typename _Dummy> using _TCC =
595 _TC<is_same<_Dummy, void>::value,
596 _Elements...>;
597
598 template<typename _Dummy = void,
599 typename enable_if<
600 _TCC<_Dummy>::template
601 _ConstructibleTuple<_Elements...>()
602 && _TCC<_Dummy>::template
603 _ImplicitlyConvertibleTuple<_Elements...>()
604 && (sizeof...(_Elements) >= 1),
605 bool>::type=true>
606 constexpr tuple(const _Elements&... __elements)
607 : _Inherited(__elements...) { }
608
609 template<typename _Dummy = void,
610 typename enable_if<
611 _TCC<_Dummy>::template
612 _ConstructibleTuple<_Elements...>()
613 && !_TCC<_Dummy>::template
614 _ImplicitlyConvertibleTuple<_Elements...>()
615 && (sizeof...(_Elements) >= 1),
616 bool>::type=false>
617 explicit constexpr tuple(const _Elements&... __elements)
618 : _Inherited(__elements...) { }
619
620 // Shortcut for the cases where constructors taking _UElements...
621 // need to be constrained.
622 template<typename... _UElements> using _TMC =
623 _TC<(sizeof...(_Elements) == sizeof...(_UElements)),
624 _Elements...>;
625
626 template<typename... _UElements, typename
627 enable_if<
628 _TC<sizeof...(_UElements) == 1, _Elements...>::template
629 _NotSameTuple<_UElements...>()
630 && _TMC<_UElements...>::template
631 _MoveConstructibleTuple<_UElements...>()
632 && _TMC<_UElements...>::template
633 _ImplicitlyMoveConvertibleTuple<_UElements...>()
634 && (sizeof...(_Elements) >= 1),
635 bool>::type=true>
636 constexpr tuple(_UElements&&... __elements)
637 : _Inherited(std::forward<_UElements>(__elements)...) { }
638
639 template<typename... _UElements, typename
640 enable_if<
641 _TC<sizeof...(_UElements) == 1, _Elements...>::template
642 _NotSameTuple<_UElements...>()
643 && _TMC<_UElements...>::template
644 _MoveConstructibleTuple<_UElements...>()
645 && !_TMC<_UElements...>::template
646 _ImplicitlyMoveConvertibleTuple<_UElements...>()
647 && (sizeof...(_Elements) >= 1),
648 bool>::type=false>
649 explicit constexpr tuple(_UElements&&... __elements)
650 : _Inherited(std::forward<_UElements>(__elements)...) { }
651
652 constexpr tuple(const tuple&) = default;
653
654 constexpr tuple(tuple&&) = default;
655
656 // Shortcut for the cases where constructors taking tuples
657 // must avoid creating temporaries.
658 template<typename _Dummy> using _TNTC =
659 _TC<is_same<_Dummy, void>::value && sizeof...(_Elements) == 1,
660 _Elements...>;
661
662 template<typename... _UElements, typename _Dummy = void, typename
663 enable_if<_TMC<_UElements...>::template
664 _ConstructibleTuple<_UElements...>()
665 && _TMC<_UElements...>::template
666 _ImplicitlyConvertibleTuple<_UElements...>()
667 && _TNTC<_Dummy>::template
668 _NonNestedTuple<const tuple<_UElements...>&>(),
669 bool>::type=true>
670 constexpr tuple(const tuple<_UElements...>& __in)
671 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
672 { }
673
674 template<typename... _UElements, typename _Dummy = void, typename
675 enable_if<_TMC<_UElements...>::template
676 _ConstructibleTuple<_UElements...>()
677 && !_TMC<_UElements...>::template
678 _ImplicitlyConvertibleTuple<_UElements...>()
679 && _TNTC<_Dummy>::template
680 _NonNestedTuple<const tuple<_UElements...>&>(),
681 bool>::type=false>
682 explicit constexpr tuple(const tuple<_UElements...>& __in)
683 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
684 { }
685
686 template<typename... _UElements, typename _Dummy = void, typename
687 enable_if<_TMC<_UElements...>::template
688 _MoveConstructibleTuple<_UElements...>()
689 && _TMC<_UElements...>::template
690 _ImplicitlyMoveConvertibleTuple<_UElements...>()
691 && _TNTC<_Dummy>::template
692 _NonNestedTuple<tuple<_UElements...>&&>(),
693 bool>::type=true>
694 constexpr tuple(tuple<_UElements...>&& __in)
695 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
696
697 template<typename... _UElements, typename _Dummy = void, typename
698 enable_if<_TMC<_UElements...>::template
699 _MoveConstructibleTuple<_UElements...>()
700 && !_TMC<_UElements...>::template
701 _ImplicitlyMoveConvertibleTuple<_UElements...>()
702 && _TNTC<_Dummy>::template
703 _NonNestedTuple<tuple<_UElements...>&&>(),
704 bool>::type=false>
705 explicit constexpr tuple(tuple<_UElements...>&& __in)
706 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
707
708 // Allocator-extended constructors.
709
710 template<typename _Alloc>
711 tuple(allocator_arg_t __tag, const _Alloc& __a)
712 : _Inherited(__tag, __a) { }
713
714 template<typename _Alloc, typename _Dummy = void,
715 typename enable_if<
716 _TCC<_Dummy>::template
717 _ConstructibleTuple<_Elements...>()
718 && _TCC<_Dummy>::template
719 _ImplicitlyConvertibleTuple<_Elements...>(),
720 bool>::type=true>
721 tuple(allocator_arg_t __tag, const _Alloc& __a,
722 const _Elements&... __elements)
723 : _Inherited(__tag, __a, __elements...) { }
724
725 template<typename _Alloc, typename _Dummy = void,
726 typename enable_if<
727 _TCC<_Dummy>::template
728 _ConstructibleTuple<_Elements...>()
729 && !_TCC<_Dummy>::template
730 _ImplicitlyConvertibleTuple<_Elements...>(),
731 bool>::type=false>
732 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
733 const _Elements&... __elements)
734 : _Inherited(__tag, __a, __elements...) { }
735
736 template<typename _Alloc, typename... _UElements, typename
737 enable_if<_TMC<_UElements...>::template
738 _MoveConstructibleTuple<_UElements...>()
739 && _TMC<_UElements...>::template
740 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
741 bool>::type=true>
742 tuple(allocator_arg_t __tag, const _Alloc& __a,
743 _UElements&&... __elements)
744 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
745 { }
746
747 template<typename _Alloc, typename... _UElements, typename
748 enable_if<_TMC<_UElements...>::template
749 _MoveConstructibleTuple<_UElements...>()
750 && !_TMC<_UElements...>::template
751 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
752 bool>::type=false>
753 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
754 _UElements&&... __elements)
755 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
756 { }
757
758 template<typename _Alloc>
759 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
760 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
761
762 template<typename _Alloc>
763 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
764 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
765
766 template<typename _Alloc, typename... _UElements, typename
767 enable_if<_TMC<_UElements...>::template
768 _ConstructibleTuple<_UElements...>()
769 && _TMC<_UElements...>::template
770 _ImplicitlyConvertibleTuple<_UElements...>(),
771 bool>::type=true>
772 tuple(allocator_arg_t __tag, const _Alloc& __a,
773 const tuple<_UElements...>& __in)
774 : _Inherited(__tag, __a,
775 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
776 { }
777
778 template<typename _Alloc, typename... _UElements, typename
779 enable_if<_TMC<_UElements...>::template
780 _ConstructibleTuple<_UElements...>()
781 && !_TMC<_UElements...>::template
782 _ImplicitlyConvertibleTuple<_UElements...>(),
783 bool>::type=false>
784 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
785 const tuple<_UElements...>& __in)
786 : _Inherited(__tag, __a,
787 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
788 { }
789
790 template<typename _Alloc, typename... _UElements, typename
791 enable_if<_TMC<_UElements...>::template
792 _MoveConstructibleTuple<_UElements...>()
793 && _TMC<_UElements...>::template
794 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
795 bool>::type=true>
796 tuple(allocator_arg_t __tag, const _Alloc& __a,
797 tuple<_UElements...>&& __in)
798 : _Inherited(__tag, __a,
799 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
800 { }
801
802 template<typename _Alloc, typename... _UElements, typename
803 enable_if<_TMC<_UElements...>::template
804 _MoveConstructibleTuple<_UElements...>()
805 && !_TMC<_UElements...>::template
806 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
807 bool>::type=false>
808 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
809 tuple<_UElements...>&& __in)
810 : _Inherited(__tag, __a,
811 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
812 { }
813
814 tuple&
815 operator=(const tuple& __in)
816 {
817 static_cast<_Inherited&>(*this) = __in;
818 return *this;
819 }
820
821 tuple&
822 operator=(tuple&& __in)
823 noexcept(is_nothrow_move_assignable<_Inherited>::value)
824 {
825 static_cast<_Inherited&>(*this) = std::move(__in);
826 return *this;
827 }
828
829 template<typename... _UElements, typename = typename
830 enable_if<sizeof...(_UElements)
831 == sizeof...(_Elements)>::type>
832 tuple&
833 operator=(const tuple<_UElements...>& __in)
834 {
835 static_cast<_Inherited&>(*this) = __in;
836 return *this;
837 }
838
839 template<typename... _UElements, typename = typename
840 enable_if<sizeof...(_UElements)
841 == sizeof...(_Elements)>::type>
842 tuple&
843 operator=(tuple<_UElements...>&& __in)
844 {
845 static_cast<_Inherited&>(*this) = std::move(__in);
846 return *this;
847 }
848
849 void
850 swap(tuple& __in)
851 noexcept(noexcept(__in._M_swap(__in)))
852 { _Inherited::_M_swap(__in); }
853 };
854
855 // Explicit specialization, zero-element tuple.
856 template<>
857 class tuple<>
858 {
859 public:
860 void swap(tuple&) noexcept { /* no-op */ }
861 };
862
863 /// Partial specialization, 2-element tuple.
864 /// Includes construction and assignment from a pair.
865 template<typename _T1, typename _T2>
866 class tuple<_T1, _T2> : public _Tuple_impl<0, _T1, _T2>
867 {
868 typedef _Tuple_impl<0, _T1, _T2> _Inherited;
869
870 public:
871 template <typename _U1 = _T1,
872 typename _U2 = _T2,
873 typename enable_if<__and_<
874 __is_implicitly_default_constructible<_U1>,
875 __is_implicitly_default_constructible<_U2>>
876 ::value, bool>::type = true>
877
878 constexpr tuple()
879 : _Inherited() { }
880
881 template <typename _U1 = _T1,
882 typename _U2 = _T2,
883 typename enable_if<
884 __and_<
885 is_default_constructible<_U1>,
886 is_default_constructible<_U2>,
887 __not_<
888 __and_<__is_implicitly_default_constructible<_U1>,
889 __is_implicitly_default_constructible<_U2>>>>
890 ::value, bool>::type = false>
891
892 explicit constexpr tuple()
893 : _Inherited() { }
894
895 // Shortcut for the cases where constructors taking _T1, _T2
896 // need to be constrained.
897 template<typename _Dummy> using _TCC =
898 _TC<is_same<_Dummy, void>::value, _T1, _T2>;
899
900 template<typename _Dummy = void, typename
901 enable_if<_TCC<_Dummy>::template
902 _ConstructibleTuple<_T1, _T2>()
903 && _TCC<_Dummy>::template
904 _ImplicitlyConvertibleTuple<_T1, _T2>(),
905 bool>::type = true>
906 constexpr tuple(const _T1& __a1, const _T2& __a2)
907 : _Inherited(__a1, __a2) { }
908
909 template<typename _Dummy = void, typename
910 enable_if<_TCC<_Dummy>::template
911 _ConstructibleTuple<_T1, _T2>()
912 && !_TCC<_Dummy>::template
913 _ImplicitlyConvertibleTuple<_T1, _T2>(),
914 bool>::type = false>
915 explicit constexpr tuple(const _T1& __a1, const _T2& __a2)
916 : _Inherited(__a1, __a2) { }
917
918 // Shortcut for the cases where constructors taking _U1, _U2
919 // need to be constrained.
920 using _TMC = _TC<true, _T1, _T2>;
921
922 template<typename _U1, typename _U2, typename
923 enable_if<_TMC::template
924 _MoveConstructibleTuple<_U1, _U2>()
925 && _TMC::template
926 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
927 && !is_same<typename decay<_U1>::type,
928 allocator_arg_t>::value,
929 bool>::type = true>
930 constexpr tuple(_U1&& __a1, _U2&& __a2)
931 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
932
933 template<typename _U1, typename _U2, typename
934 enable_if<_TMC::template
935 _MoveConstructibleTuple<_U1, _U2>()
936 && !_TMC::template
937 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
938 && !is_same<typename decay<_U1>::type,
939 allocator_arg_t>::value,
940 bool>::type = false>
941 explicit constexpr tuple(_U1&& __a1, _U2&& __a2)
942 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
943
944 constexpr tuple(const tuple&) = default;
945
946 constexpr tuple(tuple&&) = default;
947
948 template<typename _U1, typename _U2, typename
949 enable_if<_TMC::template
950 _ConstructibleTuple<_U1, _U2>()
951 && _TMC::template
952 _ImplicitlyConvertibleTuple<_U1, _U2>(),
953 bool>::type = true>
954 constexpr tuple(const tuple<_U1, _U2>& __in)
955 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
956
957 template<typename _U1, typename _U2, typename
958 enable_if<_TMC::template
959 _ConstructibleTuple<_U1, _U2>()
960 && !_TMC::template
961 _ImplicitlyConvertibleTuple<_U1, _U2>(),
962 bool>::type = false>
963 explicit constexpr tuple(const tuple<_U1, _U2>& __in)
964 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
965
966 template<typename _U1, typename _U2, typename
967 enable_if<_TMC::template
968 _MoveConstructibleTuple<_U1, _U2>()
969 && _TMC::template
970 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
971 bool>::type = true>
972 constexpr tuple(tuple<_U1, _U2>&& __in)
973 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
974
975 template<typename _U1, typename _U2, typename
976 enable_if<_TMC::template
977 _MoveConstructibleTuple<_U1, _U2>()
978 && !_TMC::template
979 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
980 bool>::type = false>
981 explicit constexpr tuple(tuple<_U1, _U2>&& __in)
982 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
983
984 template<typename _U1, typename _U2, typename
985 enable_if<_TMC::template
986 _ConstructibleTuple<_U1, _U2>()
987 && _TMC::template
988 _ImplicitlyConvertibleTuple<_U1, _U2>(),
989 bool>::type = true>
990 constexpr tuple(const pair<_U1, _U2>& __in)
991 : _Inherited(__in.first, __in.second) { }
992
993 template<typename _U1, typename _U2, typename
994 enable_if<_TMC::template
995 _ConstructibleTuple<_U1, _U2>()
996 && !_TMC::template
997 _ImplicitlyConvertibleTuple<_U1, _U2>(),
998 bool>::type = false>
999 explicit constexpr tuple(const pair<_U1, _U2>& __in)
1000 : _Inherited(__in.first, __in.second) { }
1001
1002 template<typename _U1, typename _U2, typename
1003 enable_if<_TMC::template
1004 _MoveConstructibleTuple<_U1, _U2>()
1005 && _TMC::template
1006 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1007 bool>::type = true>
1008 constexpr tuple(pair<_U1, _U2>&& __in)
1009 : _Inherited(std::forward<_U1>(__in.first),
1010 std::forward<_U2>(__in.second)) { }
1011
1012 template<typename _U1, typename _U2, typename
1013 enable_if<_TMC::template
1014 _MoveConstructibleTuple<_U1, _U2>()
1015 && !_TMC::template
1016 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1017 bool>::type = false>
1018 explicit constexpr tuple(pair<_U1, _U2>&& __in)
1019 : _Inherited(std::forward<_U1>(__in.first),
1020 std::forward<_U2>(__in.second)) { }
1021
1022 // Allocator-extended constructors.
1023
1024 template<typename _Alloc>
1025 tuple(allocator_arg_t __tag, const _Alloc& __a)
1026 : _Inherited(__tag, __a) { }
1027
1028 template<typename _Alloc, typename _Dummy = void,
1029 typename enable_if<
1030 _TCC<_Dummy>::template
1031 _ConstructibleTuple<_T1, _T2>()
1032 && _TCC<_Dummy>::template
1033 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1034 bool>::type=true>
1035
1036 tuple(allocator_arg_t __tag, const _Alloc& __a,
1037 const _T1& __a1, const _T2& __a2)
1038 : _Inherited(__tag, __a, __a1, __a2) { }
1039
1040 template<typename _Alloc, typename _Dummy = void,
1041 typename enable_if<
1042 _TCC<_Dummy>::template
1043 _ConstructibleTuple<_T1, _T2>()
1044 && !_TCC<_Dummy>::template
1045 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1046 bool>::type=false>
1047
1048 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1049 const _T1& __a1, const _T2& __a2)
1050 : _Inherited(__tag, __a, __a1, __a2) { }
1051
1052 template<typename _Alloc, typename _U1, typename _U2, typename
1053 enable_if<_TMC::template
1054 _MoveConstructibleTuple<_U1, _U2>()
1055 && _TMC::template
1056 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1057 bool>::type = true>
1058 tuple(allocator_arg_t __tag, const _Alloc& __a, _U1&& __a1, _U2&& __a2)
1059 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1060 std::forward<_U2>(__a2)) { }
1061
1062 template<typename _Alloc, typename _U1, typename _U2, typename
1063 enable_if<_TMC::template
1064 _MoveConstructibleTuple<_U1, _U2>()
1065 && !_TMC::template
1066 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1067 bool>::type = false>
1068 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1069 _U1&& __a1, _U2&& __a2)
1070 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1071 std::forward<_U2>(__a2)) { }
1072
1073 template<typename _Alloc>
1074 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
1075 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
1076
1077 template<typename _Alloc>
1078 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
1079 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
1080
1081 template<typename _Alloc, typename _U1, typename _U2, typename
1082 enable_if<_TMC::template
1083 _ConstructibleTuple<_U1, _U2>()
1084 && _TMC::template
1085 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1086 bool>::type = true>
1087 tuple(allocator_arg_t __tag, const _Alloc& __a,
1088 const tuple<_U1, _U2>& __in)
1089 : _Inherited(__tag, __a,
1090 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1091 { }
1092
1093 template<typename _Alloc, typename _U1, typename _U2, typename
1094 enable_if<_TMC::template
1095 _ConstructibleTuple<_U1, _U2>()
1096 && !_TMC::template
1097 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1098 bool>::type = false>
1099 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1100 const tuple<_U1, _U2>& __in)
1101 : _Inherited(__tag, __a,
1102 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1103 { }
1104
1105 template<typename _Alloc, typename _U1, typename _U2, typename
1106 enable_if<_TMC::template
1107 _MoveConstructibleTuple<_U1, _U2>()
1108 && _TMC::template
1109 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1110 bool>::type = true>
1111 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple<_U1, _U2>&& __in)
1112 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1113 { }
1114
1115 template<typename _Alloc, typename _U1, typename _U2, typename
1116 enable_if<_TMC::template
1117 _MoveConstructibleTuple<_U1, _U2>()
1118 && !_TMC::template
1119 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1120 bool>::type = false>
1121 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1122 tuple<_U1, _U2>&& __in)
1123 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1124 { }
1125
1126 template<typename _Alloc, typename _U1, typename _U2, typename
1127 enable_if<_TMC::template
1128 _ConstructibleTuple<_U1, _U2>()
1129 && _TMC::template
1130 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1131 bool>::type = true>
1132 tuple(allocator_arg_t __tag, const _Alloc& __a,
1133 const pair<_U1, _U2>& __in)
1134 : _Inherited(__tag, __a, __in.first, __in.second) { }
1135
1136 template<typename _Alloc, typename _U1, typename _U2, typename
1137 enable_if<_TMC::template
1138 _ConstructibleTuple<_U1, _U2>()
1139 && !_TMC::template
1140 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1141 bool>::type = false>
1142 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1143 const pair<_U1, _U2>& __in)
1144 : _Inherited(__tag, __a, __in.first, __in.second) { }
1145
1146 template<typename _Alloc, typename _U1, typename _U2, typename
1147 enable_if<_TMC::template
1148 _MoveConstructibleTuple<_U1, _U2>()
1149 && _TMC::template
1150 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1151 bool>::type = true>
1152 tuple(allocator_arg_t __tag, const _Alloc& __a, pair<_U1, _U2>&& __in)
1153 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1154 std::forward<_U2>(__in.second)) { }
1155
1156 template<typename _Alloc, typename _U1, typename _U2, typename
1157 enable_if<_TMC::template
1158 _MoveConstructibleTuple<_U1, _U2>()
1159 && !_TMC::template
1160 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1161 bool>::type = false>
1162 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1163 pair<_U1, _U2>&& __in)
1164 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1165 std::forward<_U2>(__in.second)) { }
1166
1167 tuple&
1168 operator=(const tuple& __in)
1169 {
1170 static_cast<_Inherited&>(*this) = __in;
1171 return *this;
1172 }
1173
1174 tuple&
1175 operator=(tuple&& __in)
1176 noexcept(is_nothrow_move_assignable<_Inherited>::value)
1177 {
1178 static_cast<_Inherited&>(*this) = std::move(__in);
1179 return *this;
1180 }
1181
1182 template<typename _U1, typename _U2>
1183 tuple&
1184 operator=(const tuple<_U1, _U2>& __in)
1185 {
1186 static_cast<_Inherited&>(*this) = __in;
1187 return *this;
1188 }
1189
1190 template<typename _U1, typename _U2>
1191 tuple&
1192 operator=(tuple<_U1, _U2>&& __in)
1193 {
1194 static_cast<_Inherited&>(*this) = std::move(__in);
1195 return *this;
1196 }
1197
1198 template<typename _U1, typename _U2>
1199 tuple&
1200 operator=(const pair<_U1, _U2>& __in)
1201 {
1202 this->_M_head(*this) = __in.first;
1203 this->_M_tail(*this)._M_head(*this) = __in.second;
1204 return *this;
1205 }
1206
1207 template<typename _U1, typename _U2>
1208 tuple&
1209 operator=(pair<_U1, _U2>&& __in)
1210 {
1211 this->_M_head(*this) = std::forward<_U1>(__in.first);
1212 this->_M_tail(*this)._M_head(*this) = std::forward<_U2>(__in.second);
1213 return *this;
1214 }
1215
1216 void
1217 swap(tuple& __in)
1218 noexcept(noexcept(__in._M_swap(__in)))
1219 { _Inherited::_M_swap(__in); }
1220 };
1221
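
A short usage sketch of the pair interoperation this specialization adds; construction and assignment route pair::first and pair::second to the two tuple elements:

    #include <cassert>
    #include <string>
    #include <tuple>
    #include <utility>

    int main() {
        std::pair<int, const char*> p(1, "one");

        // Converting construction from a pair: each pair member
        // initializes the corresponding tuple element.
        std::tuple<long, std::string> t(p);
        assert(std::get<0>(t) == 1L && std::get<1>(t) == "one");

        // Assignment from a pair rvalue forwards first and second.
        t = std::make_pair(2, "two");
        assert(std::get<0>(t) == 2L && std::get<1>(t) == "two");
        return 0;
    }
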
1222
1223 /**
1224 * Recursive case for tuple_element: strip off the first element in
1225 * the tuple and retrieve the (i-1)th element of the remaining tuple.
1226 */
1227 template<std::size_t __i, typename _Head, typename... _Tail>
1228 struct tuple_element<__i, tuple<_Head, _Tail...> >
1229 : tuple_element<__i - 1, tuple<_Tail...> > { };
1230
1231 /**
1232 * Basis case for tuple_element: The first element is the one we're seeking.
1233 */
1234 template<typename _Head, typename... _Tail>
1235 struct tuple_element<0, tuple<_Head, _Tail...> >
1236 {
1237 typedef _Head type;
1238 };
1239
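
A worked instantiation of the recursion above (each step strips the head element and decrements the index until the basis case is reached):

    #include <tuple>
    #include <type_traits>

    //   tuple_element<2, tuple<int, char, double>>
    //     -> tuple_element<1, tuple<char, double>>
    //     -> tuple_element<0, tuple<double>>      (basis: type = double)
    static_assert(std::is_same<
            std::tuple_element<2, std::tuple<int, char, double>>::type,
            double>::value, "third element is double");

    int main() { return 0; }
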
1240 /// class tuple_size
1241 template<typename... _Elements>
1242 struct tuple_size<tuple<_Elements...>>
1243 : public integral_constant<std::size_t, sizeof...(_Elements)> { };
1244
1245 template<std::size_t __i, typename _Head, typename... _Tail>
1246 constexpr _Head&
1247 __get_helper(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1248 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1249
1250 template<std::size_t __i, typename _Head, typename... _Tail>
1251 constexpr const _Head&
1252 __get_helper(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1253 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
    Step 14: Calling '_Tuple_impl::_M_head'
    Step 19: Returning from '_Tuple_impl::_M_head'
    Step 20: Returning pointer (reference to field '_M_head_impl')
1254
1255 /// Return a reference to the ith element of a tuple.
1256 template<std::size_t __i, typename... _Elements>
1257 constexpr __tuple_element_t<__i, tuple<_Elements...>>&
1258 get(tuple<_Elements...>& __t) noexcept
1259 { return std::__get_helper<__i>(__t); }
1260
1261 /// Return a const reference to the ith element of a const tuple.
1262 template<std::size_t __i, typename... _Elements>
1263 constexpr const __tuple_element_t<__i, tuple<_Elements...>>&
1264 get(const tuple<_Elements...>& __t) noexcept
1265 { return std::__get_helper<__i>(__t); }
    Step 13: Calling '__get_helper<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
    Step 21: Returning from '__get_helper<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
    Step 22: Returning pointer (reference to field '_M_head_impl')
1266
1267 /// Return an rvalue reference to the ith element of a tuple rvalue.
1268 template<std::size_t __i, typename... _Elements>
1269 constexpr __tuple_element_t<__i, tuple<_Elements...>>&&
1270 get(tuple<_Elements...>&& __t) noexcept
1271 {
1272 typedef __tuple_element_t<__i, tuple<_Elements...>> __element_type;
1273 return std::forward<__element_type&&>(std::get<__i>(__t));
1274 }
1275
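
This is exactly the call chain the analyzer traces through steps 13-22: std::get<__i> forwards to __get_helper<__i>, whose parameter type _Tuple_impl<__i, _Head, _Tail...>& picks the matching base by derived-to-base conversion, and _M_head ultimately returns a reference to the stored _M_head_impl field. For a std::unique_ptr element, as in the DomTreeNodeBase case, the returned reference can refer to a null pointer, which the caller must still check before dereferencing. A small sketch:

    #include <cassert>
    #include <memory>
    #include <tuple>

    int main() {
        // get<0> returns a reference to the unique_ptr itself; the
        // reference is always valid, the pointee may be null.
        std::tuple<std::unique_ptr<int>, int> t(nullptr, 7);
        std::unique_ptr<int>& p = std::get<0>(t);
        assert(!p);                 // valid reference, null pointee
        p.reset(new int(41));
        assert(*std::get<0>(t) + std::get<1>(t) == 48);
        return 0;
    }
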
1276#if __cplusplus > 201103L
1277
1278#define __cpp_lib_tuples_by_type 201304
1279
1280 template<typename _Head, size_t __i, typename... _Tail>
1281 constexpr _Head&
1282 __get_helper2(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1283 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1284
1285 template<typename _Head, size_t __i, typename... _Tail>
1286 constexpr const _Head&
1287 __get_helper2(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1288 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1289
1290 /// Return a reference to the unique element of type _Tp of a tuple.
1291 template <typename _Tp, typename... _Types>
1292 constexpr _Tp&
1293 get(tuple<_Types...>& __t) noexcept
1294 { return std::__get_helper2<_Tp>(__t); }
1295
1296 /// Return a reference to the unique element of type _Tp of a tuple rvalue.
1297 template <typename _Tp, typename... _Types>
1298 constexpr _Tp&&
1299 get(tuple<_Types...>&& __t) noexcept
1300 { return std::forward<_Tp&&>(std::__get_helper2<_Tp>(__t)); }
1301
1302 /// Return a const reference to the unique element of type _Tp of a tuple.
1303 template <typename _Tp, typename... _Types>
1304 constexpr const _Tp&
1305 get(const tuple<_Types...>& __t) noexcept
1306 { return std::__get_helper2<_Tp>(__t); }
1307#endif
1308
1309 // This class performs the comparison operations on tuples
1310 template<typename _Tp, typename _Up, size_t __i, size_t __size>
1311 struct __tuple_compare
1312 {
1313 static constexpr bool
1314 __eq(const _Tp& __t, const _Up& __u)
1315 {
1316 return bool(std::get<__i>(__t) == std::get<__i>(__u))
1317 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__eq(__t, __u);
1318 }
1319
1320 static constexpr bool
1321 __less(const _Tp& __t, const _Up& __u)
1322 {
1323 return bool(std::get<__i>(__t) < std::get<__i>(__u))
1324 || (!bool(std::get<__i>(__u) < std::get<__i>(__t))
1325 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__less(__t, __u));
1326 }
1327 };
1328
1329 template<typename _Tp, typename _Up, size_t __size>
1330 struct __tuple_compare<_Tp, _Up, __size, __size>
1331 {
1332 static constexpr bool
1333 __eq(const _Tp&, const _Up&) { return true; }
1334
1335 static constexpr bool
1336 __less(const _Tp&, const _Up&) { return false; }
1337 };
1338
1339 template<typename... _TElements, typename... _UElements>
1340 constexpr bool
1341 operator==(const tuple<_TElements...>& __t,
1342 const tuple<_UElements...>& __u)
1343 {
1344 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1345 "tuple objects can only be compared if they have equal sizes.");
1346 using __compare = __tuple_compare<tuple<_TElements...>,
1347 tuple<_UElements...>,
1348 0, sizeof...(_TElements)>;
1349 return __compare::__eq(__t, __u);
1350 }
1351
1352 template<typename... _TElements, typename... _UElements>
1353 constexpr bool
1354 operator<(const tuple<_TElements...>& __t,
1355 const tuple<_UElements...>& __u)
1356 {
1357 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1358 "tuple objects can only be compared if they have equal sizes.");
1359 using __compare = __tuple_compare<tuple<_TElements...>,
1360 tuple<_UElements...>,
1361 0, sizeof...(_TElements)>;
1362 return __compare::__less(__t, __u);
1363 }
1364
1365 template<typename... _TElements, typename... _UElements>
1366 constexpr bool
1367 operator!=(const tuple<_TElements...>& __t,
1368 const tuple<_UElements...>& __u)
1369 { return !(__t == __u); }
1370
1371 template<typename... _TElements, typename... _UElements>
1372 constexpr bool
1373 operator>(const tuple<_TElements...>& __t,
1374 const tuple<_UElements...>& __u)
1375 { return __u < __t; }
1376
1377 template<typename... _TElements, typename... _UElements>
1378 constexpr bool
1379 operator<=(const tuple<_TElements...>& __t,
1380 const tuple<_UElements...>& __u)
1381 { return !(__u < __t); }
1382
1383 template<typename... _TElements, typename... _UElements>
1384 constexpr bool
1385 operator>=(const tuple<_TElements...>& __t,
1386 const tuple<_UElements...>& __u)
1387 { return !(__t < __u); }
1388
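
__tuple_compare gives these operators their elementwise semantics: __eq compares element __i and recurses on __i + 1, and __less is the standard lexicographic rule, where the first position at which the elements differ decides and later positions are only consulted on ties. For example:

    #include <cassert>
    #include <tuple>

    int main() {
        assert(std::make_tuple(1, 'a') == std::make_tuple(1, 'a'));
        assert(std::make_tuple(1, 'a') <  std::make_tuple(1, 'b'));  // tie on 1
        assert(std::make_tuple(0, 'z') <  std::make_tuple(1, 'a'));  // first decides
        assert(std::make_tuple(1, 'b') >  std::make_tuple(1, 'a'));  // via __u < __t
        return 0;
    }
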
1389 // NB: DR 705.
1390 template<typename... _Elements>
1391 constexpr tuple<typename __decay_and_strip<_Elements>::__type...>
1392 make_tuple(_Elements&&... __args)
1393 {
1394 typedef tuple<typename __decay_and_strip<_Elements>::__type...>
1395 __result_type;
1396 return __result_type(std::forward<_Elements>(__args)...);
1397 }
1398
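
__decay_and_strip decays each argument type (arrays and functions to pointers, cv and reference qualifiers dropped) and additionally unwraps std::reference_wrapper into a plain reference, which is what lets make_tuple produce reference elements on request:

    #include <functional>
    #include <tuple>
    #include <type_traits>

    int main() {
        int x = 0;
        const char* s = "hi";
        // std::ref(x) yields reference_wrapper<int>, stripped to int&;
        // the other arguments simply decay.
        auto t = std::make_tuple(std::ref(x), s, 'c');
        static_assert(std::is_same<decltype(t),
                      std::tuple<int&, const char*, char>>::value, "");
        std::get<0>(t) = 5;       // writes through to x
        return x == 5 ? 0 : 1;
    }
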
1399 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1400 // 2275. Why is forward_as_tuple not constexpr?
1401 template<typename... _Elements>
1402 constexpr tuple<_Elements&&...>
1403 forward_as_tuple(_Elements&&... __args) noexcept
1404 { return tuple<_Elements&&...>(std::forward<_Elements>(__args)...); }
1405
1406 template<typename... _Tps>
1407 struct __is_tuple_like_impl<tuple<_Tps...>> : true_type
1408 { };
1409
1410 // Internal type trait that allows us to sfinae-protect tuple_cat.
1411 template<typename _Tp>
1412 struct __is_tuple_like
1413 : public __is_tuple_like_impl<typename std::remove_cv
1414 <typename std::remove_reference<_Tp>::type>::type>::type
1415 { };
1416
1417 template<size_t, typename, typename, size_t>
1418 struct __make_tuple_impl;
1419
1420 template<size_t _Idx, typename _Tuple, typename... _Tp, size_t _Nm>
1421 struct __make_tuple_impl<_Idx, tuple<_Tp...>, _Tuple, _Nm>
1422 : __make_tuple_impl<_Idx + 1,
1423 tuple<_Tp..., __tuple_element_t<_Idx, _Tuple>>,
1424 _Tuple, _Nm>
1425 { };
1426
1427 template<std::size_t _Nm, typename _Tuple, typename... _Tp>
1428 struct __make_tuple_impl<_Nm, tuple<_Tp...>, _Tuple, _Nm>
1429 {
1430 typedef tuple<_Tp...> __type;
1431 };
1432
1433 template<typename _Tuple>
1434 struct __do_make_tuple
1435 : __make_tuple_impl<0, tuple<>, _Tuple, std::tuple_size<_Tuple>::value>
1436 { };
1437
1438 // Returns the std::tuple equivalent of a tuple-like type.
1439 template<typename _Tuple>
1440 struct __make_tuple
1441 : public __do_make_tuple<typename std::remove_cv
1442 <typename std::remove_reference<_Tuple>::type>::type>
1443 { };
1444
1445 // Combines several std::tuple's into a single one.
1446 template<typename...>
1447 struct __combine_tuples;
1448
1449 template<>
1450 struct __combine_tuples<>
1451 {
1452 typedef tuple<> __type;
1453 };
1454
1455 template<typename... _Ts>
1456 struct __combine_tuples<tuple<_Ts...>>
1457 {
1458 typedef tuple<_Ts...> __type;
1459 };
1460
1461 template<typename... _T1s, typename... _T2s, typename... _Rem>
1462 struct __combine_tuples<tuple<_T1s...>, tuple<_T2s...>, _Rem...>
1463 {
1464 typedef typename __combine_tuples<tuple<_T1s..., _T2s...>,
1465 _Rem...>::__type __type;
1466 };
1467
1468 // Computes the result type of tuple_cat given a set of tuple-like types.
1469 template<typename... _Tpls>
1470 struct __tuple_cat_result
1471 {
1472 typedef typename __combine_tuples
1473 <typename __make_tuple<_Tpls>::__type...>::__type __type;
1474 };
1475
1476 // Helper to determine the index set for the first tuple-like
1477 // type of a given set.
1478 template<typename...>
1479 struct __make_1st_indices;
1480
1481 template<>
1482 struct __make_1st_indices<>
1483 {
1484 typedef std::_Index_tuple<> __type;
1485 };
1486
1487 template<typename _Tp, typename... _Tpls>
1488 struct __make_1st_indices<_Tp, _Tpls...>
1489 {
1490 typedef typename std::_Build_index_tuple<std::tuple_size<
1491 typename std::remove_reference<_Tp>::type>::value>::__type __type;
1492 };
1493
1494 // Performs the actual concatenation by step-wise expanding tuple-like
1495 // objects into the elements, which are finally forwarded into the
1496 // result tuple.
1497 template<typename _Ret, typename _Indices, typename... _Tpls>
1498 struct __tuple_concater;
1499
1500 template<typename _Ret, std::size_t... _Is, typename _Tp, typename... _Tpls>
1501 struct __tuple_concater<_Ret, std::_Index_tuple<_Is...>, _Tp, _Tpls...>
1502 {
1503 template<typename... _Us>
1504 static constexpr _Ret
1505 _S_do(_Tp&& __tp, _Tpls&&... __tps, _Us&&... __us)
1506 {
1507 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1508 typedef __tuple_concater<_Ret, __idx, _Tpls...> __next;
1509 return __next::_S_do(std::forward<_Tpls>(__tps)...,
1510 std::forward<_Us>(__us)...,
1511 std::get<_Is>(std::forward<_Tp>(__tp))...);
1512 }
1513 };
1514
1515 template<typename _Ret>
1516 struct __tuple_concater<_Ret, std::_Index_tuple<>>
1517 {
1518 template<typename... _Us>
1519 static constexpr _Ret
1520 _S_do(_Us&&... __us)
1521 {
1522 return _Ret(std::forward<_Us>(__us)...);
1523 }
1524 };
1525
1526 /// tuple_cat
1527 template<typename... _Tpls, typename = typename
1528 enable_if<__and_<__is_tuple_like<_Tpls>...>::value>::type>
1529 constexpr auto
1530 tuple_cat(_Tpls&&... __tpls)
1531 -> typename __tuple_cat_result<_Tpls...>::__type
1532 {
1533 typedef typename __tuple_cat_result<_Tpls...>::__type __ret;
1534 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1535 typedef __tuple_concater<__ret, __idx, _Tpls...> __concater;
1536 return __concater::_S_do(std::forward<_Tpls>(__tpls)...);
1537 }
1538
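
Putting the pieces together: __tuple_cat_result computes the concatenated element list, and __tuple_concater peels one tuple-like argument at a time, expanding it through its index tuple and forwarding the elements into the result. Anything satisfying the tuple-like protocol participates, e.g. std::pair:

    #include <cassert>
    #include <tuple>
    #include <type_traits>
    #include <utility>

    int main() {
        auto a = std::make_tuple(1, 'x');
        std::pair<double, int> b(2.5, 7);   // pairs are tuple-like too
        auto c = std::tuple_cat(a, b);
        static_assert(std::is_same<decltype(c),
                      std::tuple<int, char, double, int>>::value, "");
        assert(std::get<3>(c) == 7);
        return 0;
    }
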
1539 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1540 // 2301. Why is tie not constexpr?
1541 /// tie
1542 template<typename... _Elements>
1543 constexpr tuple<_Elements&...>
1544 tie(_Elements&... __args) noexcept
1545 { return tuple<_Elements&...>(__args...); }
1546
1547 /// swap
1548 template<typename... _Elements>
1549 inline void
1550 swap(tuple<_Elements...>& __x, tuple<_Elements...>& __y)
1551 noexcept(noexcept(__x.swap(__y)))
1552 { __x.swap(__y); }
1553
1554 // A class (and instance) which can be used in 'tie' when an element
1555 // of a tuple is not required
1556 struct _Swallow_assign
1557 {
1558 template<class _Tp>
1559 const _Swallow_assign&
1560 operator=(const _Tp&) const
1561 { return *this; }
1562 };
1563
1564 const _Swallow_assign ignore{};
1565
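
tie builds a tuple of lvalue references, so assigning a tuple to it unpacks into the named variables, while ignore's _Swallow_assign::operator= silently discards its element:

    #include <cassert>
    #include <tuple>

    int main() {
        int quot = 0;
        // The second element of the right-hand tuple is consumed by
        // std::ignore; the first is written through the reference to quot.
        std::tie(quot, std::ignore) = std::make_tuple(3, 1);
        assert(quot == 3);
        return 0;
    }
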
1566 /// Partial specialization for tuples
1567 template<typename... _Types, typename _Alloc>
1568 struct uses_allocator<tuple<_Types...>, _Alloc> : true_type { };
1569
1570 // See stl_pair.h...
1571 template<class _T1, class _T2>
1572 template<typename... _Args1, typename... _Args2>
1573 inline
1574 pair<_T1, _T2>::
1575 pair(piecewise_construct_t,
1576 tuple<_Args1...> __first, tuple<_Args2...> __second)
1577 : pair(__first, __second,
1578 typename _Build_index_tuple<sizeof...(_Args1)>::__type(),
1579 typename _Build_index_tuple<sizeof...(_Args2)>::__type())
1580 { }
1581
1582 template<class _T1, class _T2>
1583 template<typename... _Args1, std::size_t... _Indexes1,
1584 typename... _Args2, std::size_t... _Indexes2>
1585 inline
1586 pair<_T1, _T2>::
1587 pair(tuple<_Args1...>& __tuple1, tuple<_Args2...>& __tuple2,
1588 _Index_tuple<_Indexes1...>, _Index_tuple<_Indexes2...>)
1589 : first(std::forward<_Args1>(std::get<_Indexes1>(__tuple1))...),
1590 second(std::forward<_Args2>(std::get<_Indexes2>(__tuple2))...)
1591 { }
1592
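
These two pair constructors are the back end of piecewise construction: each argument tuple is expanded through its _Index_tuple so that one pack initializes first and the other second, with no intermediate pair temporary. Typical use is map::emplace:

    #include <cassert>
    #include <map>
    #include <string>
    #include <tuple>
    #include <utility>

    int main() {
        std::map<std::string, std::pair<int, int>> m;
        // One forward_as_tuple pack constructs the key in place, the other
        // constructs the mapped pair<int, int> in place.
        m.emplace(std::piecewise_construct,
                  std::forward_as_tuple("key"),
                  std::forward_as_tuple(1, 2));
        assert(m.at("key").second == 2);
        return 0;
    }
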
1593 /// @}
1594
1595_GLIBCXX_END_NAMESPACE_VERSION
1596} // namespace std
1597
1598#endif // C++11
1599
1600#endif // _GLIBCXX_TUPLE