Bug Summary

File: lib/CodeGen/InlineSpiller.cpp
Warning: line 1272, column 14
Called C++ object pointer is null
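
For context: the analyzer's core checks emit "Called C++ object pointer is null" when a method is invoked through a pointer that is provably null on at least one feasible path. A minimal reduced sketch of the pattern, purely illustrative and not code from InlineSpiller.cpp:

    struct V { int id() const { return 0; } };

    V *lookup(bool Found) {
      static V Val;
      return Found ? &Val : nullptr;  // may return null
    }

    int use(bool Found) {
      V *P = lookup(Found);
      return P->id();  // flagged: P is null on the Found == false path
    }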

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InlineSpiller.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-9/lib/clang/9.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-9~svn362543/lib/CodeGen -I /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/include -I /build/llvm-toolchain-snapshot-9~svn362543/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/9.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-9/lib/clang/9.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-9~svn362543=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2019-06-05-060531-1271-1 -x c++ /build/llvm-toolchain-snapshot-9~svn362543/lib/CodeGen/InlineSpiller.cpp -faddrsig

/build/llvm-toolchain-snapshot-9~svn362543/lib/CodeGen/InlineSpiller.cpp

1//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The inline spiller modifies the machine function directly instead of
10// inserting spills and restores in VirtRegMap.
11//
12//===----------------------------------------------------------------------===//
13
14#include "LiveRangeCalc.h"
15#include "Spiller.h"
16#include "SplitKit.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/MapVector.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SetVector.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/Statistic.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/CodeGen/LiveInterval.h"
28#include "llvm/CodeGen/LiveIntervals.h"
29#include "llvm/CodeGen/LiveRangeEdit.h"
30#include "llvm/CodeGen/LiveStacks.h"
31#include "llvm/CodeGen/MachineBasicBlock.h"
32#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
33#include "llvm/CodeGen/MachineDominators.h"
34#include "llvm/CodeGen/MachineFunction.h"
35#include "llvm/CodeGen/MachineFunctionPass.h"
36#include "llvm/CodeGen/MachineInstr.h"
37#include "llvm/CodeGen/MachineInstrBuilder.h"
38#include "llvm/CodeGen/MachineInstrBundle.h"
39#include "llvm/CodeGen/MachineLoopInfo.h"
40#include "llvm/CodeGen/MachineOperand.h"
41#include "llvm/CodeGen/MachineRegisterInfo.h"
42#include "llvm/CodeGen/SlotIndexes.h"
43#include "llvm/CodeGen/TargetInstrInfo.h"
44#include "llvm/CodeGen/TargetOpcodes.h"
45#include "llvm/CodeGen/TargetRegisterInfo.h"
46#include "llvm/CodeGen/TargetSubtargetInfo.h"
47#include "llvm/CodeGen/VirtRegMap.h"
48#include "llvm/Config/llvm-config.h"
49#include "llvm/Support/BlockFrequency.h"
50#include "llvm/Support/BranchProbability.h"
51#include "llvm/Support/CommandLine.h"
52#include "llvm/Support/Compiler.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/raw_ostream.h"
56#include <cassert>
57#include <iterator>
58#include <tuple>
59#include <utility>
60#include <vector>
61
62using namespace llvm;
63
64#define DEBUG_TYPE "regalloc"
65
66STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
67STATISTIC(NumSnippets, "Number of spilled snippets");
68STATISTIC(NumSpills, "Number of spills inserted");
69STATISTIC(NumSpillsRemoved, "Number of spills removed");
70STATISTIC(NumReloads, "Number of reloads inserted");
71STATISTIC(NumReloadsRemoved, "Number of reloads removed");
72STATISTIC(NumFolded, "Number of folded stack accesses");
73STATISTIC(NumFoldedLoads, "Number of folded loads");
74STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
75
76static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
77 cl::desc("Disable inline spill hoisting"));
78static cl::opt<bool>
79RestrictStatepointRemat("restrict-statepoint-remat",
80 cl::init(false), cl::Hidden,
81 cl::desc("Restrict remat for statepoint operands"));
82
83namespace {
84
85class HoistSpillHelper : private LiveRangeEdit::Delegate {
86 MachineFunction &MF;
87 LiveIntervals &LIS;
88 LiveStacks &LSS;
89 AliasAnalysis *AA;
90 MachineDominatorTree &MDT;
91 MachineLoopInfo &Loops;
92 VirtRegMap &VRM;
93 MachineRegisterInfo &MRI;
94 const TargetInstrInfo &TII;
95 const TargetRegisterInfo &TRI;
96 const MachineBlockFrequencyInfo &MBFI;
97
98 InsertPointAnalysis IPA;
99
100 // Map from StackSlot to the LiveInterval of the original register.
101 // Note the LiveInterval of the original register may have been deleted
102 // after it is spilled. We keep a copy here to track the range where
103 // spills can be moved.
104 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
105
106 // Map from pair of (StackSlot and Original VNI) to a set of spills which
107 // have the same stackslot and have equal values defined by Original VNI.
108 // These spills are mergeable and are hoist candidates.
109 using MergeableSpillsMap =
110 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
111 MergeableSpillsMap MergeableSpills;
112
113 /// This is the map from original register to a set containing all its
114 /// siblings. To hoist a spill to another BB, we need to find out a live
115 /// sibling there and use it as the source of the new spill.
116 DenseMap<unsigned, SmallSetVector<unsigned, 16>> Virt2SiblingsMap;
117
118 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
119 MachineBasicBlock &BB, unsigned &LiveReg);
120
121 void rmRedundantSpills(
122 SmallPtrSet<MachineInstr *, 16> &Spills,
123 SmallVectorImpl<MachineInstr *> &SpillsToRm,
124 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
125
126 void getVisitOrders(
127 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
128 SmallVectorImpl<MachineDomTreeNode *> &Orders,
129 SmallVectorImpl<MachineInstr *> &SpillsToRm,
130 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
131 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
132
133 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
134 SmallPtrSet<MachineInstr *, 16> &Spills,
135 SmallVectorImpl<MachineInstr *> &SpillsToRm,
136 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
137
138public:
139 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
140 VirtRegMap &vrm)
141 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
142 LSS(pass.getAnalysis<LiveStacks>()),
143 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
144 MDT(pass.getAnalysis<MachineDominatorTree>()),
145 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
146 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
147 TRI(*mf.getSubtarget().getRegisterInfo()),
148 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
149 IPA(LIS, mf.getNumBlockIDs()) {}
150
151 void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
152 unsigned Original);
153 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
154 void hoistAllSpills();
155 void LRE_DidCloneVirtReg(unsigned, unsigned) override;
156};
157
158class InlineSpiller : public Spiller {
159 MachineFunction &MF;
160 LiveIntervals &LIS;
161 LiveStacks &LSS;
162 AliasAnalysis *AA;
163 MachineDominatorTree &MDT;
164 MachineLoopInfo &Loops;
165 VirtRegMap &VRM;
166 MachineRegisterInfo &MRI;
167 const TargetInstrInfo &TII;
168 const TargetRegisterInfo &TRI;
169 const MachineBlockFrequencyInfo &MBFI;
170
171 // Variables that are valid during spill(), but used by multiple methods.
172 LiveRangeEdit *Edit;
173 LiveInterval *StackInt;
174 int StackSlot;
175 unsigned Original;
176
177 // All registers to spill to StackSlot, including the main register.
178 SmallVector<unsigned, 8> RegsToSpill;
179
180 // All COPY instructions to/from snippets.
181 // They are ignored since both operands refer to the same stack slot.
182 SmallPtrSet<MachineInstr*, 8> SnippetCopies;
183
184 // Values that failed to remat at some point.
185 SmallPtrSet<VNInfo*, 8> UsedValues;
186
187 // Dead defs generated during spilling.
188 SmallVector<MachineInstr*, 8> DeadDefs;
189
190 // Object records spills information and does the hoisting.
191 HoistSpillHelper HSpiller;
192
193 ~InlineSpiller() override = default;
194
195public:
196 InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
197 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
198 LSS(pass.getAnalysis<LiveStacks>()),
199 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
200 MDT(pass.getAnalysis<MachineDominatorTree>()),
201 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
202 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
203 TRI(*mf.getSubtarget().getRegisterInfo()),
204 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
205 HSpiller(pass, mf, vrm) {}
206
207 void spill(LiveRangeEdit &) override;
208 void postOptimization() override;
209
210private:
211 bool isSnippet(const LiveInterval &SnipLI);
212 void collectRegsToSpill();
213
214 bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }
215
216 bool isSibling(unsigned Reg);
217 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
218 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
219
220 void markValueUsed(LiveInterval*, VNInfo*);
221 bool canGuaranteeAssignmentAfterRemat(unsigned VReg, MachineInstr &MI);
222 bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
223 void reMaterializeAll();
224
225 bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
226 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
227 MachineInstr *LoadMI = nullptr);
228 void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
229 void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
230
231 void spillAroundUses(unsigned Reg);
232 void spillAll();
233};
234
235} // end anonymous namespace
236
237Spiller::~Spiller() = default;
238
239void Spiller::anchor() {}
240
241Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
242 MachineFunction &mf,
243 VirtRegMap &vrm) {
244 return new InlineSpiller(pass, mf, vrm);
245}
246
247//===----------------------------------------------------------------------===//
248// Snippets
249//===----------------------------------------------------------------------===//
250
251// When spilling a virtual register, we also spill any snippets it is connected
252// to. The snippets are small live ranges that only have a single real use,
253// leftovers from live range splitting. Spilling them enables memory operand
254// folding or tightens the live range around the single use.
255//
256// This minimizes register pressure and maximizes the store-to-load distance for
257// spill slots, which can be important in tight loops.
258
259/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
260/// otherwise return 0.
261static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
262 if (!MI.isFullCopy())
263 return 0;
264 if (MI.getOperand(0).getReg() == Reg)
265 return MI.getOperand(1).getReg();
266 if (MI.getOperand(1).getReg() == Reg)
267 return MI.getOperand(0).getReg();
268 return 0;
269}
270
271/// isSnippet - Identify if a live interval is a snippet that should be spilled.
272/// It is assumed that SnipLI is a virtual register with the same original as
273/// Edit->getReg().
274bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
275 unsigned Reg = Edit->getReg();
276
277 // A snippet is a tiny live range with only a single instruction using it
278 // besides copies to/from Reg or spills/fills. We accept:
279 //
280 // %snip = COPY %Reg / FILL fi#
281 // %snip = USE %snip
282 // %Reg = COPY %snip / SPILL %snip, fi#
283 //
284 if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
285 return false;
286
287 MachineInstr *UseMI = nullptr;
288
289 // Check that all uses satisfy our criteria.
290 for (MachineRegisterInfo::reg_instr_nodbg_iterator
291 RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
292 E = MRI.reg_instr_nodbg_end(); RI != E; ) {
293 MachineInstr &MI = *RI++;
294
295 // Allow copies to/from Reg.
296 if (isFullCopyOf(MI, Reg))
297 continue;
298
299 // Allow stack slot loads.
300 int FI;
301 if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
302 continue;
303
304 // Allow stack slot stores.
305 if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
306 continue;
307
308 // Allow a single additional instruction.
309 if (UseMI && &MI != UseMI)
310 return false;
311 UseMI = &MI;
312 }
313 return true;
314}
315
316/// collectRegsToSpill - Collect live range snippets that only have a single
317/// real use.
318void InlineSpiller::collectRegsToSpill() {
319 unsigned Reg = Edit->getReg();
320
321 // Main register always spills.
322 RegsToSpill.assign(1, Reg);
323 SnippetCopies.clear();
324
325 // Snippets all have the same original, so there can't be any for an original
326 // register.
327 if (Original == Reg)
328 return;
329
330 for (MachineRegisterInfo::reg_instr_iterator
331 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
332 MachineInstr &MI = *RI++;
333 unsigned SnipReg = isFullCopyOf(MI, Reg);
334 if (!isSibling(SnipReg))
335 continue;
336 LiveInterval &SnipLI = LIS.getInterval(SnipReg);
337 if (!isSnippet(SnipLI))
338 continue;
339 SnippetCopies.insert(&MI);
340 if (isRegToSpill(SnipReg))
341 continue;
342 RegsToSpill.push_back(SnipReg);
343 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
344 ++NumSnippets;
345 }
346}
347
348bool InlineSpiller::isSibling(unsigned Reg) {
349 return TargetRegisterInfo::isVirtualRegister(Reg) &&
350 VRM.getOriginal(Reg) == Original;
351}
352
353/// It is beneficial to spill to an earlier place in the same BB in
354/// cases such as the following:
355/// There is an alternative def earlier in the same MBB.
356/// Hoist the spill as far as possible in SpillMBB. This can ease
357/// register pressure:
358///
359/// x = def
360/// y = use x
361/// s = copy x
362///
363/// Hoisting the spill of s to immediately after the def removes the
364/// interference between x and y:
365///
366/// x = def
367/// spill x
368/// y = use killed x
369///
370/// This hoist only helps when the copy kills its source.
371///
372bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
373 MachineInstr &CopyMI) {
374 SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
375#ifndef NDEBUG
376 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
377 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
378#endif
379
380 unsigned SrcReg = CopyMI.getOperand(1).getReg();
381 LiveInterval &SrcLI = LIS.getInterval(SrcReg);
382 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
383 LiveQueryResult SrcQ = SrcLI.Query(Idx);
384 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
385 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
386 return false;
387
388 // Conservatively extend the stack slot range to the range of the original
389 // value. We may be able to do better with stack slot coloring by being more
390 // careful here.
391 assert(StackInt && "No stack slot assigned yet.");
392 LiveInterval &OrigLI = LIS.getInterval(Original);
393 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
394 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
395 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
396 << *StackInt << '\n');
397
398 // We are going to spill SrcVNI immediately after its def, so clear out
399 // any later spills of the same value.
400 eliminateRedundantSpills(SrcLI, SrcVNI);
401
402 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
403 MachineBasicBlock::iterator MII;
404 if (SrcVNI->isPHIDef())
405 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
406 else {
407 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
408 assert(DefMI && "Defining instruction disappeared");
409 MII = DefMI;
410 ++MII;
411 }
412 // Insert spill without kill flag immediately after def.
413 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
414 MRI.getRegClass(SrcReg), &TRI);
415 --MII; // Point to store instruction.
416 LIS.InsertMachineInstrInMaps(*MII);
417 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
418
419 HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
420 ++NumSpills;
421 return true;
422}
423
424/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
425/// redundant spills of this value in SLI.reg and sibling copies.
426void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
427 assert(VNI && "Missing value");
428 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
429 WorkList.push_back(std::make_pair(&SLI, VNI));
430 assert(StackInt && "No stack slot assigned yet.");
431
432 do {
433 LiveInterval *LI;
434 std::tie(LI, VNI) = WorkList.pop_back_val();
435 unsigned Reg = LI->reg;
436 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
437 << VNI->def << " in " << *LI << '\n');
438
439 // Regs to spill are taken care of.
440 if (isRegToSpill(Reg))
441 continue;
442
443 // Add all of VNI's live range to StackInt.
444 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
445 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
446
447 // Find all spills and copies of VNI.
448 for (MachineRegisterInfo::use_instr_nodbg_iterator
449 UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
450 UI != E; ) {
451 MachineInstr &MI = *UI++;
452 if (!MI.isCopy() && !MI.mayStore())
453 continue;
454 SlotIndex Idx = LIS.getInstructionIndex(MI);
455 if (LI->getVNInfoAt(Idx) != VNI)
456 continue;
457
458 // Follow sibling copies down the dominator tree.
459 if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
460 if (isSibling(DstReg)) {
461 LiveInterval &DstLI = LIS.getInterval(DstReg);
462 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
463 assert(DstVNI && "Missing defined value");
464 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
465 WorkList.push_back(std::make_pair(&DstLI, DstVNI));
466 }
467 continue;
468 }
469
470 // Erase spills.
471 int FI;
472 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
473 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
474 // eliminateDeadDefs won't normally remove stores, so switch opcode.
475 MI.setDesc(TII.get(TargetOpcode::KILL));
476 DeadDefs.push_back(&MI);
477 ++NumSpillsRemoved;
478 if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
479 --NumSpills;
480 }
481 }
482 } while (!WorkList.empty());
483}
484
485//===----------------------------------------------------------------------===//
486// Rematerialization
487//===----------------------------------------------------------------------===//
488
489/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
490/// instruction cannot be eliminated. See through snippet copies.
491void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
492 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
493 WorkList.push_back(std::make_pair(LI, VNI));
494 do {
495 std::tie(LI, VNI) = WorkList.pop_back_val();
496 if (!UsedValues.insert(VNI).second)
497 continue;
498
499 if (VNI->isPHIDef()) {
500 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
501 for (MachineBasicBlock *P : MBB->predecessors()) {
502 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
503 if (PVNI)
504 WorkList.push_back(std::make_pair(LI, PVNI));
505 }
506 continue;
507 }
508
509 // Follow snippet copies.
510 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
511 if (!SnippetCopies.count(MI))
512 continue;
513 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
514 assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
515 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
516 assert(SnipVNI && "Snippet undefined before copy");
517 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
518 } while (!WorkList.empty());
519}
520
521bool InlineSpiller::canGuaranteeAssignmentAfterRemat(unsigned VReg,
522 MachineInstr &MI) {
523 if (!RestrictStatepointRemat)
524 return true;
525 // Here's a quick explanation of the problem we're trying to handle here:
526 // * There are some pseudo instructions with more vreg uses than there are
527 // physical registers on the machine.
528 // * This is normally handled by spilling the vreg, and folding the reload
529 // into the user instruction. (Thus decreasing the number of used vregs
530 // until the remainder can be assigned to physregs.)
531 // * However, since we may try to spill vregs in any order, we can end up
532 // trying to spill each operand to the instruction, and then rematting it
533 // instead. When that happens, the new live intervals (for the remats) are
534 // expected to be trivially assignable (i.e. RS_Done). However, since we
535 // may have more remats than physregs, we're guaranteed to fail to assign
536 // one.
537 // At the moment, we only handle this for STATEPOINTs since they're the only
538 // pseudo op where we've seen this. If we start seeing other instructions
539 // with the same problem, we need to revisit this.
540 return (MI.getOpcode() != TargetOpcode::STATEPOINT);
541}
542
543/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
544bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
545 // Analyze instruction
546 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
547 MIBundleOperands::VirtRegInfo RI =
548 MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
549
550 if (!RI.Reads)
551 return false;
552
553 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
554 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
555
556 if (!ParentVNI) {
557 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
558 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
559 MachineOperand &MO = MI.getOperand(i);
560 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
561 MO.setIsUndef();
562 }
563 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
564 return true;
565 }
566
567 if (SnippetCopies.count(&MI))
568 return false;
569
570 LiveInterval &OrigLI = LIS.getInterval(Original);
571 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
572 LiveRangeEdit::Remat RM(ParentVNI);
573 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
574
575 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
576 markValueUsed(&VirtReg, ParentVNI);
577 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
578 return false;
579 }
580
581 // If the instruction also writes VirtReg.reg, it had better not require the
582 // same register for uses and defs.
583 if (RI.Tied) {
584 markValueUsed(&VirtReg, ParentVNI);
585 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
586 return false;
587 }
588
589 // Before rematerializing into a register for a single instruction, try to
590 // fold a load into the instruction. That avoids allocating a new register.
591 if (RM.OrigMI->canFoldAsLoad() &&
592 foldMemoryOperand(Ops, RM.OrigMI)) {
593 Edit->markRematerialized(RM.ParentVNI);
594 ++NumFoldedLoads;
595 return true;
596 }
597
598 // If we can't guarantee that we'll be able to actually assign the new vreg,
599 // we can't remat.
600 if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg, MI)) {
601 markValueUsed(&VirtReg, ParentVNI);
602 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
603 return false;
604 }
605
606 // Allocate a new register for the remat.
607 unsigned NewVReg = Edit->createFrom(Original);
608
609 // Finally we can rematerialize OrigMI before MI.
610 SlotIndex DefIdx =
611 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
612
613 // We take the DebugLoc from MI, since OrigMI may be attributed to a
614 // different source location.
615 auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
616 NewMI->setDebugLoc(MI.getDebugLoc());
617
618 (void)DefIdx;
619 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
620 << *LIS.getInstructionFromIndex(DefIdx));
621
622 // Replace operands
623 for (const auto &OpPair : Ops) {
624 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
625 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
626 MO.setReg(NewVReg);
627 MO.setIsKill();
628 }
629 }
630 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
631
632 ++NumRemats;
633 return true;
634}
635
636/// reMaterializeAll - Try to rematerialize as many uses as possible,
637/// and trim the live ranges after.
638void InlineSpiller::reMaterializeAll() {
639 if (!Edit->anyRematerializable(AA))
640 return;
641
642 UsedValues.clear();
643
644 // Try to remat before all uses of snippets.
645 bool anyRemat = false;
646 for (unsigned Reg : RegsToSpill) {
647 LiveInterval &LI = LIS.getInterval(Reg);
648 for (MachineRegisterInfo::reg_bundle_iterator
649 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
650 RegI != E; ) {
651 MachineInstr &MI = *RegI++;
652
653 // Debug values are not allowed to affect codegen.
654 if (MI.isDebugValue())
655 continue;
656
657 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
658 "instruction that isn't a DBG_VALUE");
659
660 anyRemat |= reMaterializeFor(LI, MI);
661 }
662 }
663 if (!anyRemat)
664 return;
665
666 // Remove any values that were completely rematted.
667 for (unsigned Reg : RegsToSpill) {
668 LiveInterval &LI = LIS.getInterval(Reg);
669 for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
670 I != E; ++I) {
671 VNInfo *VNI = *I;
672 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
673 continue;
674 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
675 MI->addRegisterDead(Reg, &TRI);
676 if (!MI->allDefsAreDead())
677 continue;
678 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
679 DeadDefs.push_back(MI);
680 }
681 }
682
683 // Eliminate dead code after remat. Note that some snippet copies may be
684 // deleted here.
685 if (DeadDefs.empty())
686 return;
687 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
688 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
689
690 // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
691 // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
692 // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
693 // removed, PHI VNI are still left in the LiveInterval.
694 // So to get rid of unused reg, we need to check whether it has non-dbg
695 // reference instead of whether it has non-empty interval.
696 unsigned ResultPos = 0;
697 for (unsigned Reg : RegsToSpill) {
698 if (MRI.reg_nodbg_empty(Reg)) {
699 Edit->eraseVirtReg(Reg);
700 continue;
701 }
702
703 assert(LIS.hasInterval(Reg) &&
704 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
705 "Empty and not used live-range?!");
706
707 RegsToSpill[ResultPos++] = Reg;
708 }
709 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
710 LLVM_DEBUG(dbgs() << RegsToSpill.size()
711 << " registers to spill after remat.\n");
712}
713
714//===----------------------------------------------------------------------===//
715// Spilling
716//===----------------------------------------------------------------------===//
717
718/// If MI is a load or store of StackSlot, it can be removed.
719bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
720 int FI = 0;
721 unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
722 bool IsLoad = InstrReg;
723 if (!IsLoad)
724 InstrReg = TII.isStoreToStackSlot(*MI, FI);
725
726 // We have a stack access. Is it the right register and slot?
727 if (InstrReg != Reg || FI != StackSlot)
728 return false;
729
730 if (!IsLoad)
731 HSpiller.rmFromMergeableSpills(*MI, StackSlot);
732
733 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
734 LIS.RemoveMachineInstrFromMaps(*MI);
735 MI->eraseFromParent();
736
737 if (IsLoad) {
738 ++NumReloadsRemoved;
739 --NumReloads;
740 } else {
741 ++NumSpillsRemoved;
742 --NumSpills;
743 }
744
745 return true;
746}
747
748#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
749LLVM_DUMP_METHOD
750// Dump the range of instructions from B to E with their slot indexes.
751static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
752 MachineBasicBlock::iterator E,
753 LiveIntervals const &LIS,
754 const char *const header,
755 unsigned VReg = 0) {
756 char NextLine = '\n';
757 char SlotIndent = '\t';
758
759 if (std::next(B) == E) {
760 NextLine = ' ';
761 SlotIndent = ' ';
762 }
763
764 dbgs() << '\t' << header << ": " << NextLine;
765
766 for (MachineBasicBlock::iterator I = B; I != E; ++I) {
767 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
768
769 // If a register was passed in and this instruction has it as a
770 // destination that is marked as an early clobber, print the
771 // early-clobber slot index.
772 if (VReg) {
773 MachineOperand *MO = I->findRegisterDefOperand(VReg);
774 if (MO && MO->isEarlyClobber())
775 Idx = Idx.getRegSlot(true);
776 }
777
778 dbgs() << SlotIndent << Idx << '\t' << *I;
779 }
780}
781#endif
782
783/// foldMemoryOperand - Try folding stack slot references in Ops into their
784/// instructions.
785///
786/// @param Ops Operand indices from analyzeVirtReg().
787/// @param LoadMI Load instruction to use instead of stack slot when non-null.
788/// @return True on success.
789bool InlineSpiller::
790foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
791 MachineInstr *LoadMI) {
792 if (Ops.empty())
793 return false;
794 // Don't attempt folding in bundles.
795 MachineInstr *MI = Ops.front().first;
796 if (Ops.back().first != MI || MI->isBundled())
797 return false;
798
799 bool WasCopy = MI->isCopy();
800 unsigned ImpReg = 0;
801
802 // Spill subregs if the target allows it.
803 // We always want to spill subregs for stackmap/patchpoint pseudos.
804 bool SpillSubRegs = TII.isSubregFoldable() ||
805 MI->getOpcode() == TargetOpcode::STATEPOINT ||
806 MI->getOpcode() == TargetOpcode::PATCHPOINT ||
807 MI->getOpcode() == TargetOpcode::STACKMAP;
808
809 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
810 // operands.
811 SmallVector<unsigned, 8> FoldOps;
812 for (const auto &OpPair : Ops) {
813 unsigned Idx = OpPair.second;
814 assert(MI == OpPair.first && "Instruction conflict during operand folding");
815 MachineOperand &MO = MI->getOperand(Idx);
816 if (MO.isImplicit()) {
817 ImpReg = MO.getReg();
818 continue;
819 }
820
821 if (!SpillSubRegs && MO.getSubReg())
822 return false;
823 // We cannot fold a load instruction into a def.
824 if (LoadMI && MO.isDef())
825 return false;
826 // Tied use operands should not be passed to foldMemoryOperand.
827 if (!MI->isRegTiedToDefOperand(Idx))
828 FoldOps.push_back(Idx);
829 }
830
831 // If we only have implicit uses, we won't be able to fold that.
832 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
833 if (FoldOps.empty())
834 return false;
835
836 MachineInstrSpan MIS(MI);
837
838 MachineInstr *FoldMI =
839 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
840 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS);
841 if (!FoldMI)
842 return false;
843
844 // Remove LIS for any dead defs in the original MI not in FoldMI.
845 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
846 if (!MO->isReg())
847 continue;
848 unsigned Reg = MO->getReg();
849 if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
850 MRI.isReserved(Reg)) {
851 continue;
852 }
853 // Skip non-Defs, including undef uses and internal reads.
854 if (MO->isUse())
855 continue;
856 MIBundleOperands::PhysRegInfo RI =
857 MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
858 if (RI.FullyDefined)
859 continue;
860 // FoldMI does not define this physreg. Remove the LI segment.
861 assert(MO->isDead() && "Cannot fold physreg def");
862 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
863 LIS.removePhysRegDefAt(Reg, Idx);
864 }
865
866 int FI;
867 if (TII.isStoreToStackSlot(*MI, FI) &&
868 HSpiller.rmFromMergeableSpills(*MI, FI))
869 --NumSpills;
870 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
871 MI->eraseFromParent();
872
873 // Insert any new instructions other than FoldMI into the LIS maps.
874 assert(!MIS.empty() && "Unexpected empty span of instructions!");
875 for (MachineInstr &MI : MIS)
876 if (&MI != FoldMI)
877 LIS.InsertMachineInstrInMaps(MI);
878
879 // TII.foldMemoryOperand may have left some implicit operands on the
880 // instruction. Strip them.
881 if (ImpReg)
882 for (unsigned i = FoldMI->getNumOperands(); i; --i) {
883 MachineOperand &MO = FoldMI->getOperand(i - 1);
884 if (!MO.isReg() || !MO.isImplicit())
885 break;
886 if (MO.getReg() == ImpReg)
887 FoldMI->RemoveOperand(i - 1);
888 }
889
890 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
891 "folded"));
892
893 if (!WasCopy)
894 ++NumFolded;
895 else if (Ops.front().second == 0) {
896 ++NumSpills;
897 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
898 } else
899 ++NumReloads;
900 return true;
901}
902
903void InlineSpiller::insertReload(unsigned NewVReg,
904 SlotIndex Idx,
905 MachineBasicBlock::iterator MI) {
906 MachineBasicBlock &MBB = *MI->getParent();
907
908 MachineInstrSpan MIS(MI);
909 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
910 MRI.getRegClass(NewVReg), &TRI);
911
912 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
913
914 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
915 NewVReg));
916 ++NumReloads;
917}
918
919/// Check if \p Def fully defines a VReg with an undefined value.
920/// If that's the case, that means the value of VReg is actually
921/// not relevant.
922static bool isFullUndefDef(const MachineInstr &Def) {
923 if (!Def.isImplicitDef())
924 return false;
925 assert(Def.getNumOperands() == 1 &&
926 "Implicit def with more than one definition");
927 // We can say that the VReg defined by Def is undef, only if it is
928 // fully defined by Def. Otherwise, some of the lanes may not be
929 // undef and the value of the VReg matters.
930 return !Def.getOperand(0).getSubReg();
931}
932
933/// insertSpill - Insert a spill of NewVReg after MI.
934void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
935 MachineBasicBlock::iterator MI) {
936 MachineBasicBlock &MBB = *MI->getParent();
937
938 MachineInstrSpan MIS(MI);
939 bool IsRealSpill = true;
940 if (isFullUndefDef(*MI)) {
941 // Don't spill undef value.
942 // Anything works for undef, in particular keeping the memory
943 // uninitialized is a viable option and it saves code size and
944 // run time.
945 BuildMI(MBB, std::next(MI), MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
946 .addReg(NewVReg, getKillRegState(isKill));
947 IsRealSpill = false;
948 } else
949 TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
950 MRI.getRegClass(NewVReg), &TRI);
951
952 LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
953
954 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
955 "spill"));
956 ++NumSpills;
957 if (IsRealSpill)
958 HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
959}
960
961/// spillAroundUses - insert spill code around each use of Reg.
962void InlineSpiller::spillAroundUses(unsigned Reg) {
963 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
964 LiveInterval &OldLI = LIS.getInterval(Reg);
965
966 // Iterate over instructions using Reg.
967 for (MachineRegisterInfo::reg_bundle_iterator
968 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
969 RegI != E; ) {
970 MachineInstr *MI = &*(RegI++);
971
972 // Debug values are not allowed to affect codegen.
973 if (MI->isDebugValue()) {
974 // Modify DBG_VALUE now that the value is in a spill slot.
975 MachineBasicBlock *MBB = MI->getParent();
976 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
977 buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
978 MBB->erase(MI);
979 continue;
980 }
981
982 assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
983 "instruction that isn't a DBG_VALUE");
984
985 // Ignore copies to/from snippets. We'll delete them.
986 if (SnippetCopies.count(MI))
987 continue;
988
989 // Stack slot accesses may coalesce away.
990 if (coalesceStackAccess(MI, Reg))
991 continue;
992
993 // Analyze instruction.
994 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
995 MIBundleOperands::VirtRegInfo RI =
996 MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);
997
998 // Find the slot index where this instruction reads and writes OldLI.
999 // This is usually the def slot, except for tied early clobbers.
1000 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
1001 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
1002 if (SlotIndex::isSameInstr(Idx, VNI->def))
1003 Idx = VNI->def;
1004
1005 // Check for a sibling copy.
1006 unsigned SibReg = isFullCopyOf(*MI, Reg);
1007 if (SibReg && isSibling(SibReg)) {
1008 // This may actually be a copy between snippets.
1009 if (isRegToSpill(SibReg)) {
1010 LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
1011 SnippetCopies.insert(MI);
1012 continue;
1013 }
1014 if (RI.Writes) {
1015 if (hoistSpillInsideBB(OldLI, *MI)) {
1016 // This COPY is now dead, the value is already in the stack slot.
1017 MI->getOperand(0).setIsDead();
1018 DeadDefs.push_back(MI);
1019 continue;
1020 }
1021 } else {
1022 // This is a reload for a sib-reg copy. Drop spills downstream.
1023 LiveInterval &SibLI = LIS.getInterval(SibReg);
1024 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
1025 // The COPY will fold to a reload below.
1026 }
1027 }
1028
1029 // Attempt to fold memory ops.
1030 if (foldMemoryOperand(Ops))
1031 continue;
1032
1033 // Create a new virtual register for spill/fill.
1034 // FIXME: Infer regclass from instruction alone.
1035 unsigned NewVReg = Edit->createFrom(Reg);
1036
1037 if (RI.Reads)
1038 insertReload(NewVReg, Idx, MI);
1039
1040 // Rewrite instruction operands.
1041 bool hasLiveDef = false;
1042 for (const auto &OpPair : Ops) {
1043 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1044 MO.setReg(NewVReg);
1045 if (MO.isUse()) {
1046 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1047 MO.setIsKill();
1048 } else {
1049 if (!MO.isDead())
1050 hasLiveDef = true;
1051 }
1052 }
1053 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
1054
1055 // FIXME: Use a second vreg if instruction has no tied ops.
1056 if (RI.Writes)
1057 if (hasLiveDef)
1058 insertSpill(NewVReg, true, MI);
1059 }
1060}
1061
1062/// spillAll - Spill all registers remaining after rematerialization.
1063void InlineSpiller::spillAll() {
1064 // Update LiveStacks now that we are committed to spilling.
1065 if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1066 StackSlot = VRM.assignVirt2StackSlot(Original);
1067 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1068 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1069 } else
1070 StackInt = &LSS.getInterval(StackSlot);
1071
1072 if (Original != Edit->getReg())
1073 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1074
1075 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1076 for (unsigned Reg : RegsToSpill)
1077 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1078 StackInt->getValNumInfo(0));
1079 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1080
1081 // Spill around uses of all RegsToSpill.
1082 for (unsigned Reg : RegsToSpill)
1083 spillAroundUses(Reg);
1084
1085 // Hoisted spills may cause dead code.
1086 if (!DeadDefs.empty()) {
1087 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1088 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1089 }
1090
1091 // Finally delete the SnippetCopies.
1092 for (unsigned Reg : RegsToSpill) {
1093 for (MachineRegisterInfo::reg_instr_iterator
1094 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
1095 RI != E; ) {
1096 MachineInstr &MI = *(RI++);
1097 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1098 // FIXME: Do this with a LiveRangeEdit callback.
1099 LIS.RemoveMachineInstrFromMaps(MI);
1100 MI.eraseFromParent();
1101 }
1102 }
1103
1104 // Delete all spilled registers.
1105 for (unsigned Reg : RegsToSpill)
1106 Edit->eraseVirtReg(Reg);
1107}
1108
1109void InlineSpiller::spill(LiveRangeEdit &edit) {
1110 ++NumSpilledRanges;
1111 Edit = &edit;
1112 assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
1113 && "Trying to spill a stack slot.");
1114 // Share a stack slot among all descendants of Original.
1115 Original = VRM.getOriginal(edit.getReg());
1116 StackSlot = VRM.getStackSlot(Original);
1117 StackInt = nullptr;
1118
1119 LLVM_DEBUG(dbgs() << "Inline spilling "
1120 << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1121 << ':' << edit.getParent() << "\nFrom original "
1122 << printReg(Original) << '\n');
1123 assert(edit.getParent().isSpillable() &&
1124 "Attempting to spill already spilled value.");
1125 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1126
1127 collectRegsToSpill();
1128 reMaterializeAll();
1129
1130 // Remat may handle everything.
1131 if (!RegsToSpill.empty())
1132 spillAll();
1133
1134 Edit->calculateRegClassAndHint(MF, Loops, MBFI);
1135}
1136
1137/// Optimizations after all the reg selections and spills are done.
1138void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
[1] Calling 'HoistSpillHelper::hoistAllSpills'
1139
1140/// When a spill is inserted, add the spill to MergeableSpills map.
1141void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1142 unsigned Original) {
1143 BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1144 LiveInterval &OrigLI = LIS.getInterval(Original);
1145 // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
1146 // LiveInterval may be cleared after all its references are spilled.
1147 if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1148 auto LI = llvm::make_unique<LiveInterval>(OrigLI.reg, OrigLI.weight);
1149 LI->assign(OrigLI, Allocator);
1150 StackSlotToOrigLI[StackSlot] = std::move(LI);
1151 }
1152 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1153 VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1154 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1155 MergeableSpills[MIdx].insert(&Spill);
1156}
1157
1158/// When a spill is removed, remove the spill from MergeableSpills map.
1159/// Return true if the spill is removed successfully.
1160bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1161 int StackSlot) {
1162 auto It = StackSlotToOrigLI.find(StackSlot);
1163 if (It == StackSlotToOrigLI.end())
1164 return false;
1165 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1166 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1167 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1168 return MergeableSpills[MIdx].erase(&Spill);
1169}
1170
1171/// Check BB to see if it is a possible target BB to place a hoisted spill,
1172/// i.e., there should be a live sibling of OrigReg at the insert point.
1173bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1174 MachineBasicBlock &BB, unsigned &LiveReg) {
1175 SlotIndex Idx;
1176 unsigned OrigReg = OrigLI.reg;
1177 MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
1178 if (MI != BB.end())
1179 Idx = LIS.getInstructionIndex(*MI);
1180 else
1181 Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
1182 SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1183 assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1184
1185 for (auto const SibReg : Siblings) {
1186 LiveInterval &LI = LIS.getInterval(SibReg);
1187 VNInfo *VNI = LI.getVNInfoAt(Idx);
1188 if (VNI) {
1189 LiveReg = SibReg;
1190 return true;
1191 }
1192 }
1193 return false;
1194}
1195
1196/// Remove redundant spills in the same BB. Save those redundant spills in
1197/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1198void HoistSpillHelper::rmRedundantSpills(
1199 SmallPtrSet<MachineInstr *, 16> &Spills,
1200 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1201 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1202 // For each spill seen, check SpillBBToSpill[] to see if its BB already has
1203 // another spill inside. If a BB contains more than one spill, keep only the
1204 // earlier spill, i.e. the one with the smaller SlotIndex.
1205 for (const auto CurrentSpill : Spills) {
1206 MachineBasicBlock *Block = CurrentSpill->getParent();
1207 MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1208 MachineInstr *PrevSpill = SpillBBToSpill[Node];
1209 if (PrevSpill) {
1210 SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1211 SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1212 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1213 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1214 SpillsToRm.push_back(SpillToRm);
1215 SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1216 } else {
1217 SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1218 }
1219 }
1220 for (const auto SpillToRm : SpillsToRm)
1221 Spills.erase(SpillToRm);
1222}
1223
1224/// Starting from \p Root find a top-down traversal order of the dominator
1225/// tree to visit all basic blocks containing the elements of \p Spills.
1226/// Redundant spills will be found and put into \p SpillsToRm at the same
1227/// time. \p SpillBBToSpill will be populated as part of the process and
1228/// maps a basic block to the first store occurring in the basic block.
1229/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1230void HoistSpillHelper::getVisitOrders(
1231 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1232 SmallVectorImpl<MachineDomTreeNode *> &Orders,
1233 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1234 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1235 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1236 // The set contains all the possible BB nodes to which we may hoist
1237 // original spills.
1238 SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1239 // Save the BB nodes on the path from the first BB node containing
1240 // non-redundant spill to the Root node.
1241 SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1242 // All the spills to be hoisted must originate from a single def instruction
1243 // to the OrigReg. It means the def instruction should dominate all the spills
1244 // to be hoisted. We choose the BB where the def instruction is located as
1245 // the Root.
1246 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1247 // For every node in the dominator tree that contains a spill, walk up the
1248 // dominator tree towards the Root node until it is reached. If another node
1249 // containing a spill lies in the middle of the path, the spill seen earlier
1250 // is redundant and the node containing it will be removed. All the nodes on
1251 // the path from the first node with a non-redundant spill to the Root node
1252 // will be added to the WorkSet, which will contain all the possible
1253 // locations where spills may be hoisted to after the loop below is done.
1254 for (const auto Spill : Spills) {
1255 MachineBasicBlock *Block = Spill->getParent();
1256 MachineDomTreeNode *Node = MDT[Block];
[9] Calling 'MachineDominatorTree::operator[]'
[31] Returning from 'MachineDominatorTree::operator[]'
[32] 'Node' initialized here
1257 MachineInstr *SpillToRm = nullptr;
1258 while (Node != RootIDomNode) {
[33] Assuming 'Node' is not equal to 'RootIDomNode'
[34] Loop condition is true. Entering loop body
1259 // If Node dominates Block, and it already contains a spill, the spill in
1260 // Block will be redundant.
1261 if (Node != MDT[Block] && SpillBBToSpill[Node]) {
[35] Assuming the condition is false
[36] Assuming pointer value is null
1262 SpillToRm = SpillBBToSpill[MDT[Block]];
1263 break;
1264 /// If the Node is already in WorkSet, the path from the Node to
1265 /// the Root node must already have been traversed for another spill,
1266 /// so there is no need to repeat it.
1267 } else if (WorkSet.count(Node)) {
[37] Assuming the condition is false
[38] Taking false branch
1268 break;
1269 } else {
1270 NodesOnPath.insert(Node);
1271 }
1272 Node = Node->getIDom();
[39] Called C++ object pointer is null
1273 }
1274 if (SpillToRm) {
1275 SpillsToRm.push_back(SpillToRm);
1276 } else {
1277 // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1278 // set the initial status before hoisting starts. The value for BBs
1279 // containing original spills is set to 0, to distinguish them from
1280 // BBs containing hoisted spills, which will be inserted into
1281 // SpillsToKeep later during hoisting.
1282 SpillsToKeep[MDT[Block]] = 0;
1283 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1284 }
1285 NodesOnPath.clear();
1286 }
1287
1288 // Sort the nodes in WorkSet in top-down order and save the nodes
1289 // in Orders. Orders will be used for hoisting in runHoistSpills.
1290 unsigned idx = 0;
1291 Orders.push_back(MDT.getBase().getNode(Root));
1292 do {
1293 MachineDomTreeNode *Node = Orders[idx++];
1294 const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
1295 unsigned NumChildren = Children.size();
1296 for (unsigned i = 0; i != NumChildren; ++i) {
1297 MachineDomTreeNode *Child = Children[i];
1298 if (WorkSet.count(Child))
1299 Orders.push_back(Child);
1300 }
1301 } while (idx != Orders.size());
1302 assert(Orders.size() == WorkSet.size() &&
1303 "Orders have different size with WorkSet");
1304
1305#ifndef NDEBUG
1306 LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1307 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1308 for (; RIt != Orders.rend(); RIt++)
1309 LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1310 LLVM_DEBUG(dbgs() << "\n");
1311#endif
1312}
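Note on the warning: the analyzer's path (steps 32 through 39 above) assumes the dominator-tree lookup MDT[Block] yields a null pointer, so Node is null on entry to the while loop and the dereference in 'Node = Node->getIDom()' at line 1272 is flagged. Below is a minimal defensive sketch of the same walk, reusing the names from getVisitOrders above; it illustrates one possible guard and is not the committed upstream fix:

  // Guard both the initial lookup and the walk towards the root:
  // MDT[Block] may return nullptr for a statically unreachable block
  // (see DominatorTreeBase::getNode below), and getIDom() returns
  // nullptr once the root of the dominator tree is reached.
  MachineDomTreeNode *Node = MDT[Block];
  while (Node && Node != RootIDomNode) {
    if (Node != MDT[Block] && SpillBBToSpill[Node]) {
      SpillToRm = SpillBBToSpill[MDT[Block]];
      break;
    } else if (WorkSet.count(Node)) {
      break;
    } else {
      NodesOnPath.insert(Node);
    }
    Node = Node->getIDom(); // may be nullptr; re-checked by the loop condition
  }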
1313
1314/// Try to hoist spills according to BB hotness. The spills to be removed
1315/// will be saved in \p SpillsToRm. The spills to be inserted will be saved
1316/// in \p SpillsToIns.
1317void HoistSpillHelper::runHoistSpills(
1318 LiveInterval &OrigLI, VNInfo &OrigVNI,
1319 SmallPtrSet<MachineInstr *, 16> &Spills,
1320 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1321 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1322 // Visit order of dominator tree nodes.
1323 SmallVector<MachineDomTreeNode *, 32> Orders;
1324 // SpillsToKeep contains all the nodes where spills are to be inserted
1325 // during hoisting. If the spill to be inserted is an original spill
1326 // (not a hoisted one), the value of the map entry is 0. If the spill
1327 // is a hoisted spill, the value of the map entry is the VReg to be used
1328 // as the source of the spill.
1329 DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1330 // Map from BB to the first spill inside of it.
1331 DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1332
1333 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1334
1335 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1336 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
[8] Calling 'HoistSpillHelper::getVisitOrders'
1337 SpillBBToSpill);
1338
1339 // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1340 // nodes set and the cost of all the spills inside those nodes.
1341 // The nodes set are the locations where spills are to be inserted
1342 // in the subtree of current node.
1343 using NodesCostPair =
1344 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1345 DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1346
1347 // Iterate Orders set in reverse order, which will be a bottom-up order
1348 // in the dominator tree. Once we visit a dom tree node, we know its
1349 // children have already been visited and the spill locations in the
1350 // subtrees of all the children have been determined.
1351 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1352 for (; RIt != Orders.rend(); RIt++) {
1353 MachineBasicBlock *Block = (*RIt)->getBlock();
1354
1355 // If Block contains an original spill, simply continue.
1356 if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1357 SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1358 // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1359 SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1360 continue;
1361 }
1362
1363 // Collect spills in subtree of current node (*RIt) to
1364 // SpillsInSubTreeMap[*RIt].first.
1365 const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();
1366 unsigned NumChildren = Children.size();
1367 for (unsigned i = 0; i != NumChildren; ++i) {
1368 MachineDomTreeNode *Child = Children[i];
1369 if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1370 continue;
1371 // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1372 // should be placed before getting the begin and end iterators of
1373 // SpillsInSubTreeMap[Child].first, or else the iterators may be
1374 // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
1375 // and the map grows and then the original buckets in the map are moved.
1376 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1377 SpillsInSubTreeMap[*RIt].first;
1378 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1379 SubTreeCost += SpillsInSubTreeMap[Child].second;
1380 auto BI = SpillsInSubTreeMap[Child].first.begin();
1381 auto EI = SpillsInSubTreeMap[Child].first.end();
1382 SpillsInSubTree.insert(BI, EI);
1383 SpillsInSubTreeMap.erase(Child);
1384 }
1385
1386 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1387 SpillsInSubTreeMap[*RIt].first;
1388 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1389 // No spills in subtree, simply continue.
1390 if (SpillsInSubTree.empty())
1391 continue;
1392
1393 // Check whether Block is a possible candidate to insert spill.
1394 unsigned LiveReg = 0;
1395 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1396 continue;
1397
1398 // If there are multiple spills that could be merged, bias a little
1399 // to hoist the spill.
1400 BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1401 ? BranchProbability(9, 10)
1402 : BranchProbability(1, 1);
1403 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1404 // Hoist: Move spills to current Block.
1405 for (const auto SpillBB : SpillsInSubTree) {
1406 // When SpillBB is a BB that contains an original spill, insert the
1407 // spill into SpillsToRm.
1408 if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1409 !SpillsToKeep[SpillBB]) {
1410 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1411 SpillsToRm.push_back(SpillToRm);
1412 }
1413 // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
1414 SpillsToKeep.erase(SpillBB);
1415 }
1416 // Current Block is the BB containing the new hoisted spill. Add it to
1417 // SpillsToKeep. LiveReg is the source of the new spill.
1418 SpillsToKeep[*RIt] = LiveReg;
1419 LLVM_DEBUG({
1420 dbgs() << "spills in BB: ";
1421 for (const auto Rspill : SpillsInSubTree)
1422 dbgs() << Rspill->getBlock()->getNumber() << " ";
1423 dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1424 << "\n";
1425 });
1426 SpillsInSubTree.clear();
1427 SpillsInSubTree.insert(*RIt);
1428 SubTreeCost = MBFI.getBlockFreq(Block);
1429 }
1430 }
1431 // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1432 // save them to SpillsToIns.
1433 for (const auto Ent : SpillsToKeep) {
1434 if (Ent.second)
1435 SpillsToIns[Ent.first->getBlock()] = Ent.second;
1436 }
1437}
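The hoisting decision above weighs the accumulated spill cost of a subtree against the frequency of the candidate block, biased by a 9/10 margin when more than one spill could be merged. A small numeric sketch of that comparison, using made-up frequencies rather than values from this report:

  #include "llvm/Support/BlockFrequency.h"
  #include "llvm/Support/BranchProbability.h"
  using namespace llvm;

  // Two spills in the subtree with a combined cost of 120 versus a
  // candidate block of frequency 100: 120 > 100 * 9/10 = 90, so both
  // spills are replaced by a single hoisted spill in the candidate block.
  BlockFrequency SubTreeCost(120);
  BlockFrequency CandidateFreq(100);
  BranchProbability MarginProb(9, 10); // SpillsInSubTree.size() > 1
  bool ShouldHoist = SubTreeCost > CandidateFreq * MarginProb; // true

With a single spill in the subtree the margin is 1/1, so hoisting happens only when it strictly lowers the total cost.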
1438
1439/// For spills with equal values, remove redundant spills and hoist those left
1440/// to less hot spots.
1441///
1442/// Spills with equal values are collected into the same set in
1443/// MergeableSpills when a spill is inserted. These equal spills originate
1444/// from the same defining instruction and are dominated by it.
1445/// Before hoisting all the equal spills, redundant spills inside the same
1446/// BB are first marked to be deleted. Then, starting from the spills left,
1447/// walk up the dominator tree towards the Root node where the defining
1448/// instruction is located, mark the dominated spills to be deleted along the
1449/// way, and collect the BB nodes on the path from non-dominated spills to the
1450/// defining instruction into a WorkSet. The nodes in WorkSet are the candidate
1451/// places where we consider hoisting the spills. We iterate the WorkSet in
1452/// bottom-up order, and for each node we decide whether to hoist the spills
1453/// inside its subtree to that node. In this way, we can get a local benefit
1454/// even if hoisting all the equal spills to one cold place is impossible.
1455void HoistSpillHelper::hoistAllSpills() {
1456 SmallVector<unsigned, 4> NewVRegs;
1457 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1458
1459 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
[2] Assuming 'i' is equal to 'e'
[3] Loop condition is false. Execution continues on line 1467
1460 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
1461 unsigned Original = VRM.getPreSplitReg(Reg);
1462 if (!MRI.def_empty(Reg))
1463 Virt2SiblingsMap[Original].insert(Reg);
1464 }
1465
1466 // Each entry in MergeableSpills contains a spill set with equal values.
1467 for (auto &Ent : MergeableSpills) {
1468 int Slot = Ent.first.first;
1469 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1470 VNInfo *OrigVNI = Ent.first.second;
1471 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1472 if (Ent.second.empty())
[4] Taking false branch
1473 continue;
1474
1475 LLVM_DEBUG({
[5] Assuming 'DebugFlag' is 0
[6] Loop condition is false. Exiting loop
1476 dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1477 << "Equal spills in BB: ";
1478 for (const auto spill : EqValSpills)
1479 dbgs() << spill->getParent()->getNumber() << " ";
1480 dbgs() << "\n";
1481 });
1482
1483 // SpillsToRm is the spill set to be removed from EqValSpills.
1484 SmallVector<MachineInstr *, 16> SpillsToRm;
1485 // SpillsToIns is the spill set to be newly inserted after hoisting.
1486 DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1487
1488 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
[7] Calling 'HoistSpillHelper::runHoistSpills'
1489
1490 LLVM_DEBUG({
1491 dbgs() << "Finally inserted spills in BB: ";
1492 for (const auto Ispill : SpillsToIns)
1493 dbgs() << Ispill.first->getNumber() << " ";
1494 dbgs() << "\nFinally removed spills in BB: ";
1495 for (const auto Rspill : SpillsToRm)
1496 dbgs() << Rspill->getParent()->getNumber() << " ";
1497 dbgs() << "\n";
1498 });
1499
1500 // Stack live range update.
1501 LiveInterval &StackIntvl = LSS.getInterval(Slot);
1502 if (!SpillsToIns.empty() || !SpillsToRm.empty())
1503 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1504 StackIntvl.getValNumInfo(0));
1505
1506 // Insert hoisted spills.
1507 for (auto const Insert : SpillsToIns) {
1508 MachineBasicBlock *BB = Insert.first;
1509 unsigned LiveReg = Insert.second;
1510 MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
1511 TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
1512 MRI.getRegClass(LiveReg), &TRI);
1513 LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
1514 ++NumSpills;
1515 }
1516
1517 // Remove redundant spills or change them to dead instructions.
1518 NumSpills -= SpillsToRm.size();
1519 for (auto const RMEnt : SpillsToRm) {
1520 RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1521 for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1522 MachineOperand &MO = RMEnt->getOperand(i - 1);
1523 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1524 RMEnt->RemoveOperand(i - 1);
1525 }
1526 }
1527 Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1528 }
1529}
1530
1531/// For VirtReg clone, the \p New register should have the same physreg or
1532/// stackslot as the \p old register.
1533void HoistSpillHelper::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
1534 if (VRM.hasPhys(Old))
1535 VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1536 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1537 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1538 else
1539 llvm_unreachable("VReg should be assigned either physreg or stackslot");
1540}

/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/CodeGen/MachineDominators.h

1//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
10// but for target-specific code rather than target-independent IR.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
15#define LLVM_CODEGEN_MACHINEDOMINATORS_H
16
17#include "llvm/ADT/SmallSet.h"
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/CodeGen/MachineBasicBlock.h"
20#include "llvm/CodeGen/MachineFunctionPass.h"
21#include "llvm/CodeGen/MachineInstr.h"
22#include "llvm/Support/GenericDomTree.h"
23#include "llvm/Support/GenericDomTreeConstruction.h"
24#include <cassert>
25#include <memory>
26#include <vector>
27
28namespace llvm {
29
30template <>
31inline void DominatorTreeBase<MachineBasicBlock, false>::addRoot(
32 MachineBasicBlock *MBB) {
33 this->Roots.push_back(MBB);
34}
35
36extern template class DomTreeNodeBase<MachineBasicBlock>;
37extern template class DominatorTreeBase<MachineBasicBlock, false>; // DomTree
38extern template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTree
39
40using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
41
42//===-------------------------------------
43/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
44/// compute a normal dominator tree.
45///
46class MachineDominatorTree : public MachineFunctionPass {
47 /// Helper structure used to hold all the basic blocks
48 /// involved in the split of a critical edge.
49 struct CriticalEdge {
50 MachineBasicBlock *FromBB;
51 MachineBasicBlock *ToBB;
52 MachineBasicBlock *NewBB;
53 };
54
55 /// Pile up all the critical edges to be split.
56 /// The splitting of a critical edge is local and thus, it is possible
57 /// to apply several of those changes at the same time.
58 mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
59
60 /// Remember all the basic blocks that are inserted during
61 /// edge splitting.
62 /// Invariant: NewBBs == all the basic blocks contained in the NewBB
63 /// field of all the elements of CriticalEdgesToSplit.
64 /// I.e., for all elt in CriticalEdgesToSplit, there exists a BB in NewBBs
65 /// such that BB == elt.NewBB.
66 mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
67
68 /// The DominatorTreeBase that is used to compute a normal dominator tree
69 std::unique_ptr<DomTreeBase<MachineBasicBlock>> DT;
70
71 /// Apply all the recorded critical edges to the DT.
72 /// This updates the underlying DT information in a way that uses
73 /// the fast query path of DT as much as possible.
74 ///
75 /// \post CriticalEdgesToSplit.empty().
76 void applySplitCriticalEdges() const;
77
78public:
79 static char ID; // Pass ID, replacement for typeid
80
81 MachineDominatorTree();
82
83 DomTreeBase<MachineBasicBlock> &getBase() {
84 if (!DT) DT.reset(new DomTreeBase<MachineBasicBlock>());
85 applySplitCriticalEdges();
86 return *DT;
87 }
88
89 void getAnalysisUsage(AnalysisUsage &AU) const override;
90
91 /// getRoots - Return the root blocks of the current CFG. This may include
92 /// multiple blocks if we are computing post dominators. For forward
93 /// dominators, this will always be a single block (the entry node).
94 ///
95 inline const SmallVectorImpl<MachineBasicBlock*> &getRoots() const {
96 applySplitCriticalEdges();
97 return DT->getRoots();
98 }
99
100 inline MachineBasicBlock *getRoot() const {
101 applySplitCriticalEdges();
102 return DT->getRoot();
103 }
104
105 inline MachineDomTreeNode *getRootNode() const {
106 applySplitCriticalEdges();
107 return DT->getRootNode();
108 }
109
110 bool runOnMachineFunction(MachineFunction &F) override;
111
112 inline bool dominates(const MachineDomTreeNode* A,
113 const MachineDomTreeNode* B) const {
114 applySplitCriticalEdges();
115 return DT->dominates(A, B);
116 }
117
118 inline bool dominates(const MachineBasicBlock* A,
119 const MachineBasicBlock* B) const {
120 applySplitCriticalEdges();
121 return DT->dominates(A, B);
122 }
123
124 // dominates - Return true if A dominates B. This performs the
125 // special checks necessary if A and B are in the same basic block.
126 bool dominates(const MachineInstr *A, const MachineInstr *B) const {
127 applySplitCriticalEdges();
128 const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
129 if (BBA != BBB) return DT->dominates(BBA, BBB);
130
131 // Loop through the basic block until we find A or B.
132 MachineBasicBlock::const_iterator I = BBA->begin();
133 for (; &*I != A && &*I != B; ++I)
134 /*empty*/ ;
135
136 //if(!DT.IsPostDominators) {
137 // A dominates B if it is found first in the basic block.
138 return &*I == A;
139 //} else {
140 // // A post-dominates B if B is found first in the basic block.
141 // return &*I == B;
142 //}
143 }
144
145 inline bool properlyDominates(const MachineDomTreeNode* A,
146 const MachineDomTreeNode* B) const {
147 applySplitCriticalEdges();
148 return DT->properlyDominates(A, B);
149 }
150
151 inline bool properlyDominates(const MachineBasicBlock* A,
152 const MachineBasicBlock* B) const {
153 applySplitCriticalEdges();
154 return DT->properlyDominates(A, B);
155 }
156
157 /// findNearestCommonDominator - Find nearest common dominator basic block
158 /// for basic block A and B. If there is no such block then return NULL.
159 inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
160 MachineBasicBlock *B) {
161 applySplitCriticalEdges();
162 return DT->findNearestCommonDominator(A, B);
163 }
164
165 inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
166 applySplitCriticalEdges();
167 return DT->getNode(BB);
[10] Calling 'DominatorTreeBase::getNode'
[29] Returning from 'DominatorTreeBase::getNode'
[30] Returning pointer
168 }
169
170 /// getNode - return the (Post)DominatorTree node for the specified basic
171 /// block. This is the same as using operator[] on this class.
172 ///
173 inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
174 applySplitCriticalEdges();
175 return DT->getNode(BB);
176 }
177
178 /// addNewBlock - Add a new node to the dominator tree information. This
179 /// creates a new node as a child of the DomBB dominator node, linking it
180 /// into the children list of the immediate dominator.
181 inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
182 MachineBasicBlock *DomBB) {
183 applySplitCriticalEdges();
184 return DT->addNewBlock(BB, DomBB);
185 }
186
187 /// changeImmediateDominator - This method is used to update the dominator
188 /// tree information when a node's immediate dominator changes.
189 ///
190 inline void changeImmediateDominator(MachineBasicBlock *N,
191 MachineBasicBlock* NewIDom) {
192 applySplitCriticalEdges();
193 DT->changeImmediateDominator(N, NewIDom);
194 }
195
196 inline void changeImmediateDominator(MachineDomTreeNode *N,
197 MachineDomTreeNode* NewIDom) {
198 applySplitCriticalEdges();
199 DT->changeImmediateDominator(N, NewIDom);
200 }
201
202 /// eraseNode - Removes a node from the dominator tree. Block must not
203 /// dominate any other blocks. Removes node from its immediate dominator's
204 /// children list. Deletes dominator node associated with basic block BB.
205 inline void eraseNode(MachineBasicBlock *BB) {
206 applySplitCriticalEdges();
207 DT->eraseNode(BB);
208 }
209
210 /// splitBlock - BB is split and now it has one successor. Update dominator
211 /// tree to reflect this change.
212 inline void splitBlock(MachineBasicBlock* NewBB) {
213 applySplitCriticalEdges();
214 DT->splitBlock(NewBB);
215 }
216
217 /// isReachableFromEntry - Return true if A is dominated by the entry
218 /// block of the function containing it.
219 bool isReachableFromEntry(const MachineBasicBlock *A) {
220 applySplitCriticalEdges();
221 return DT->isReachableFromEntry(A);
222 }
223
224 void releaseMemory() override;
225
226 void verifyAnalysis() const override;
227
228 void print(raw_ostream &OS, const Module*) const override;
229
230 /// Record that the critical edge (FromBB, ToBB) has been
231 /// split with NewBB.
232 /// It is best to use this method instead of directly updating the
233 /// underlying information, because it helps reduce the number of
234 /// times the DT information is invalidated.
235 ///
236 /// \note Do not use this method with regular edges.
237 ///
238 /// \note To benefit from the compile time improvement incurred by this
239 /// method, the users of this method have to limit the queries to the DT
240 /// interface between two edges splitting. In other words, they have to
241 /// pack the splitting of critical edges as much as possible.
242 void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
243 MachineBasicBlock *ToBB,
244 MachineBasicBlock *NewBB) {
245 bool Inserted = NewBBs.insert(NewBB).second;
246 (void)Inserted;
247 assert(Inserted &&
248 "A basic block inserted via edge splitting cannot appear twice");
249 CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
250 }
251};
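As the comment on recordSplitCriticalEdge explains, edge-split updates are meant to be batched: every query entry point above calls applySplitCriticalEdges() first, so recorded splits are flushed lazily on the next query. A short usage sketch under that contract, with hypothetical blocks A, B, C, D and the new blocks created by the splits:

  // Record several critical-edge splits back to back...
  MDT.recordSplitCriticalEdge(A, B, NewAB);
  MDT.recordSplitCriticalEdge(C, D, NewCD);
  // ...then query: the first query after the batch triggers
  // applySplitCriticalEdges() and updates the tree once for both edges.
  MachineDomTreeNode *N = MDT.getNode(NewAB);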
252
253//===-------------------------------------
254/// DominatorTree GraphTraits specialization so the DominatorTree can be
255/// iterable by generic graph iterators.
256///
257
258template <class Node, class ChildIterator>
259struct MachineDomTreeGraphTraitsBase {
260 using NodeRef = Node *;
261 using ChildIteratorType = ChildIterator;
262
263 static NodeRef getEntryNode(NodeRef N) { return N; }
264 static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
265 static ChildIteratorType child_end(NodeRef N) { return N->end(); }
266};
267
268template <class T> struct GraphTraits;
269
270template <>
271struct GraphTraits<MachineDomTreeNode *>
272 : public MachineDomTreeGraphTraitsBase<MachineDomTreeNode,
273 MachineDomTreeNode::iterator> {};
274
275template <>
276struct GraphTraits<const MachineDomTreeNode *>
277 : public MachineDomTreeGraphTraitsBase<const MachineDomTreeNode,
278 MachineDomTreeNode::const_iterator> {
279};
280
281template <> struct GraphTraits<MachineDominatorTree*>
282 : public GraphTraits<MachineDomTreeNode *> {
283 static NodeRef getEntryNode(MachineDominatorTree *DT) {
284 return DT->getRootNode();
285 }
286};
287
288} // end namespace llvm
289
290#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H

/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/Support/GenericDomTree.h

1//===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file defines a set of templates that efficiently compute a dominator
11/// tree over a generic graph. This is used typically in LLVM for fast
12/// dominance queries on the CFG, but is fully generic w.r.t. the underlying
13/// graph types.
14///
15/// Unlike ADT/* graph algorithms, generic dominator tree has more requirements
16/// on the graph's NodeRef. The NodeRef should be a pointer and,
17/// NodeRef->getParent() must return the parent node that is also a pointer.
18///
19/// FIXME: Maybe GenericDomTree needs a TreeTraits, instead of GraphTraits.
20///
21//===----------------------------------------------------------------------===//
22
23#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
24#define LLVM_SUPPORT_GENERICDOMTREE_H
25
26#include "llvm/ADT/DenseMap.h"
27#include "llvm/ADT/GraphTraits.h"
28#include "llvm/ADT/PointerIntPair.h"
29#include "llvm/ADT/STLExtras.h"
30#include "llvm/ADT/SmallPtrSet.h"
31#include "llvm/ADT/SmallVector.h"
32#include "llvm/Support/CFGUpdate.h"
33#include "llvm/Support/raw_ostream.h"
34#include <algorithm>
35#include <cassert>
36#include <cstddef>
37#include <iterator>
38#include <memory>
39#include <type_traits>
40#include <utility>
41#include <vector>
42
43namespace llvm {
44
45template <typename NodeT, bool IsPostDom>
46class DominatorTreeBase;
47
48namespace DomTreeBuilder {
49template <typename DomTreeT>
50struct SemiNCAInfo;
51} // namespace DomTreeBuilder
52
53/// Base class for the actual dominator tree node.
54template <class NodeT> class DomTreeNodeBase {
55 friend class PostDominatorTree;
56 friend class DominatorTreeBase<NodeT, false>;
57 friend class DominatorTreeBase<NodeT, true>;
58 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, false>>;
59 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, true>>;
60
61 NodeT *TheBB;
62 DomTreeNodeBase *IDom;
63 unsigned Level;
64 std::vector<DomTreeNodeBase *> Children;
65 mutable unsigned DFSNumIn = ~0;
66 mutable unsigned DFSNumOut = ~0;
67
68 public:
69 DomTreeNodeBase(NodeT *BB, DomTreeNodeBase *iDom)
70 : TheBB(BB), IDom(iDom), Level(IDom ? IDom->Level + 1 : 0) {}
71
72 using iterator = typename std::vector<DomTreeNodeBase *>::iterator;
73 using const_iterator =
74 typename std::vector<DomTreeNodeBase *>::const_iterator;
75
76 iterator begin() { return Children.begin(); }
77 iterator end() { return Children.end(); }
78 const_iterator begin() const { return Children.begin(); }
79 const_iterator end() const { return Children.end(); }
80
81 NodeT *getBlock() const { return TheBB; }
82 DomTreeNodeBase *getIDom() const { return IDom; }
83 unsigned getLevel() const { return Level; }
84
85 const std::vector<DomTreeNodeBase *> &getChildren() const { return Children; }
86
87 std::unique_ptr<DomTreeNodeBase> addChild(
88 std::unique_ptr<DomTreeNodeBase> C) {
89 Children.push_back(C.get());
90 return C;
91 }
92
93 size_t getNumChildren() const { return Children.size(); }
94
95 void clearAllChildren() { Children.clear(); }
96
97 bool compare(const DomTreeNodeBase *Other) const {
98 if (getNumChildren() != Other->getNumChildren())
99 return true;
100
101 if (Level != Other->Level) return true;
102
103 SmallPtrSet<const NodeT *, 4> OtherChildren;
104 for (const DomTreeNodeBase *I : *Other) {
105 const NodeT *Nd = I->getBlock();
106 OtherChildren.insert(Nd);
107 }
108
109 for (const DomTreeNodeBase *I : *this) {
110 const NodeT *N = I->getBlock();
111 if (OtherChildren.count(N) == 0)
112 return true;
113 }
114 return false;
115 }
116
117 void setIDom(DomTreeNodeBase *NewIDom) {
118 assert(IDom && "No immediate dominator?");
119 if (IDom == NewIDom) return;
120
121 auto I = find(IDom->Children, this);
122 assert(I != IDom->Children.end() &&
123 "Not in immediate dominator children set!");
124 // I am no longer your child...
125 IDom->Children.erase(I);
126
127 // Switch to new dominator
128 IDom = NewIDom;
129 IDom->Children.push_back(this);
130
131 UpdateLevel();
132 }
133
134 /// getDFSNumIn/getDFSNumOut - These return the DFS visitation order for nodes
135 /// in the dominator tree. They are only guaranteed valid if
136 /// updateDFSNumbers() has been called.
137 unsigned getDFSNumIn() const { return DFSNumIn; }
138 unsigned getDFSNumOut() const { return DFSNumOut; }
139
140private:
141 // Return true if this node is dominated by other. Use this only if DFS info
142 // is valid.
143 bool DominatedBy(const DomTreeNodeBase *other) const {
144 return this->DFSNumIn >= other->DFSNumIn &&
145 this->DFSNumOut <= other->DFSNumOut;
146 }
147
148 void UpdateLevel() {
149 assert(IDom);
150 if (Level == IDom->Level + 1) return;
151
152 SmallVector<DomTreeNodeBase *, 64> WorkStack = {this};
153
154 while (!WorkStack.empty()) {
155 DomTreeNodeBase *Current = WorkStack.pop_back_val();
156 Current->Level = Current->IDom->Level + 1;
157
158 for (DomTreeNodeBase *C : *Current) {
159 assert(C->IDom);
160 if (C->Level != C->IDom->Level + 1) WorkStack.push_back(C);
161 }
162 }
163 }
164};
165
166template <class NodeT>
167raw_ostream &operator<<(raw_ostream &O, const DomTreeNodeBase<NodeT> *Node) {
168 if (Node->getBlock())
169 Node->getBlock()->printAsOperand(O, false);
170 else
171 O << " <<exit node>>";
172
173 O << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "} ["
174 << Node->getLevel() << "]\n";
175
176 return O;
177}
178
179template <class NodeT>
180void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &O,
181 unsigned Lev) {
182 O.indent(2 * Lev) << "[" << Lev << "] " << N;
183 for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
184 E = N->end();
185 I != E; ++I)
186 PrintDomTree<NodeT>(*I, O, Lev + 1);
187}
188
189namespace DomTreeBuilder {
190// The routines below are provided in a separate header but referenced here.
191template <typename DomTreeT>
192void Calculate(DomTreeT &DT);
193
194template <typename DomTreeT>
195void CalculateWithUpdates(DomTreeT &DT,
196 ArrayRef<typename DomTreeT::UpdateType> Updates);
197
198template <typename DomTreeT>
199void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
200 typename DomTreeT::NodePtr To);
201
202template <typename DomTreeT>
203void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
204 typename DomTreeT::NodePtr To);
205
206template <typename DomTreeT>
207void ApplyUpdates(DomTreeT &DT,
208 ArrayRef<typename DomTreeT::UpdateType> Updates);
209
210template <typename DomTreeT>
211bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL);
212} // namespace DomTreeBuilder
213
214/// Core dominator tree base class.
215///
216/// This class is a generic template over graph nodes. It is instantiated for
217/// various graphs in the LLVM IR or in the code generator.
218template <typename NodeT, bool IsPostDom>
219class DominatorTreeBase {
220 public:
221 static_assert(std::is_pointer<typename GraphTraits<NodeT *>::NodeRef>::value,
222 "Currently DominatorTreeBase supports only pointer nodes");
223 using NodeType = NodeT;
224 using NodePtr = NodeT *;
225 using ParentPtr = decltype(std::declval<NodeT *>()->getParent());
226 static_assert(std::is_pointer<ParentPtr>::value,
227 "Currently NodeT's parent must be a pointer type");
228 using ParentType = typename std::remove_pointer<ParentPtr>::type;
229 static constexpr bool IsPostDominator = IsPostDom;
230
231 using UpdateType = cfg::Update<NodePtr>;
232 using UpdateKind = cfg::UpdateKind;
233 static constexpr UpdateKind Insert = UpdateKind::Insert;
234 static constexpr UpdateKind Delete = UpdateKind::Delete;
235
236 enum class VerificationLevel { Fast, Basic, Full };
237
238protected:
239 // Dominators always have a single root, postdominators can have more.
240 SmallVector<NodeT *, IsPostDom ? 4 : 1> Roots;
241
242 using DomTreeNodeMapType =
243 DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>;
244 DomTreeNodeMapType DomTreeNodes;
245 DomTreeNodeBase<NodeT> *RootNode;
246 ParentPtr Parent = nullptr;
247
248 mutable bool DFSInfoValid = false;
249 mutable unsigned int SlowQueries = 0;
250
251 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>;
252
253 public:
254 DominatorTreeBase() {}
255
256 DominatorTreeBase(DominatorTreeBase &&Arg)
257 : Roots(std::move(Arg.Roots)),
258 DomTreeNodes(std::move(Arg.DomTreeNodes)),
259 RootNode(Arg.RootNode),
260 Parent(Arg.Parent),
261 DFSInfoValid(Arg.DFSInfoValid),
262 SlowQueries(Arg.SlowQueries) {
263 Arg.wipe();
264 }
265
266 DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
267 Roots = std::move(RHS.Roots);
268 DomTreeNodes = std::move(RHS.DomTreeNodes);
269 RootNode = RHS.RootNode;
270 Parent = RHS.Parent;
271 DFSInfoValid = RHS.DFSInfoValid;
272 SlowQueries = RHS.SlowQueries;
273 RHS.wipe();
274 return *this;
275 }
276
277 DominatorTreeBase(const DominatorTreeBase &) = delete;
278 DominatorTreeBase &operator=(const DominatorTreeBase &) = delete;
279
280 /// getRoots - Return the root blocks of the current CFG. This may include
281 /// multiple blocks if we are computing post dominators. For forward
282 /// dominators, this will always be a single block (the entry node).
283 ///
284 const SmallVectorImpl<NodeT *> &getRoots() const { return Roots; }
285
286 /// isPostDominator - Returns true if the analysis is based on postdoms.
287 ///
288 bool isPostDominator() const { return IsPostDominator; }
289
290 /// compare - Return false if the other dominator tree base matches this
291 /// dominator tree base. Otherwise return true.
292 bool compare(const DominatorTreeBase &Other) const {
293 if (Parent != Other.Parent) return true;
294
295 if (Roots.size() != Other.Roots.size())
296 return true;
297
298 if (!std::is_permutation(Roots.begin(), Roots.end(), Other.Roots.begin()))
299 return true;
300
301 const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
302 if (DomTreeNodes.size() != OtherDomTreeNodes.size())
303 return true;
304
305 for (const auto &DomTreeNode : DomTreeNodes) {
306 NodeT *BB = DomTreeNode.first;
307 typename DomTreeNodeMapType::const_iterator OI =
308 OtherDomTreeNodes.find(BB);
309 if (OI == OtherDomTreeNodes.end())
310 return true;
311
312 DomTreeNodeBase<NodeT> &MyNd = *DomTreeNode.second;
313 DomTreeNodeBase<NodeT> &OtherNd = *OI->second;
314
315 if (MyNd.compare(&OtherNd))
316 return true;
317 }
318
319 return false;
320 }
321
322 void releaseMemory() { reset(); }
323
324 /// getNode - return the (Post)DominatorTree node for the specified basic
325 /// block. This is the same as using operator[] on this class. The result
326 /// may (but is not required to) be null for a forward (backwards)
327 /// statically unreachable block.
328 DomTreeNodeBase<NodeT> *getNode(const NodeT *BB) const {
329 auto I = DomTreeNodes.find(BB);
330 if (I != DomTreeNodes.end())
[11] Assuming the condition is true
[12] Taking true branch
331 return I->second.get();
[13] Calling 'unique_ptr::get'
[27] Returning from 'unique_ptr::get'
[28] Returning pointer
332 return nullptr;
333 }
334
335 /// See getNode.
336 DomTreeNodeBase<NodeT> *operator[](const NodeT *BB) const {
337 return getNode(BB);
338 }
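The contract above is the one this report hinges on: getNode (and operator[]) may return nullptr for a statically unreachable block, so a caller that walks immediate dominators must check the result. A minimal caller-side sketch, assuming a MachineDominatorTree MDT and a MachineBasicBlock *BB:

  if (MachineDomTreeNode *N = MDT.getNode(BB)) {
    // Reachable block: walk towards the root; getIDom() still returns
    // nullptr at the root itself, so the loop condition re-checks N.
    for (; N; N = N->getIDom()) {
      // visit N
    }
  } else {
    // BB is statically unreachable; it has no dominator tree node.
  }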
339
340 /// getRootNode - This returns the entry node for the CFG of the function. If
341 /// this tree represents the post-dominance relations for a function, however,
342 /// this root may be a node with the block == NULL. This is the case when
343 /// there are multiple exit nodes from a particular function. Consumers of
344 /// post-dominance information must be capable of dealing with this
345 /// possibility.
346 ///
347 DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; }
348 const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; }
349
350 /// Get all nodes dominated by R, including R itself.
351 void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const {
352 Result.clear();
353 const DomTreeNodeBase<NodeT> *RN = getNode(R);
354 if (!RN)
355 return; // If R is unreachable, it will not be present in the DOM tree.
356 SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL;
357 WL.push_back(RN);
358
359 while (!WL.empty()) {
360 const DomTreeNodeBase<NodeT> *N = WL.pop_back_val();
361 Result.push_back(N->getBlock());
362 WL.append(N->begin(), N->end());
363 }
364 }
365
366 /// properlyDominates - Returns true iff A dominates B and A != B.
367 /// Note that this is not a constant time operation!
368 ///
369 bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
370 const DomTreeNodeBase<NodeT> *B) const {
371 if (!A || !B)
372 return false;
373 if (A == B)
374 return false;
375 return dominates(A, B);
376 }
377
378 bool properlyDominates(const NodeT *A, const NodeT *B) const;
379
380 /// isReachableFromEntry - Return true if A is dominated by the entry
381 /// block of the function containing it.
382 bool isReachableFromEntry(const NodeT *A) const {
383 assert(!this->isPostDominator() &&
384 "This is not implemented for post dominators");
385 return isReachableFromEntry(getNode(const_cast<NodeT *>(A)));
386 }
387
388 bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; }
389
390 /// dominates - Returns true iff A dominates B. Note that this is not a
391 /// constant time operation!
392 ///
393 bool dominates(const DomTreeNodeBase<NodeT> *A,
394 const DomTreeNodeBase<NodeT> *B) const {
395 // A node trivially dominates itself.
396 if (B == A)
397 return true;
398
399 // An unreachable node is dominated by anything.
400 if (!isReachableFromEntry(B))
401 return true;
402
403 // And dominates nothing.
404 if (!isReachableFromEntry(A))
405 return false;
406
407 if (B->getIDom() == A) return true;
408
409 if (A->getIDom() == B) return false;
410
411 // A can only dominate B if it is higher in the tree.
412 if (A->getLevel() >= B->getLevel()) return false;
413
414 // Compare the result of the tree walk and the dfs numbers, if expensive
415 // checks are enabled.
416#ifdef EXPENSIVE_CHECKS
417 assert((!DFSInfoValid ||
418 (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) &&
419 "Tree walk disagrees with dfs numbers!");
420#endif
421
422 if (DFSInfoValid)
423 return B->DominatedBy(A);
424
425 // If we end up with too many slow queries, just update the
426 // DFS numbers on the theory that we are going to keep querying.
427 SlowQueries++;
428 if (SlowQueries > 32) {
429 updateDFSNumbers();
430 return B->DominatedBy(A);
431 }
432
433 return dominatedBySlowTreeWalk(A, B);
434 }
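
The query tiers above (idom/level shortcuts, cached DFS numbers, slow tree walk) are invisible to callers; behaviorally, dominates() is reflexive while properlyDominates() is not, and repeated queries on an unchanged tree become cheap once the DFS numbers exist. A hedged sketch (DT, NA, NB assumed to be a tree and two of its nodes):

    bool Self  = DT.dominates(NA, NA);          // true: reflexive
    bool PSelf = DT.properlyDominates(NA, NA);  // false by definition
    // After more than 32 slow queries the tree computes DFS numbers once,
    // so later dominates() calls reduce to two integer comparisons.
    (void)Self; (void)PSelf;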
435
436 bool dominates(const NodeT *A, const NodeT *B) const;
437
438 NodeT *getRoot() const {
439 assert(this->Roots.size() == 1 && "Should always have entry node!");
440 return this->Roots[0];
441 }
442
443 /// findNearestCommonDominator - Find the nearest common dominator basic
444 /// block for basic blocks A and B. If there is no such block, return nullptr.
445 NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) const {
446 assert(A && B && "Pointers are not valid");
447 assert(A->getParent() == B->getParent() &&
448 "Two blocks are not in same function");
449
450 // If either A or B is the entry block, then it is the nearest common
451 // dominator (for forward dominators).
452 if (!isPostDominator()) {
453 NodeT &Entry = A->getParent()->front();
454 if (A == &Entry || B == &Entry)
455 return &Entry;
456 }
457
458 DomTreeNodeBase<NodeT> *NodeA = getNode(A);
459 DomTreeNodeBase<NodeT> *NodeB = getNode(B);
460
461 if (!NodeA || !NodeB) return nullptr;
462
463 // Use level information to go up the tree until the levels match. Then
464 // continue going up until we arrive at the same node.
465 while (NodeA && NodeA != NodeB) {
466 if (NodeA->getLevel() < NodeB->getLevel()) std::swap(NodeA, NodeB);
467
468 NodeA = NodeA->IDom;
469 }
470
471 return NodeA ? NodeA->getBlock() : nullptr;
472 }
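
A common use is picking a legal hoisting point for code needed in two blocks; a sketch under the assumption that DT, A, and B exist:

    BasicBlock *NCD = DT.findNearestCommonDominator(A, B);
    if (NCD) {
      // NCD dominates both A and B, so a computation placed in NCD is
      // available on every path reaching either block.
    }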
473
474 const NodeT *findNearestCommonDominator(const NodeT *A,
475 const NodeT *B) const {
476 // Cast away the const qualifiers here. This is ok since
477 // const is re-introduced on the return type.
478 return findNearestCommonDominator(const_cast<NodeT *>(A),
479 const_cast<NodeT *>(B));
480 }
481
482 bool isVirtualRoot(const DomTreeNodeBase<NodeT> *A) const {
483 return isPostDominator() && !A->getBlock();
484 }
485
486 //===--------------------------------------------------------------------===//
487 // API to update (Post)DominatorTree information based on modifications to
488 // the CFG...
489
490 /// Inform the dominator tree about a sequence of CFG edge insertions and
491 /// deletions and perform a batch update on the tree.
492 ///
493 /// This function should be used when there were multiple CFG updates after
494 /// the last dominator tree update. It takes care of performing the updates
495 /// in sync with the CFG and optimizes away the redundant operations that
496 /// cancel each other.
497 /// The function expects the sequence of updates to be balanced. E.g.:
498 /// - {{Insert, A, B}, {Delete, A, B}, {Insert, A, B}} is fine, because
499 /// logically it results in a single insertion.
500 /// - {{Insert, A, B}, {Insert, A, B}} is invalid, because it doesn't make
501 /// sense to insert the same edge twice.
502 ///
503 /// What's more, the function assumes that it's safe to ask every node in the
504 /// CFG about its children and inverse children. This implies that deletions
505 /// of CFG edges must not delete the CFG nodes before calling this function.
506 ///
507 /// The applyUpdates function can reorder the updates and remove redundant
508 /// ones internally. The batch updater is also able to detect sequences of
509 /// zero or exactly one update -- it's optimized to do less work in these
510 /// cases.
511 ///
512 /// Note that for postdominators it automatically takes care of applying
513 /// updates on reverse edges internally (so there's no need to swap the
514 /// From and To pointers when constructing DominatorTree::UpdateType).
515 /// The type of updates is the same for DomTreeBase<T> and PostDomTreeBase<T>
516 /// with the same template parameter T.
517 ///
518 /// \param Updates An unordered sequence of updates to perform.
519 ///
520 void applyUpdates(ArrayRef<UpdateType> Updates) {
521 DomTreeBuilder::ApplyUpdates(*this, Updates);
522 }
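
A hedged sketch of a balanced batch, modeling the classic "split an edge" rewrite (DT, Pred, Succ, and NewBB are assumed names; the CFG change itself must already have been made):

    SmallVector<DominatorTree::UpdateType, 4> Updates;
    Updates.push_back({DominatorTree::Insert, Pred, NewBB});
    Updates.push_back({DominatorTree::Insert, NewBB, Succ});
    Updates.push_back({DominatorTree::Delete, Pred, Succ});
    DT.applyUpdates(Updates); // one batch instead of three point updates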
523
524 /// Inform the dominator tree about a CFG edge insertion and update the tree.
525 ///
526 /// This function has to be called just before or just after making the update
527 /// on the actual CFG. There cannot be any other updates that the dominator
528 /// tree doesn't know about.
529 ///
530 /// Note that for postdominators it automatically takes care of inserting
531 /// a reverse edge internally (so there's no need to swap the parameters).
532 ///
533 void insertEdge(NodeT *From, NodeT *To) {
534 assert(From);
535 assert(To);
536 assert(From->getParent() == Parent);
537 assert(To->getParent() == Parent);
538 DomTreeBuilder::InsertEdge(*this, From, To);
539 }
540
541 /// Inform the dominator tree about a CFG edge deletion and update the tree.
542 ///
543 /// This function has to be called just after making the update on the actual
544 /// CFG. An internal function checks, in DEBUG mode, that the edge no longer
545 /// exists in the CFG. There cannot be any other updates that the
546 /// dominator tree doesn't know about.
547 ///
548 /// Note that for postdominators it automatically takes care of deleting
549 /// a reverse edge internally (so there's no need to swap the parameters).
550 ///
551 void deleteEdge(NodeT *From, NodeT *To) {
552 assert(From);
553 assert(To);
554 assert(From->getParent() == Parent);
555 assert(To->getParent() == Parent);
556 DomTreeBuilder::DeleteEdge(*this, From, To);
557 }
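
Point updates mirror exactly one CFG edit at a time; a sketch with assumed names:

    // (the CFG has just been edited so that BB now branches to NewSucc)
    DT.insertEdge(BB, NewSucc); // mirror the insertion in the tree at once
    // ...and right after removing an edge From -> To from the CFG:
    DT.deleteEdge(From, To);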
558
559 /// Add a new node to the dominator tree information.
560 ///
561 /// This creates a new node as a child of DomBB dominator node, linking it
562 /// into the children list of the immediate dominator.
563 ///
564 /// \param BB New node in CFG.
565 /// \param DomBB CFG node that is dominator for BB.
566 /// \returns New dominator tree node that represents new CFG node.
567 ///
568 DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
569 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
570 DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
571 assert(IDomNode && "Not immediate dominator specified for block!");
572 DFSInfoValid = false;
573 return (DomTreeNodes[BB] = IDomNode->addChild(
574 llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get();
575 }
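
A sketch, assuming NewBB was just created with PredBB as its immediate dominator (names illustrative):

    DomTreeNodeBase<BasicBlock> *NewNode = DT.addNewBlock(NewBB, PredBB);
    assert(NewNode->getIDom() == DT.getNode(PredBB) &&
           "fresh node hangs off its stated idom");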
576
577 /// Add a new node to the forward dominator tree and make it a new root.
578 ///
579 /// \param BB New node in CFG.
580 /// \returns New dominator tree node that represents new CFG node.
581 ///
582 DomTreeNodeBase<NodeT> *setNewRoot(NodeT *BB) {
583 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
584 assert(!this->isPostDominator() &&
585 "Cannot change root of post-dominator tree");
586 DFSInfoValid = false;
587 DomTreeNodeBase<NodeT> *NewNode = (DomTreeNodes[BB] =
588 llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, nullptr)).get();
589 if (Roots.empty()) {
590 addRoot(BB);
591 } else {
592 assert(Roots.size() == 1);
593 NodeT *OldRoot = Roots.front();
594 auto &OldNode = DomTreeNodes[OldRoot];
595 OldNode = NewNode->addChild(std::move(DomTreeNodes[OldRoot]));
596 OldNode->IDom = NewNode;
597 OldNode->UpdateLevel();
598 Roots[0] = BB;
599 }
600 return RootNode = NewNode;
601 }
602
603 /// changeImmediateDominator - This method is used to update the dominator
604 /// tree information when a node's immediate dominator changes.
605 ///
606 void changeImmediateDominator(DomTreeNodeBase<NodeT> *N,
607 DomTreeNodeBase<NodeT> *NewIDom) {
608 assert(N && NewIDom && "Cannot change null node pointers!");
609 DFSInfoValid = false;
610 N->setIDom(NewIDom);
611 }
612
613 void changeImmediateDominator(NodeT *BB, NodeT *NewBB) {
614 changeImmediateDominator(getNode(BB), getNode(NewBB));
615 }
616
617 /// eraseNode - Removes a node from the dominator tree. Block must not
618 /// dominate any other blocks. Removes node from its immediate dominator's
619 /// children list. Deletes dominator node associated with basic block BB.
620 void eraseNode(NodeT *BB) {
621 DomTreeNodeBase<NodeT> *Node = getNode(BB);
622 assert(Node && "Removing node that isn't in dominator tree.");
623 assert(Node->getChildren().empty() && "Node is not a leaf node.");
624
625 DFSInfoValid = false;
626
627 // Remove node from immediate dominator's children list.
628 DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
629 if (IDom) {
630 const auto I = find(IDom->Children, Node);
631 assert(I != IDom->Children.end() &&
632 "Not in immediate dominator children set!");
633 // I am no longer your child...
634 IDom->Children.erase(I);
635 }
636
637 DomTreeNodes.erase(BB);
638
639 if (!IsPostDom) return;
640
641 // Remember to update PostDominatorTree roots.
642 auto RIt = llvm::find(Roots, BB);
643 if (RIt != Roots.end()) {
644 std::swap(*RIt, Roots.back());
645 Roots.pop_back();
646 }
647 }
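
A sketch of removing a dead block (DeadBB assumed): the tree node must be erased while the block pointer is still valid, and only leaves may go.

    assert(DT.getNode(DeadBB)->getChildren().empty() &&
           "eraseNode() requires a leaf");
    DT.eraseNode(DeadBB);      // drop the tree node first
    DeadBB->eraseFromParent(); // then delete the block from the function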
648
649 /// splitBlock - BB is split and now it has one successor. Update dominator
650 /// tree to reflect this change.
651 void splitBlock(NodeT *NewBB) {
652 if (IsPostDominator)
653 Split<Inverse<NodeT *>>(NewBB);
654 else
655 Split<NodeT *>(NewBB);
656 }
657
658 /// print - Convert to human readable form
659 ///
660 void print(raw_ostream &O) const {
661 O << "=============================--------------------------------\n";
662 if (IsPostDominator)
663 O << "Inorder PostDominator Tree: ";
664 else
665 O << "Inorder Dominator Tree: ";
666 if (!DFSInfoValid)
667 O << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
668 O << "\n";
669
670 // The postdom tree can have a null root if there are no returns.
671 if (getRootNode()) PrintDomTree<NodeT>(getRootNode(), O, 1);
672 O << "Roots: ";
673 for (const NodePtr Block : Roots) {
674 Block->printAsOperand(O, false);
675 O << " ";
676 }
677 O << "\n";
678 }
679
680public:
681 /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking
682 /// the dominator tree in DFS order.
683 void updateDFSNumbers() const {
684 if (DFSInfoValid) {
685 SlowQueries = 0;
686 return;
687 }
688
689 SmallVector<std::pair<const DomTreeNodeBase<NodeT> *,
690 typename DomTreeNodeBase<NodeT>::const_iterator>,
691 32> WorkStack;
692
693 const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode();
694 assert((!Parent || ThisRoot) && "Empty constructed DomTree");
695 if (!ThisRoot)
696 return;
697
698 // Both dominators and postdominators have a single root node. In the
699 // case of PostDominatorTree, this node is a virtual root.
700 WorkStack.push_back({ThisRoot, ThisRoot->begin()});
701
702 unsigned DFSNum = 0;
703 ThisRoot->DFSNumIn = DFSNum++;
704
705 while (!WorkStack.empty()) {
706 const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first;
707 const auto ChildIt = WorkStack.back().second;
708
709 // If we visited all of the children of this node, "recurse" back up the
710 // stack setting the DFSNumOut.
711 if (ChildIt == Node->end()) {
712 Node->DFSNumOut = DFSNum++;
713 WorkStack.pop_back();
714 } else {
715 // Otherwise, recursively visit this child.
716 const DomTreeNodeBase<NodeT> *Child = *ChildIt;
717 ++WorkStack.back().second;
718
719 WorkStack.push_back({Child, Child->begin()});
720 Child->DFSNumIn = DFSNum++;
721 }
722 }
723
724 SlowQueries = 0;
725 DFSInfoValid = true;
726 }
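
The payoff of this numbering is that dominance becomes interval containment: A dominates B exactly when B's [in, out] range nests inside A's, which is the constant-time test DominatedBy() performs. Expressed as a sketch (getDFSNumIn/getDFSNumOut are the node accessors; the numbers are only meaningful while DFSInfoValid holds):

    bool dominatedBy(const DomTreeNodeBase<BasicBlock> *B,
                     const DomTreeNodeBase<BasicBlock> *A) {
      // B's DFS interval must be contained in A's.
      return A->getDFSNumIn()  <= B->getDFSNumIn() &&
             B->getDFSNumOut() <= A->getDFSNumOut();
    }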
727
728 /// recalculate - compute a dominator tree for the given function
729 void recalculate(ParentType &Func) {
730 Parent = &Func;
731 DomTreeBuilder::Calculate(*this);
732 }
733
734 void recalculate(ParentType &Func, ArrayRef<UpdateType> Updates) {
735 Parent = &Func;
736 DomTreeBuilder::CalculateWithUpdates(*this, Updates);
737 }
738
739 /// verify - checks if the tree is correct. There are 3 levels of verification:
740 /// - Full -- verifies if the tree is correct by making sure all the
741 /// properties (including the parent and the sibling property)
742 /// hold.
743 /// Takes O(N^3) time.
744 ///
745 /// - Basic -- checks if the tree is correct, but compares it to a freshly
746 /// constructed tree instead of checking the sibling property.
747 /// Takes O(N^2) time.
748 ///
749 /// - Fast -- checks basic tree structure and compares it with a freshly
750 /// constructed tree.
751 /// Takes O(N^2) time worst case, but is faster in practice (same
752 /// as tree construction).
753 bool verify(VerificationLevel VL = VerificationLevel::Full) const {
754 return DomTreeBuilder::Verify(*this, VL);
755 }
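
A typical use (sketch, names assumed) pairs the cheap level with ordinary builds and the exhaustive one with expensive checks:

    #ifdef EXPENSIVE_CHECKS
      assert(DT.verify(DominatorTree::VerificationLevel::Full));
    #else
      assert(DT.verify(DominatorTree::VerificationLevel::Fast));
    #endif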
756
757protected:
758 void addRoot(NodeT *BB) { this->Roots.push_back(BB); }
759
760 void reset() {
761 DomTreeNodes.clear();
762 Roots.clear();
763 RootNode = nullptr;
764 Parent = nullptr;
765 DFSInfoValid = false;
766 SlowQueries = 0;
767 }
768
769 // NewBB is split and now it has one successor. Update dominator tree to
770 // reflect this change.
771 template <class N>
772 void Split(typename GraphTraits<N>::NodeRef NewBB) {
773 using GraphT = GraphTraits<N>;
774 using NodeRef = typename GraphT::NodeRef;
775 assert(std::distance(GraphT::child_begin(NewBB),
776 GraphT::child_end(NewBB)) == 1 &&
777 "NewBB should have a single successor!");
778 NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
779
780 std::vector<NodeRef> PredBlocks;
781 for (const auto &Pred : children<Inverse<N>>(NewBB))
782 PredBlocks.push_back(Pred);
783
784 assert(!PredBlocks.empty() && "No predblocks?");
785
786 bool NewBBDominatesNewBBSucc = true;
787 for (const auto &Pred : children<Inverse<N>>(NewBBSucc)) {
788 if (Pred != NewBB && !dominates(NewBBSucc, Pred) &&
789 isReachableFromEntry(Pred)) {
790 NewBBDominatesNewBBSucc = false;
791 break;
792 }
793 }
794
795 // Find NewBB's immediate dominator and create new dominator tree node for
796 // NewBB.
797 NodeT *NewBBIDom = nullptr;
798 unsigned i = 0;
799 for (i = 0; i < PredBlocks.size(); ++i)
800 if (isReachableFromEntry(PredBlocks[i])) {
801 NewBBIDom = PredBlocks[i];
802 break;
803 }
804
805 // It's possible that none of the predecessors of NewBB are reachable;
806 // in that case, NewBB itself is unreachable, so nothing needs to be
807 // changed.
808 if (!NewBBIDom) return;
809
810 for (i = i + 1; i < PredBlocks.size(); ++i) {
811 if (isReachableFromEntry(PredBlocks[i]))
812 NewBBIDom = findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
813 }
814
815 // Create the new dominator tree node... and set the idom of NewBB.
816 DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom);
817
818 // If NewBB strictly dominates other blocks, then it is now the immediate
819 // dominator of NewBBSucc. Update the dominator tree as appropriate.
820 if (NewBBDominatesNewBBSucc) {
821 DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc);
822 changeImmediateDominator(NewBBSuccNode, NewBBNode);
823 }
824 }
825
826 private:
827 bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
828 const DomTreeNodeBase<NodeT> *B) const {
829 assert(A != B);
830 assert(isReachableFromEntry(B));
831 assert(isReachableFromEntry(A));
832
833 const unsigned ALevel = A->getLevel();
834 const DomTreeNodeBase<NodeT> *IDom;
835
836 // Don't walk nodes above A's subtree. When we reach A's level, we must
837 // either find A or be in some other subtree not dominated by A.
838 while ((IDom = B->getIDom()) != nullptr && IDom->getLevel() >= ALevel)
839 B = IDom; // Walk up the tree
840
841 return B == A;
842 }
843
844 /// Wipe this tree's state without releasing any resources.
845 ///
846 /// This is essentially a post-move helper only. It leaves the object in an
847 /// assignable and destroyable state, but otherwise invalid.
848 void wipe() {
849 DomTreeNodes.clear();
850 RootNode = nullptr;
851 Parent = nullptr;
852 }
853};
854
855template <typename T>
856using DomTreeBase = DominatorTreeBase<T, false>;
857
858template <typename T>
859using PostDomTreeBase = DominatorTreeBase<T, true>;
860
861// These two functions are declared out of line as a workaround for building
862// with old (< r147295) versions of clang because of pr11642.
863template <typename NodeT, bool IsPostDom>
864bool DominatorTreeBase<NodeT, IsPostDom>::dominates(const NodeT *A,
865 const NodeT *B) const {
866 if (A == B)
867 return true;
868
869 // Cast away the const qualifiers here. This is ok since
870 // this function doesn't actually return the values returned
871 // from getNode.
872 return dominates(getNode(const_cast<NodeT *>(A)),
873 getNode(const_cast<NodeT *>(B)));
874}
875template <typename NodeT, bool IsPostDom>
876bool DominatorTreeBase<NodeT, IsPostDom>::properlyDominates(
877 const NodeT *A, const NodeT *B) const {
878 if (A == B)
879 return false;
880
881 // Cast away the const qualifiers here. This is ok since
882 // this function doesn't actually return the values returned
883 // from getNode.
884 return dominates(getNode(const_cast<NodeT *>(A)),
885 getNode(const_cast<NodeT *>(B)));
886}
887
888} // end namespace llvm
889
890#endif // LLVM_SUPPORT_GENERICDOMTREE_H

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/bits/unique_ptr.h

1// unique_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2008-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/unique_ptr.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30#ifndef _UNIQUE_PTR_H
31#define _UNIQUE_PTR_H 1
32
33#include <bits/c++config.h>
34#include <debug/assertions.h>
35#include <type_traits>
36#include <utility>
37#include <tuple>
38
39namespace std _GLIBCXX_VISIBILITY(default)
40{
41_GLIBCXX_BEGIN_NAMESPACE_VERSION
42
43 /**
44 * @addtogroup pointer_abstractions
45 * @{
46 */
47
48#if _GLIBCXX_USE_DEPRECATED
49 template<typename> class auto_ptr;
50#endif
51
52 /// Primary template of default_delete, used by unique_ptr
53 template<typename _Tp>
54 struct default_delete
55 {
56 /// Default constructor
57 constexpr default_delete() noexcept = default;
58
59 /** @brief Converting constructor.
60 *
61 * Allows conversion from a deleter for arrays of another type, @p _Up,
62 * only if @p _Up* is convertible to @p _Tp*.
63 */
64 template<typename _Up, typename = typename
65 enable_if<is_convertible<_Up*, _Tp*>::value>::type>
66 default_delete(const default_delete<_Up>&) noexcept { }
67
68 /// Calls @c delete @p __ptr
69 void
70 operator()(_Tp* __ptr) const
71 {
72 static_assert(!is_void<_Tp>::value,
73 "can't delete pointer to incomplete type");
74 static_assert(sizeof(_Tp)>0,
75 "can't delete pointer to incomplete type");
76 delete __ptr;
77 }
78 };
79
80 // _GLIBCXX_RESOLVE_LIB_DEFECTS
81 // DR 740 - omit specialization for array objects with a compile time length
82 /// Specialization for arrays, default_delete.
83 template<typename _Tp>
84 struct default_delete<_Tp[]>
85 {
86 public:
87 /// Default constructor
88 constexpr default_delete() noexcept = default;
89
90 /** @brief Converting constructor.
91 *
92 * Allows conversion from a deleter for arrays of another type, such as
93 * a const-qualified version of @p _Tp.
94 *
95 * Conversions from types derived from @c _Tp are not allowed because
96 * it is unsafe to @c delete[] an array of derived types through a
97 * pointer to the base type.
98 */
99 template<typename _Up, typename = typename
100 enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type>
101 default_delete(const default_delete<_Up[]>&) noexcept { }
102
103 /// Calls @c delete[] @p __ptr
104 template<typename _Up>
105 typename enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type
106 operator()(_Up* __ptr) const
107 {
108 static_assert(sizeof(_Tp)>0,
109 "can't delete pointer to incomplete type");
110 delete [] __ptr;
111 }
112 };
113
114 /// 20.7.1.2 unique_ptr for single objects.
115 template <typename _Tp, typename _Dp = default_delete<_Tp> >
116 class unique_ptr
117 {
118 // use SFINAE to determine whether _Del::pointer exists
119 class _Pointer
120 {
121 template<typename _Up>
122 static typename _Up::pointer __test(typename _Up::pointer*);
123
124 template<typename _Up>
125 static _Tp* __test(...);
126
127 typedef typename remove_reference<_Dp>::type _Del;
128
129 public:
130 typedef decltype(__test<_Del>(0)) type;
131 };
132
133 typedef std::tuple<typename _Pointer::type, _Dp> __tuple_type;
134 __tuple_type _M_t;
135
136 public:
137 typedef typename _Pointer::type pointer;
138 typedef _Tp element_type;
139 typedef _Dp deleter_type;
140
141
142 // helper template for detecting a safe conversion from another
143 // unique_ptr
144 template<typename _Up, typename _Ep>
145 using __safe_conversion_up = __and_<
146 is_convertible<typename unique_ptr<_Up, _Ep>::pointer, pointer>,
147 __not_<is_array<_Up>>,
148 __or_<__and_<is_reference<deleter_type>,
149 is_same<deleter_type, _Ep>>,
150 __and_<__not_<is_reference<deleter_type>>,
151 is_convertible<_Ep, deleter_type>>
152 >
153 >;
154
155 // Constructors.
156
157 /// Default constructor, creates a unique_ptr that owns nothing.
158 constexpr unique_ptr() noexcept
159 : _M_t()
160 { static_assert(!is_pointer<deleter_type>::value,
161 "constructed with null function pointer deleter"); }
162
163 /** Takes ownership of a pointer.
164 *
165 * @param __p A pointer to an object of @c element_type
166 *
167 * The deleter will be value-initialized.
168 */
169 explicit
170 unique_ptr(pointer __p) noexcept
171 : _M_t()
172 {
173 std::get<0>(_M_t) = __p;
174 static_assert(!is_pointer<deleter_type>::value,
175 "constructed with null function pointer deleter");
176 }
177
178 /** Takes ownership of a pointer.
179 *
180 * @param __p A pointer to an object of @c element_type
181 * @param __d A reference to a deleter.
182 *
183 * The deleter will be initialized with @p __d
184 */
185 unique_ptr(pointer __p,
186 typename conditional<is_reference<deleter_type>::value,
187 deleter_type, const deleter_type&>::type __d) noexcept
188 : _M_t(__p, __d) { }
189
190 /** Takes ownership of a pointer.
191 *
192 * @param __p A pointer to an object of @c element_type
193 * @param __d An rvalue reference to a deleter.
194 *
195 * The deleter will be initialized with @p std::move(__d)
196 */
197 unique_ptr(pointer __p,
198 typename remove_reference<deleter_type>::type&& __d) noexcept
199 : _M_t(std::move(__p), std::move(__d))
200 { static_assert(!std::is_reference<deleter_type>::value,
201 "rvalue deleter bound to reference"); }
202
203 /// Creates a unique_ptr that owns nothing.
204 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
205
206 // Move constructors.
207
208 /// Move constructor.
209 unique_ptr(unique_ptr&& __u) noexcept
210 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
211
212 /** @brief Converting constructor from another type
213 *
214 * Requires that the pointer owned by @p __u is convertible to the
215 * type of pointer owned by this object, @p __u does not own an array,
216 * and @p __u has a compatible deleter type.
217 */
218 template<typename _Up, typename _Ep, typename = _Require<
219 __safe_conversion_up<_Up, _Ep>,
220 typename conditional<is_reference<_Dp>::value,
221 is_same<_Ep, _Dp>,
222 is_convertible<_Ep, _Dp>>::type>>
223 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
224 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
225 { }
226
227#if _GLIBCXX_USE_DEPRECATED
228 /// Converting constructor from @c auto_ptr
229 template<typename _Up, typename = _Require<
230 is_convertible<_Up*, _Tp*>, is_same<_Dp, default_delete<_Tp>>>>
231 unique_ptr(auto_ptr<_Up>&& __u) noexcept;
232#endif
233
234 /// Destructor, invokes the deleter if the stored pointer is not null.
235 ~unique_ptr() noexcept
236 {
237 auto& __ptr = std::get<0>(_M_t);
238 if (__ptr != nullptr)
239 get_deleter()(__ptr);
240 __ptr = pointer();
241 }
242
243 // Assignment.
244
245 /** @brief Move assignment operator.
246 *
247 * @param __u The object to transfer ownership from.
248 *
249 * Invokes the deleter first if this object owns a pointer.
250 */
251 unique_ptr&
252 operator=(unique_ptr&& __u) noexcept
253 {
254 reset(__u.release());
255 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
256 return *this;
257 }
258
259 /** @brief Assignment from another type.
260 *
261 * @param __u The object to transfer ownership from, which owns a
262 * convertible pointer to a non-array object.
263 *
264 * Invokes the deleter first if this object owns a pointer.
265 */
266 template<typename _Up, typename _Ep>
267 typename enable_if< __and_<
268 __safe_conversion_up<_Up, _Ep>,
269 is_assignable<deleter_type&, _Ep&&>
270 >::value,
271 unique_ptr&>::type
272 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
273 {
274 reset(__u.release());
275 get_deleter() = std::forward<_Ep>(__u.get_deleter());
276 return *this;
277 }
278
279 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
280 unique_ptr&
281 operator=(nullptr_t) noexcept
282 {
283 reset();
284 return *this;
285 }
286
287 // Observers.
288
289 /// Dereference the stored pointer.
290 typename add_lvalue_reference<element_type>::type
291 operator*() const
292 {
293 __glibcxx_assert(get() != pointer());
294 return *get();
295 }
296
297 /// Return the stored pointer.
298 pointer
299 operator->() const noexcept
300 {
301 _GLIBCXX_DEBUG_PEDASSERT(get() != pointer());
302 return get();
303 }
304
305 /// Return the stored pointer.
306 pointer
307 get() const noexcept
308 { return std::get<0>(_M_t); }
14
Calling 'get<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
25
Returning from 'get<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
26
Returning pointer
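
Note that get() performs no check at all: it returns whatever pointer the tuple holds, null included, which is why the path notes above carry a possibly-null pointer out of getNode() unflagged until the eventual dereference. A small self-contained illustration of the hazard:

    #include <memory>

    void sketch() {
      std::unique_ptr<int> P; // owns nothing: get() returns nullptr
      int *Raw = P.get();     // no diagnostic here; the null travels on
      if (Raw)                // the caller, not get(), must guard
        ++*Raw;
    }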
309
310 /// Return a reference to the stored deleter.
311 deleter_type&
312 get_deleter() noexcept
313 { return std::get<1>(_M_t); }
314
315 /// Return a reference to the stored deleter.
316 const deleter_type&
317 get_deleter() const noexcept
318 { return std::get<1>(_M_t); }
319
320 /// Return @c true if the stored pointer is not null.
321 explicit operator bool() const noexcept
322 { return get() == pointer() ? false : true; }
323
324 // Modifiers.
325
326 /// Release ownership of any stored pointer.
327 pointer
328 release() noexcept
329 {
330 pointer __p = get();
331 std::get<0>(_M_t) = pointer();
332 return __p;
333 }
334
335 /** @brief Replace the stored pointer.
336 *
337 * @param __p The new pointer to store.
338 *
339 * The deleter will be invoked if a pointer is already owned.
340 */
341 void
342 reset(pointer __p = pointer()) noexcept
343 {
344 using std::swap;
345 swap(std::get<0>(_M_t), __p);
346 if (__p != pointer())
347 get_deleter()(__p);
348 }
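
The swap-then-delete order matters: the new pointer is stored before the old object is destroyed, so reset() stays well-behaved even if the deleter runs arbitrary code. Usage sketch:

    std::unique_ptr<int> Q(new int(1));
    Q.reset(new int(2)); // stores the new pointer, then deletes the old int
    Q.reset();           // deletes int(2); Q is now empty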
349
350 /// Exchange the pointer and deleter with another object.
351 void
352 swap(unique_ptr& __u) noexcept
353 {
354 using std::swap;
355 swap(_M_t, __u._M_t);
356 }
357
358 // Disable copy from lvalue.
359 unique_ptr(const unique_ptr&) = delete;
360 unique_ptr& operator=(const unique_ptr&) = delete;
361 };
362
363 /// 20.7.1.3 unique_ptr for array objects with a runtime length
364 // [unique.ptr.runtime]
365 // _GLIBCXX_RESOLVE_LIB_DEFECTS
366 // DR 740 - omit specialization for array objects with a compile time length
367 template<typename _Tp, typename _Dp>
368 class unique_ptr<_Tp[], _Dp>
369 {
370 // use SFINAE to determine whether _Del::pointer exists
371 class _Pointer
372 {
373 template<typename _Up>
374 static typename _Up::pointer __test(typename _Up::pointer*);
375
376 template<typename _Up>
377 static _Tp* __test(...);
378
379 typedef typename remove_reference<_Dp>::type _Del;
380
381 public:
382 typedef decltype(__test<_Del>(0)) type;
383 };
384
385 typedef std::tuple<typename _Pointer::type, _Dp> __tuple_type;
386 __tuple_type _M_t;
387
388 template<typename _Up>
389 using __remove_cv = typename remove_cv<_Up>::type;
390
391 // like is_base_of<_Tp, _Up> but false if unqualified types are the same
392 template<typename _Up>
393 using __is_derived_Tp
394 = __and_< is_base_of<_Tp, _Up>,
395 __not_<is_same<__remove_cv<_Tp>, __remove_cv<_Up>>> >;
396
397
398 public:
399 typedef typename _Pointer::type pointer;
400 typedef _Tp element_type;
401 typedef _Dp deleter_type;
402
403 // helper template for detecting a safe conversion from another
404 // unique_ptr
405 template<typename _Up, typename _Ep,
406 typename _Up_up = unique_ptr<_Up, _Ep>,
407 typename _Up_element_type = typename _Up_up::element_type>
408 using __safe_conversion_up = __and_<
409 is_array<_Up>,
410 is_same<pointer, element_type*>,
411 is_same<typename _Up_up::pointer, _Up_element_type*>,
412 is_convertible<_Up_element_type(*)[], element_type(*)[]>,
413 __or_<__and_<is_reference<deleter_type>, is_same<deleter_type, _Ep>>,
414 __and_<__not_<is_reference<deleter_type>>,
415 is_convertible<_Ep, deleter_type>>>
416 >;
417
418 // helper template for detecting a safe conversion from a raw pointer
419 template<typename _Up>
420 using __safe_conversion_raw = __and_<
421 __or_<__or_<is_same<_Up, pointer>,
422 is_same<_Up, nullptr_t>>,
423 __and_<is_pointer<_Up>,
424 is_same<pointer, element_type*>,
425 is_convertible<
426 typename remove_pointer<_Up>::type(*)[],
427 element_type(*)[]>
428 >
429 >
430 >;
431
432 // Constructors.
433
434 /// Default constructor, creates a unique_ptr that owns nothing.
435 constexpr unique_ptr() noexcept
436 : _M_t()
437 { static_assert(!std::is_pointer<deleter_type>::value,
438 "constructed with null function pointer deleter"); }
439
440 /** Takes ownership of a pointer.
441 *
442 * @param __p A pointer to an array of a type safely convertible
443 * to an array of @c element_type
444 *
445 * The deleter will be value-initialized.
446 */
447 template<typename _Up,
448 typename = typename enable_if<
449 __safe_conversion_raw<_Up>::value, bool>::type>
450 explicit
451 unique_ptr(_Up __p) noexcept
452 : _M_t(__p, deleter_type())
453 { static_assert(!is_pointer<deleter_type>::value,
454 "constructed with null function pointer deleter"); }
455
456 /** Takes ownership of a pointer.
457 *
458 * @param __p A pointer to an array of a type safely convertible
459 * to an array of @c element_type
460 * @param __d A reference to a deleter.
461 *
462 * The deleter will be initialized with @p __d
463 */
464 template<typename _Up,
465 typename = typename enable_if<
466 __safe_conversion_raw<_Up>::value, bool>::type>
467 unique_ptr(_Up __p,
468 typename conditional<is_reference<deleter_type>::value,
469 deleter_type, const deleter_type&>::type __d) noexcept
470 : _M_t(__p, __d) { }
471
472 /** Takes ownership of a pointer.
473 *
474 * @param __p A pointer to an array of a type safely convertible
475 * to an array of @c element_type
476 * @param __d A reference to a deleter.
477 *
478 * The deleter will be initialized with @p std::move(__d)
479 */
480 template<typename _Up,
481 typename = typename enable_if<
482 __safe_conversion_raw<_Up>::value, bool>::type>
483 unique_ptr(_Up __p, typename
484 remove_reference<deleter_type>::type&& __d) noexcept
485 : _M_t(std::move(__p), std::move(__d))
486 { static_assert(!is_reference<deleter_type>::value,
487 "rvalue deleter bound to reference"); }
488
489 /// Move constructor.
490 unique_ptr(unique_ptr&& __u) noexcept
491 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
492
493 /// Creates a unique_ptr that owns nothing.
494 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
495
496 template<typename _Up, typename _Ep,
497 typename = _Require<__safe_conversion_up<_Up, _Ep>>>
498 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
499 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
500 { }
501
502 /// Destructor, invokes the deleter if the stored pointer is not null.
503 ~unique_ptr()
504 {
505 auto& __ptr = std::get<0>(_M_t);
506 if (__ptr != nullptr)
507 get_deleter()(__ptr);
508 __ptr = pointer();
509 }
510
511 // Assignment.
512
513 /** @brief Move assignment operator.
514 *
515 * @param __u The object to transfer ownership from.
516 *
517 * Invokes the deleter first if this object owns a pointer.
518 */
519 unique_ptr&
520 operator=(unique_ptr&& __u) noexcept
521 {
522 reset(__u.release());
523 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
524 return *this;
525 }
526
527 /** @brief Assignment from another type.
528 *
529 * @param __u The object to transfer ownership from, which owns a
530 * convertible pointer to an array object.
531 *
532 * Invokes the deleter first if this object owns a pointer.
533 */
534 template<typename _Up, typename _Ep>
535 typename
536 enable_if<__and_<__safe_conversion_up<_Up, _Ep>,
537 is_assignable<deleter_type&, _Ep&&>
538 >::value,
539 unique_ptr&>::type
540 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
541 {
542 reset(__u.release());
543 get_deleter() = std::forward<_Ep>(__u.get_deleter());
544 return *this;
545 }
546
547 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
548 unique_ptr&
549 operator=(nullptr_t) noexcept
550 {
551 reset();
552 return *this;
553 }
554
555 // Observers.
556
557 /// Access an element of owned array.
558 typename std::add_lvalue_reference<element_type>::type
559 operator[](size_t __i) const
560 {
561 __glibcxx_assert(get() != pointer());
562 return get()[__i];
563 }
564
565 /// Return the stored pointer.
566 pointer
567 get() const noexcept
568 { return std::get<0>(_M_t); }
569
570 /// Return a reference to the stored deleter.
571 deleter_type&
572 get_deleter() noexcept
573 { return std::get<1>(_M_t); }
574
575 /// Return a reference to the stored deleter.
576 const deleter_type&
577 get_deleter() const noexcept
578 { return std::get<1>(_M_t); }
579
580 /// Return @c true if the stored pointer is not null.
581 explicit operator bool() const noexcept
582 { return get() == pointer() ? false : true; }
583
584 // Modifiers.
585
586 /// Release ownership of any stored pointer.
587 pointer
588 release() noexcept
589 {
590 pointer __p = get();
591 std::get<0>(_M_t) = pointer();
592 return __p;
593 }
594
595 /** @brief Replace the stored pointer.
596 *
597 * @param __p The new pointer to store.
598 *
599 * The deleter will be invoked if a pointer is already owned.
600 */
601 template <typename _Up,
602 typename = _Require<
603 __or_<is_same<_Up, pointer>,
604 __and_<is_same<pointer, element_type*>,
605 is_pointer<_Up>,
606 is_convertible<
607 typename remove_pointer<_Up>::type(*)[],
608 element_type(*)[]
609 >
610 >
611 >
612 >>
613 void
614 reset(_Up __p) noexcept
615 {
616 pointer __ptr = __p;
617 using std::swap;
618 swap(std::get<0>(_M_t), __ptr);
619 if (__ptr != nullptr)
620 get_deleter()(__ptr);
621 }
622
623 void reset(nullptr_t = nullptr) noexcept
624 {
625 reset(pointer());
626 }
627
628 /// Exchange the pointer and deleter with another object.
629 void
630 swap(unique_ptr& __u) noexcept
631 {
632 using std::swap;
633 swap(_M_t, __u._M_t);
634 }
635
636 // Disable copy from lvalue.
637 unique_ptr(const unique_ptr&) = delete;
638 unique_ptr& operator=(const unique_ptr&) = delete;
639 };
640
641 template<typename _Tp, typename _Dp>
642 inline void
643 swap(unique_ptr<_Tp, _Dp>& __x,
644 unique_ptr<_Tp, _Dp>& __y) noexcept
645 { __x.swap(__y); }
646
647 template<typename _Tp, typename _Dp,
648 typename _Up, typename _Ep>
649 inline bool
650 operator==(const unique_ptr<_Tp, _Dp>& __x,
651 const unique_ptr<_Up, _Ep>& __y)
652 { return __x.get() == __y.get(); }
653
654 template<typename _Tp, typename _Dp>
655 inline bool
656 operator==(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
657 { return !__x; }
658
659 template<typename _Tp, typename _Dp>
660 inline bool
661 operator==(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
662 { return !__x; }
663
664 template<typename _Tp, typename _Dp,
665 typename _Up, typename _Ep>
666 inline bool
667 operator!=(const unique_ptr<_Tp, _Dp>& __x,
668 const unique_ptr<_Up, _Ep>& __y)
669 { return __x.get() != __y.get(); }
670
671 template<typename _Tp, typename _Dp>
672 inline bool
673 operator!=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
674 { return (bool)__x; }
675
676 template<typename _Tp, typename _Dp>
677 inline bool
678 operator!=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
679 { return (bool)__x; }
680
681 template<typename _Tp, typename _Dp,
682 typename _Up, typename _Ep>
683 inline bool
684 operator<(const unique_ptr<_Tp, _Dp>& __x,
685 const unique_ptr<_Up, _Ep>& __y)
686 {
687 typedef typename
688 std::common_type<typename unique_ptr<_Tp, _Dp>::pointer,
689 typename unique_ptr<_Up, _Ep>::pointer>::type _CT;
690 return std::less<_CT>()(__x.get(), __y.get());
691 }
692
693 template<typename _Tp, typename _Dp>
694 inline bool
695 operator<(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
696 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
697 nullptr); }
698
699 template<typename _Tp, typename _Dp>
700 inline bool
701 operator<(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
702 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
703 __x.get()); }
704
705 template<typename _Tp, typename _Dp,
706 typename _Up, typename _Ep>
707 inline bool
708 operator<=(const unique_ptr<_Tp, _Dp>& __x,
709 const unique_ptr<_Up, _Ep>& __y)
710 { return !(__y < __x); }
711
712 template<typename _Tp, typename _Dp>
713 inline bool
714 operator<=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
715 { return !(nullptr < __x); }
716
717 template<typename _Tp, typename _Dp>
718 inline bool
719 operator<=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
720 { return !(__x < nullptr); }
721
722 template<typename _Tp, typename _Dp,
723 typename _Up, typename _Ep>
724 inline bool
725 operator>(const unique_ptr<_Tp, _Dp>& __x,
726 const unique_ptr<_Up, _Ep>& __y)
727 { return (__y < __x); }
728
729 template<typename _Tp, typename _Dp>
730 inline bool
731 operator>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
732 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
733 __x.get()); }
734
735 template<typename _Tp, typename _Dp>
736 inline bool
737 operator>(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
738 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
739 nullptr); }
740
741 template<typename _Tp, typename _Dp,
742 typename _Up, typename _Ep>
743 inline bool
744 operator>=(const unique_ptr<_Tp, _Dp>& __x,
745 const unique_ptr<_Up, _Ep>& __y)
746 { return !(__x < __y); }
747
748 template<typename _Tp, typename _Dp>
749 inline bool
750 operator>=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
751 { return !(__x < nullptr); }
752
753 template<typename _Tp, typename _Dp>
754 inline bool
755 operator>=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
756 { return !(nullptr < __x); }
757
758 /// std::hash specialization for unique_ptr.
759 template<typename _Tp, typename _Dp>
760 struct hash<unique_ptr<_Tp, _Dp>>
761 : public __hash_base<size_t, unique_ptr<_Tp, _Dp>>
762 {
763 size_t
764 operator()(const unique_ptr<_Tp, _Dp>& __u) const noexcept
765 {
766 typedef unique_ptr<_Tp, _Dp> _UP;
767 return std::hash<typename _UP::pointer>()(__u.get());
768 }
769 };
770
771#if __cplusplus > 201103L
772
773#define __cpp_lib_make_unique 201304
774
775 template<typename _Tp>
776 struct _MakeUniq
777 { typedef unique_ptr<_Tp> __single_object; };
778
779 template<typename _Tp>
780 struct _MakeUniq<_Tp[]>
781 { typedef unique_ptr<_Tp[]> __array; };
782
783 template<typename _Tp, size_t _Bound>
784 struct _MakeUniq<_Tp[_Bound]>
785 { struct __invalid_type { }; };
786
787 /// std::make_unique for single objects
788 template<typename _Tp, typename... _Args>
789 inline typename _MakeUniq<_Tp>::__single_object
790 make_unique(_Args&&... __args)
791 { return unique_ptr<_Tp>(new _Tp(std::forward<_Args>(__args)...)); }
792
793 /// std::make_unique for arrays of unknown bound
794 template<typename _Tp>
795 inline typename _MakeUniq<_Tp>::__array
796 make_unique(size_t __num)
797 { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__num]()); }
798
799 /// Disable std::make_unique for arrays of known bound
800 template<typename _Tp, typename... _Args>
801 inline typename _MakeUniq<_Tp>::__invalid_type
802 make_unique(_Args&&...) = delete;
803#endif
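
Since this build runs as C++11 (__cplusplus == 201103L), the whole block above is compiled out, which is why GenericDomTree.h calls llvm::make_unique instead (lines 574 and 588 earlier). Under C++14 the standard facility would read:

    auto Single = std::make_unique<int>(42);  // single object
    auto Array  = std::make_unique<int[]>(8); // array of unknown bound,
                                              // value-initialized elements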
804
805 // @} group pointer_abstractions
806
807_GLIBCXX_END_NAMESPACE_VERSION
808} // namespace
809
810#endif /* _UNIQUE_PTR_H */

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/tuple

1// <tuple> -*- C++ -*-
2
3// Copyright (C) 2007-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/tuple
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_TUPLE
30#define _GLIBCXX_TUPLE 1
31
32#pragma GCC system_header
33
34#if __cplusplus < 201103L
35# include <bits/c++0x_warning.h>
36#else
37
38#include <utility>
39#include <array>
40#include <bits/uses_allocator.h>
41
42namespace std _GLIBCXX_VISIBILITY(default)
43{
44_GLIBCXX_BEGIN_NAMESPACE_VERSION
45
46 /**
47 * @addtogroup utilities
48 * @{
49 */
50
51 template<std::size_t _Idx, typename _Head, bool _IsEmptyNotFinal>
52 struct _Head_base;
53
54 template<std::size_t _Idx, typename _Head>
55 struct _Head_base<_Idx, _Head, true>
56 : public _Head
57 {
58 constexpr _Head_base()
59 : _Head() { }
60
61 constexpr _Head_base(const _Head& __h)
62 : _Head(__h) { }
63
64 constexpr _Head_base(const _Head_base&) = default;
65 constexpr _Head_base(_Head_base&&) = default;
66
67 template<typename _UHead>
68 constexpr _Head_base(_UHead&& __h)
69 : _Head(std::forward<_UHead>(__h)) { }
70
71 _Head_base(allocator_arg_t, __uses_alloc0)
72 : _Head() { }
73
74 template<typename _Alloc>
75 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
76 : _Head(allocator_arg, *__a._M_a) { }
77
78 template<typename _Alloc>
79 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
80 : _Head(*__a._M_a) { }
81
82 template<typename _UHead>
83 _Head_base(__uses_alloc0, _UHead&& __uhead)
84 : _Head(std::forward<_UHead>(__uhead)) { }
85
86 template<typename _Alloc, typename _UHead>
87 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
88 : _Head(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead)) { }
89
90 template<typename _Alloc, typename _UHead>
91 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
92 : _Head(std::forward<_UHead>(__uhead), *__a._M_a) { }
93
94 static constexpr _Head&
95 _M_head(_Head_base& __b) noexcept { return __b; }
96
97 static constexpr const _Head&
98 _M_head(const _Head_base& __b) noexcept { return __b; }
99 };
100
101 template<std::size_t _Idx, typename _Head>
102 struct _Head_base<_Idx, _Head, false>
103 {
104 constexpr _Head_base()
105 : _M_head_impl() { }
106
107 constexpr _Head_base(const _Head& __h)
108 : _M_head_impl(__h) { }
109
110 constexpr _Head_base(const _Head_base&) = default;
111 constexpr _Head_base(_Head_base&&) = default;
112
113 template<typename _UHead>
114 constexpr _Head_base(_UHead&& __h)
115 : _M_head_impl(std::forward<_UHead>(__h)) { }
116
117 _Head_base(allocator_arg_t, __uses_alloc0)
118 : _M_head_impl() { }
119
120 template<typename _Alloc>
121 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
122 : _M_head_impl(allocator_arg, *__a._M_a) { }
123
124 template<typename _Alloc>
125 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
126 : _M_head_impl(*__a._M_a) { }
127
128 template<typename _UHead>
129 _Head_base(__uses_alloc0, _UHead&& __uhead)
130 : _M_head_impl(std::forward<_UHead>(__uhead)) { }
131
132 template<typename _Alloc, typename _UHead>
133 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
134 : _M_head_impl(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead))
135 { }
136
137 template<typename _Alloc, typename _UHead>
138 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
139 : _M_head_impl(std::forward<_UHead>(__uhead), *__a._M_a) { }
140
141 static constexpr _Head&
142 _M_head(_Head_base& __b) noexcept { return __b._M_head_impl; }
143
144 static constexpr const _Head&
145 _M_head(const _Head_base& __b) noexcept { return __b._M_head_impl; }
18
Returning pointer (reference to field '_M_head_impl')
146
147 _Head _M_head_impl;
148 };
149
150 /**
151 * Contains the actual implementation of the @c tuple template, stored
152 * as a recursive inheritance hierarchy from the first element (most
153 * derived class) to the last (least derived class). The @c Idx
154 * parameter gives the 0-based index of the element stored at this
155 * point in the hierarchy; we use it to implement a constant-time
156 * get() operation.
157 */
158 template<std::size_t _Idx, typename... _Elements>
159 struct _Tuple_impl;
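
(Aside: the recursion described above is easier to see in a stripped-down sketch. The following hypothetical mini-tuple is illustration only, not part of this header; it omits EBO, allocators, and the SFINAE constraints, but keeps the key trick: get<I> is constant time because overload resolution converts the object to the unique base class carrying index I.)

    #include <cstddef>

    template<std::size_t I, typename... Ts>
    struct MiniImpl;                          // primary template

    template<std::size_t I, typename Head, typename... Tail>
    struct MiniImpl<I, Head, Tail...> : MiniImpl<I + 1, Tail...> {
      Head head;                              // element I lives here
      MiniImpl(Head h, Tail... t)
        : MiniImpl<I + 1, Tail...>(t...), head(h) {}
    };

    template<std::size_t I>
    struct MiniImpl<I> {};                    // basis case: empty

    // Deduce Head from the unique MiniImpl<I, Head, ...> base class.
    template<std::size_t I, typename Head, typename... Tail>
    Head& mini_get(MiniImpl<I, Head, Tail...>& t) { return t.head; }

    int main() {
      MiniImpl<0, int, double> m(1, 2.5);
      return mini_get<1>(m) == 2.5 ? 0 : 1;   // binds the MiniImpl<1, double> base
    }
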
160
161 template<typename _Tp>
162 struct __is_empty_non_tuple : is_empty<_Tp> { };
163
164 // Using EBO for elements that are tuples causes ambiguous base errors.
165 template<typename _El0, typename... _El>
166 struct __is_empty_non_tuple<tuple<_El0, _El...>> : false_type { };
167
168 // Use the Empty Base-class Optimization for empty, non-final types.
169 template<typename _Tp>
170 using __empty_not_final
171 = typename conditional<__is_final(_Tp), false_type,
172 __is_empty_non_tuple<_Tp>>::type;
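
(The payoff of the empty-base branch of _Head_base is object size: an empty, non-final element type contributes no storage to the tuple. A quick standalone check, using only the standard library:)

    #include <cstdio>
    #include <functional>
    #include <tuple>

    int main() {
      // With EBO the empty comparator adds no bytes; on this libstdc++
      // both sizes print as sizeof(int).
      std::printf("%zu %zu\n",
                  sizeof(std::tuple<int, std::less<int>>),
                  sizeof(int));
    }
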
173
174 /**
175 * Recursive tuple implementation. Here we store the @c Head element
176 * and derive from a @c Tuple_impl containing the remaining elements
177 * (which contains the @c Tail).
178 */
179 template<std::size_t _Idx, typename _Head, typename... _Tail>
180 struct _Tuple_impl<_Idx, _Head, _Tail...>
181 : public _Tuple_impl<_Idx + 1, _Tail...>,
182 private _Head_base<_Idx, _Head, __empty_not_final<_Head>::value>
183 {
184 template<std::size_t, typename...> friend class _Tuple_impl;
185
186 typedef _Tuple_impl<_Idx + 1, _Tail...> _Inherited;
187 typedef _Head_base<_Idx, _Head, __empty_not_final<_Head>::value> _Base;
188
189 static constexpr _Head&
190 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
191
192 static constexpr const _Head&
193 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
[Step 17] Calling '_Head_base::_M_head'
[Step 19] Returning from '_Head_base::_M_head'
[Step 20] Returning pointer (reference to field '_M_head_impl')
194
195 static constexpr _Inherited&
196 _M_tail(_Tuple_impl& __t) noexcept { return __t; }
197
198 static constexpr const _Inherited&
199 _M_tail(const _Tuple_impl& __t) noexcept { return __t; }
200
201 constexpr _Tuple_impl()
202 : _Inherited(), _Base() { }
203
204 explicit
205 constexpr _Tuple_impl(const _Head& __head, const _Tail&... __tail)
206 : _Inherited(__tail...), _Base(__head) { }
207
208 template<typename _UHead, typename... _UTail, typename = typename
209 enable_if<sizeof...(_Tail) == sizeof...(_UTail)>::type>
210 explicit
211 constexpr _Tuple_impl(_UHead&& __head, _UTail&&... __tail)
212 : _Inherited(std::forward<_UTail>(__tail)...),
213 _Base(std::forward<_UHead>(__head)) { }
214
215 constexpr _Tuple_impl(const _Tuple_impl&) = default;
216
217 constexpr
218 _Tuple_impl(_Tuple_impl&& __in)
219 noexcept(__and_<is_nothrow_move_constructible<_Head>,
220 is_nothrow_move_constructible<_Inherited>>::value)
221 : _Inherited(std::move(_M_tail(__in))),
222 _Base(std::forward<_Head>(_M_head(__in))) { }
223
224 template<typename... _UElements>
225 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UElements...>& __in)
226 : _Inherited(_Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
227 _Base(_Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
228
229 template<typename _UHead, typename... _UTails>
230 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
231 : _Inherited(std::move
232 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
233 _Base(std::forward<_UHead>
234 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
235
236 template<typename _Alloc>
237 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
238 : _Inherited(__tag, __a),
239 _Base(__tag, __use_alloc<_Head>(__a)) { }
240
241 template<typename _Alloc>
242 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
243 const _Head& __head, const _Tail&... __tail)
244 : _Inherited(__tag, __a, __tail...),
245 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
246
247 template<typename _Alloc, typename _UHead, typename... _UTail,
248 typename = typename enable_if<sizeof...(_Tail)
249 == sizeof...(_UTail)>::type>
250 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
251 _UHead&& __head, _UTail&&... __tail)
252 : _Inherited(__tag, __a, std::forward<_UTail>(__tail)...),
253 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
254 std::forward<_UHead>(__head)) { }
255
256 template<typename _Alloc>
257 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
258 const _Tuple_impl& __in)
259 : _Inherited(__tag, __a, _M_tail(__in)),
260 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
261
262 template<typename _Alloc>
263 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
264 _Tuple_impl&& __in)
265 : _Inherited(__tag, __a, std::move(_M_tail(__in))),
266 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
267 std::forward<_Head>(_M_head(__in))) { }
268
269 template<typename _Alloc, typename... _UElements>
270 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
271 const _Tuple_impl<_Idx, _UElements...>& __in)
272 : _Inherited(__tag, __a,
273 _Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
274 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
275 _Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
276
277 template<typename _Alloc, typename _UHead, typename... _UTails>
278 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
279 _Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
280 : _Inherited(__tag, __a, std::move
281 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
282 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
283 std::forward<_UHead>
284 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
285
286 _Tuple_impl&
287 operator=(const _Tuple_impl& __in)
288 {
289 _M_head(*this) = _M_head(__in);
290 _M_tail(*this) = _M_tail(__in);
291 return *this;
292 }
293
294 _Tuple_impl&
295 operator=(_Tuple_impl&& __in)
296 noexcept(__and_<is_nothrow_move_assignable<_Head>,
297 is_nothrow_move_assignable<_Inherited>>::value)
298 {
299 _M_head(*this) = std::forward<_Head>(_M_head(__in));
300 _M_tail(*this) = std::move(_M_tail(__in));
301 return *this;
302 }
303
304 template<typename... _UElements>
305 _Tuple_impl&
306 operator=(const _Tuple_impl<_Idx, _UElements...>& __in)
307 {
308 _M_head(*this) = _Tuple_impl<_Idx, _UElements...>::_M_head(__in);
309 _M_tail(*this) = _Tuple_impl<_Idx, _UElements...>::_M_tail(__in);
310 return *this;
311 }
312
313 template<typename _UHead, typename... _UTails>
314 _Tuple_impl&
315 operator=(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
316 {
317 _M_head(*this) = std::forward<_UHead>
318 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in));
319 _M_tail(*this) = std::move
320 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in));
321 return *this;
322 }
323
324 protected:
325 void
326 _M_swap(_Tuple_impl& __in)
327 noexcept(__is_nothrow_swappable<_Head>::value
328 && noexcept(_M_tail(__in)._M_swap(_M_tail(__in))))
329 {
330 using std::swap;
331 swap(_M_head(*this), _M_head(__in));
332 _Inherited::_M_swap(_M_tail(__in));
333 }
334 };
335
336 // Basis case of inheritance recursion.
337 template<std::size_t _Idx, typename _Head>
338 struct _Tuple_impl<_Idx, _Head>
339 : private _Head_base<_Idx, _Head, __empty_not_final<_Head>::value>
340 {
341 template<std::size_t, typename...> friend class _Tuple_impl;
342
343 typedef _Head_base<_Idx, _Head, __empty_not_final<_Head>::value> _Base;
344
345 static constexpr _Head&
346 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
347
348 static constexpr const _Head&
349 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
350
351 constexpr _Tuple_impl()
352 : _Base() { }
353
354 explicit
355 constexpr _Tuple_impl(const _Head& __head)
356 : _Base(__head) { }
357
358 template<typename _UHead>
359 explicit
360 constexpr _Tuple_impl(_UHead&& __head)
361 : _Base(std::forward<_UHead>(__head)) { }
362
363 constexpr _Tuple_impl(const _Tuple_impl&) = default;
364
365 constexpr
366 _Tuple_impl(_Tuple_impl&& __in)
367 noexcept(is_nothrow_move_constructible<_Head>::value)
368 : _Base(std::forward<_Head>(_M_head(__in))) { }
369
370 template<typename _UHead>
371 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UHead>& __in)
372 : _Base(_Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
373
374 template<typename _UHead>
375 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead>&& __in)
376 : _Base(std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
377 { }
378
379 template<typename _Alloc>
380 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
381 : _Base(__tag, __use_alloc<_Head>(__a)) { }
382
383 template<typename _Alloc>
384 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
385 const _Head& __head)
386 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
387
388 template<typename _Alloc, typename _UHead>
389 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
390 _UHead&& __head)
391 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
392 std::forward<_UHead>(__head)) { }
393
394 template<typename _Alloc>
395 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
396 const _Tuple_impl& __in)
397 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
398
399 template<typename _Alloc>
400 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
401 _Tuple_impl&& __in)
402 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
403 std::forward<_Head>(_M_head(__in))) { }
404
405 template<typename _Alloc, typename _UHead>
406 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
407 const _Tuple_impl<_Idx, _UHead>& __in)
408 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
409 _Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
410
411 template<typename _Alloc, typename _UHead>
412 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
413 _Tuple_impl<_Idx, _UHead>&& __in)
414 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
415 std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
416 { }
417
418 _Tuple_impl&
419 operator=(const _Tuple_impl& __in)
420 {
421 _M_head(*this) = _M_head(__in);
422 return *this;
423 }
424
425 _Tuple_impl&
426 operator=(_Tuple_impl&& __in)
427 noexcept(is_nothrow_move_assignable<_Head>::value)
428 {
429 _M_head(*this) = std::forward<_Head>(_M_head(__in));
430 return *this;
431 }
432
433 template<typename _UHead>
434 _Tuple_impl&
435 operator=(const _Tuple_impl<_Idx, _UHead>& __in)
436 {
437 _M_head(*this) = _Tuple_impl<_Idx, _UHead>::_M_head(__in);
438 return *this;
439 }
440
441 template<typename _UHead>
442 _Tuple_impl&
443 operator=(_Tuple_impl<_Idx, _UHead>&& __in)
444 {
445 _M_head(*this)
446 = std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in));
447 return *this;
448 }
449
450 protected:
451 void
452 _M_swap(_Tuple_impl& __in)
453 noexcept(__is_nothrow_swappable<_Head>::value)
454 {
455 using std::swap;
456 swap(_M_head(*this), _M_head(__in));
457 }
458 };
459
460 template<typename... _Elements>
461 class tuple;
462
463 // Concept utility functions, reused in conditionally-explicit
464 // constructors.
465 template<bool, typename... _Elements>
466 struct _TC
467 {
468 template<typename... _UElements>
469 static constexpr bool _ConstructibleTuple()
470 {
471 return __and_<is_constructible<_Elements, const _UElements&>...>::value;
472 }
473
474 template<typename... _UElements>
475 static constexpr bool _ImplicitlyConvertibleTuple()
476 {
477 return __and_<is_convertible<const _UElements&, _Elements>...>::value;
478 }
479
480 template<typename... _UElements>
481 static constexpr bool _MoveConstructibleTuple()
482 {
483 return __and_<is_constructible<_Elements, _UElements&&>...>::value;
484 }
485
486 template<typename... _UElements>
487 static constexpr bool _ImplicitlyMoveConvertibleTuple()
488 {
489 return __and_<is_convertible<_UElements&&, _Elements>...>::value;
490 }
491
492 template<typename _SrcTuple>
493 static constexpr bool _NonNestedTuple()
494 {
495 return __and_<__not_<is_same<tuple<_Elements...>,
496 typename remove_cv<
497 typename remove_reference<_SrcTuple>::type
498 >::type>>,
499 __not_<is_convertible<_SrcTuple, _Elements...>>,
500 __not_<is_constructible<_Elements..., _SrcTuple>>
501 >::value;
502 }
503 template<typename... _UElements>
504 static constexpr bool _NotSameTuple()
505 {
506 return __not_<is_same<tuple<_Elements...>,
507 typename remove_const<
508 typename remove_reference<_UElements...>::type
509 >::type>>::value;
510 }
511 };
512
513 template<typename... _Elements>
514 struct _TC<false, _Elements...>
515 {
516 template<typename... _UElements>
517 static constexpr bool _ConstructibleTuple()
518 {
519 return false;
520 }
521
522 template<typename... _UElements>
523 static constexpr bool _ImplicitlyConvertibleTuple()
524 {
525 return false;
526 }
527
528 template<typename... _UElements>
529 static constexpr bool _MoveConstructibleTuple()
530 {
531 return false;
532 }
533
534 template<typename... _UElements>
535 static constexpr bool _ImplicitlyMoveConvertibleTuple()
536 {
537 return false;
538 }
539
540 template<typename... _UElements>
541 static constexpr bool _NonNestedTuple()
542 {
543 return true;
544 }
545 template<typename... _UElements>
546 static constexpr bool _NotSameTuple()
547 {
548 return true;
549 }
550 };
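
(The paired _TC specializations support the conditionally-explicit constructor idiom used throughout the tuple constructors below: each converting constructor is written twice, once implicit and once explicit, with mutually exclusive enable_if conditions, because C++11 has no explicit(bool). The idiom in isolation, on a hypothetical single-element Wrapper:)

    #include <type_traits>

    template<typename T>
    struct Wrapper {
      // Implicit when U converts to T implicitly...
      template<typename U, typename std::enable_if<
                 std::is_convertible<U&&, T>::value, bool>::type = true>
      Wrapper(U&& u) : value(static_cast<U&&>(u)) {}

      // ...explicit when T is constructible from U but not convertible.
      template<typename U, typename std::enable_if<
                 std::is_constructible<T, U&&>::value
                 && !std::is_convertible<U&&, T>::value, bool>::type = false>
      explicit Wrapper(U&& u) : value(static_cast<U&&>(u)) {}

      T value;
    };
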
551
552 /// Primary class template, tuple
553 template<typename... _Elements>
554 class tuple : public _Tuple_impl<0, _Elements...>
555 {
556 typedef _Tuple_impl<0, _Elements...> _Inherited;
557
558 // Used for constraining the default constructor so
559 // that it becomes dependent on the constraints.
560 template<typename _Dummy>
561 struct _TC2
562 {
563 static constexpr bool _DefaultConstructibleTuple()
564 {
565 return __and_<is_default_constructible<_Elements>...>::value;
566 }
567 static constexpr bool _ImplicitlyDefaultConstructibleTuple()
568 {
569 return __and_<__is_implicitly_default_constructible<_Elements>...>
570 ::value;
571 }
572 };
573
574 public:
575 template<typename _Dummy = void,
576 typename enable_if<_TC2<_Dummy>::
577 _ImplicitlyDefaultConstructibleTuple(),
578 bool>::type = true>
579 constexpr tuple()
580 : _Inherited() { }
581
582 template<typename _Dummy = void,
583 typename enable_if<_TC2<_Dummy>::
584 _DefaultConstructibleTuple()
585 &&
586 !_TC2<_Dummy>::
587 _ImplicitlyDefaultConstructibleTuple(),
588 bool>::type = false>
589 explicit constexpr tuple()
590 : _Inherited() { }
591
592 // Shortcut for the cases where constructors taking _Elements...
593 // need to be constrained.
594 template<typename _Dummy> using _TCC =
595 _TC<is_same<_Dummy, void>::value,
596 _Elements...>;
597
598 template<typename _Dummy = void,
599 typename enable_if<
600 _TCC<_Dummy>::template
601 _ConstructibleTuple<_Elements...>()
602 && _TCC<_Dummy>::template
603 _ImplicitlyConvertibleTuple<_Elements...>()
604 && (sizeof...(_Elements) >= 1),
605 bool>::type=true>
606 constexpr tuple(const _Elements&... __elements)
607 : _Inherited(__elements...) { }
608
609 template<typename _Dummy = void,
610 typename enable_if<
611 _TCC<_Dummy>::template
612 _ConstructibleTuple<_Elements...>()
613 && !_TCC<_Dummy>::template
614 _ImplicitlyConvertibleTuple<_Elements...>()
615 && (sizeof...(_Elements) >= 1),
616 bool>::type=false>
617 explicit constexpr tuple(const _Elements&... __elements)
618 : _Inherited(__elements...) { }
619
620 // Shortcut for the cases where constructors taking _UElements...
621 // need to be constrained.
622 template<typename... _UElements> using _TMC =
623 _TC<(sizeof...(_Elements) == sizeof...(_UElements)),
624 _Elements...>;
625
626 template<typename... _UElements, typename
627 enable_if<
628 _TC<sizeof...(_UElements) == 1, _Elements...>::template
629 _NotSameTuple<_UElements...>()
630 && _TMC<_UElements...>::template
631 _MoveConstructibleTuple<_UElements...>()
632 && _TMC<_UElements...>::template
633 _ImplicitlyMoveConvertibleTuple<_UElements...>()
634 && (sizeof...(_Elements) >= 1),
635 bool>::type=true>
636 constexpr tuple(_UElements&&... __elements)
637 : _Inherited(std::forward<_UElements>(__elements)...) { }
638
639 template<typename... _UElements, typename
640 enable_if<
641 _TC<sizeof...(_UElements) == 1, _Elements...>::template
642 _NotSameTuple<_UElements...>()
643 && _TMC<_UElements...>::template
644 _MoveConstructibleTuple<_UElements...>()
645 && !_TMC<_UElements...>::template
646 _ImplicitlyMoveConvertibleTuple<_UElements...>()
647 && (sizeof...(_Elements) >= 1),
648 bool>::type=false>
649 explicit constexpr tuple(_UElements&&... __elements)
650 : _Inherited(std::forward<_UElements>(__elements)...) { }
651
652 constexpr tuple(const tuple&) = default;
653
654 constexpr tuple(tuple&&) = default;
655
656 // Shortcut for the cases where constructors taking tuples
657 // must avoid creating temporaries.
658 template<typename _Dummy> using _TNTC =
659 _TC<is_same<_Dummy, void>::value && sizeof...(_Elements) == 1,
660 _Elements...>;
661
662 template<typename... _UElements, typename _Dummy = void, typename
663 enable_if<_TMC<_UElements...>::template
664 _ConstructibleTuple<_UElements...>()
665 && _TMC<_UElements...>::template
666 _ImplicitlyConvertibleTuple<_UElements...>()
667 && _TNTC<_Dummy>::template
668 _NonNestedTuple<const tuple<_UElements...>&>(),
669 bool>::type=true>
670 constexpr tuple(const tuple<_UElements...>& __in)
671 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
672 { }
673
674 template<typename... _UElements, typename _Dummy = void, typename
675 enable_if<_TMC<_UElements...>::template
676 _ConstructibleTuple<_UElements...>()
677 && !_TMC<_UElements...>::template
678 _ImplicitlyConvertibleTuple<_UElements...>()
679 && _TNTC<_Dummy>::template
680 _NonNestedTuple<const tuple<_UElements...>&>(),
681 bool>::type=false>
682 explicit constexpr tuple(const tuple<_UElements...>& __in)
683 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
684 { }
685
686 template<typename... _UElements, typename _Dummy = void, typename
687 enable_if<_TMC<_UElements...>::template
688 _MoveConstructibleTuple<_UElements...>()
689 && _TMC<_UElements...>::template
690 _ImplicitlyMoveConvertibleTuple<_UElements...>()
691 && _TNTC<_Dummy>::template
692 _NonNestedTuple<tuple<_UElements...>&&>(),
693 bool>::type=true>
694 constexpr tuple(tuple<_UElements...>&& __in)
695 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
696
697 template<typename... _UElements, typename _Dummy = void, typename
698 enable_if<_TMC<_UElements...>::template
699 _MoveConstructibleTuple<_UElements...>()
700 && !_TMC<_UElements...>::template
701 _ImplicitlyMoveConvertibleTuple<_UElements...>()
702 && _TNTC<_Dummy>::template
703 _NonNestedTuple<tuple<_UElements...>&&>(),
704 bool>::type=false>
705 explicit constexpr tuple(tuple<_UElements...>&& __in)
706 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
707
708 // Allocator-extended constructors.
709
710 template<typename _Alloc>
711 tuple(allocator_arg_t __tag, const _Alloc& __a)
712 : _Inherited(__tag, __a) { }
713
714 template<typename _Alloc, typename _Dummy = void,
715 typename enable_if<
716 _TCC<_Dummy>::template
717 _ConstructibleTuple<_Elements...>()
718 && _TCC<_Dummy>::template
719 _ImplicitlyConvertibleTuple<_Elements...>(),
720 bool>::type=true>
721 tuple(allocator_arg_t __tag, const _Alloc& __a,
722 const _Elements&... __elements)
723 : _Inherited(__tag, __a, __elements...) { }
724
725 template<typename _Alloc, typename _Dummy = void,
726 typename enable_if<
727 _TCC<_Dummy>::template
728 _ConstructibleTuple<_Elements...>()
729 && !_TCC<_Dummy>::template
730 _ImplicitlyConvertibleTuple<_Elements...>(),
731 bool>::type=false>
732 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
733 const _Elements&... __elements)
734 : _Inherited(__tag, __a, __elements...) { }
735
736 template<typename _Alloc, typename... _UElements, typename
737 enable_if<_TMC<_UElements...>::template
738 _MoveConstructibleTuple<_UElements...>()
739 && _TMC<_UElements...>::template
740 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
741 bool>::type=true>
742 tuple(allocator_arg_t __tag, const _Alloc& __a,
743 _UElements&&... __elements)
744 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
745 { }
746
747 template<typename _Alloc, typename... _UElements, typename
748 enable_if<_TMC<_UElements...>::template
749 _MoveConstructibleTuple<_UElements...>()
750 && !_TMC<_UElements...>::template
751 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
752 bool>::type=false>
753 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
754 _UElements&&... __elements)
755 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
756 { }
757
758 template<typename _Alloc>
759 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
760 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
761
762 template<typename _Alloc>
763 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
764 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
765
766 template<typename _Alloc, typename... _UElements, typename
767 enable_if<_TMC<_UElements...>::template
768 _ConstructibleTuple<_UElements...>()
769 && _TMC<_UElements...>::template
770 _ImplicitlyConvertibleTuple<_UElements...>(),
771 bool>::type=true>
772 tuple(allocator_arg_t __tag, const _Alloc& __a,
773 const tuple<_UElements...>& __in)
774 : _Inherited(__tag, __a,
775 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
776 { }
777
778 template<typename _Alloc, typename... _UElements, typename
779 enable_if<_TMC<_UElements...>::template
780 _ConstructibleTuple<_UElements...>()
781 && !_TMC<_UElements...>::template
782 _ImplicitlyConvertibleTuple<_UElements...>(),
783 bool>::type=false>
784 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
785 const tuple<_UElements...>& __in)
786 : _Inherited(__tag, __a,
787 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
788 { }
789
790 template<typename _Alloc, typename... _UElements, typename
791 enable_if<_TMC<_UElements...>::template
792 _MoveConstructibleTuple<_UElements...>()
793 && _TMC<_UElements...>::template
794 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
795 bool>::type=true>
796 tuple(allocator_arg_t __tag, const _Alloc& __a,
797 tuple<_UElements...>&& __in)
798 : _Inherited(__tag, __a,
799 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
800 { }
801
802 template<typename _Alloc, typename... _UElements, typename
803 enable_if<_TMC<_UElements...>::template
804 _MoveConstructibleTuple<_UElements...>()
805 && !_TMC<_UElements...>::template
806 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
807 bool>::type=false>
808 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
809 tuple<_UElements...>&& __in)
810 : _Inherited(__tag, __a,
811 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
812 { }
813
814 tuple&
815 operator=(const tuple& __in)
816 {
817 static_cast<_Inherited&>(*this) = __in;
818 return *this;
819 }
820
821 tuple&
822 operator=(tuple&& __in)
823 noexcept(is_nothrow_move_assignable<_Inherited>::value)
824 {
825 static_cast<_Inherited&>(*this) = std::move(__in);
826 return *this;
827 }
828
829 template<typename... _UElements, typename = typename
830 enable_if<sizeof...(_UElements)
831 == sizeof...(_Elements)>::type>
832 tuple&
833 operator=(const tuple<_UElements...>& __in)
834 {
835 static_cast<_Inherited&>(*this) = __in;
836 return *this;
837 }
838
839 template<typename... _UElements, typename = typename
840 enable_if<sizeof...(_UElements)
841 == sizeof...(_Elements)>::type>
842 tuple&
843 operator=(tuple<_UElements...>&& __in)
844 {
845 static_cast<_Inherited&>(*this) = std::move(__in);
846 return *this;
847 }
848
849 void
850 swap(tuple& __in)
851 noexcept(noexcept(__in._M_swap(__in)))
852 { _Inherited::_M_swap(__in); }
853 };
854
855 // Explicit specialization, zero-element tuple.
856 template<>
857 class tuple<>
858 {
859 public:
860 void swap(tuple&) noexcept { /* no-op */ }
861 };
862
863 /// Partial specialization, 2-element tuple.
864 /// Includes construction and assignment from a pair.
865 template<typename _T1, typename _T2>
866 class tuple<_T1, _T2> : public _Tuple_impl<0, _T1, _T2>
867 {
868 typedef _Tuple_impl<0, _T1, _T2> _Inherited;
869
870 public:
871 template <typename _U1 = _T1,
872 typename _U2 = _T2,
873 typename enable_if<__and_<
874 __is_implicitly_default_constructible<_U1>,
875 __is_implicitly_default_constructible<_U2>>
876 ::value, bool>::type = true>
877
878 constexpr tuple()
879 : _Inherited() { }
880
881 template <typename _U1 = _T1,
882 typename _U2 = _T2,
883 typename enable_if<
884 __and_<
885 is_default_constructible<_U1>,
886 is_default_constructible<_U2>,
887 __not_<
888 __and_<__is_implicitly_default_constructible<_U1>,
889 __is_implicitly_default_constructible<_U2>>>>
890 ::value, bool>::type = false>
891
892 explicit constexpr tuple()
893 : _Inherited() { }
894
895 // Shortcut for the cases where constructors taking _T1, _T2
896 // need to be constrained.
897 template<typename _Dummy> using _TCC =
898 _TC<is_same<_Dummy, void>::value, _T1, _T2>;
899
900 template<typename _Dummy = void, typename
901 enable_if<_TCC<_Dummy>::template
902 _ConstructibleTuple<_T1, _T2>()
903 && _TCC<_Dummy>::template
904 _ImplicitlyConvertibleTuple<_T1, _T2>(),
905 bool>::type = true>
906 constexpr tuple(const _T1& __a1, const _T2& __a2)
907 : _Inherited(__a1, __a2) { }
908
909 template<typename _Dummy = void, typename
910 enable_if<_TCC<_Dummy>::template
911 _ConstructibleTuple<_T1, _T2>()
912 && !_TCC<_Dummy>::template
913 _ImplicitlyConvertibleTuple<_T1, _T2>(),
914 bool>::type = false>
915 explicit constexpr tuple(const _T1& __a1, const _T2& __a2)
916 : _Inherited(__a1, __a2) { }
917
918 // Shortcut for the cases where constructors taking _U1, _U2
919 // need to be constrained.
920 using _TMC = _TC<true, _T1, _T2>;
921
922 template<typename _U1, typename _U2, typename
923 enable_if<_TMC::template
924 _MoveConstructibleTuple<_U1, _U2>()
925 && _TMC::template
926 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
927 && !is_same<typename decay<_U1>::type,
928 allocator_arg_t>::value,
929 bool>::type = true>
930 constexpr tuple(_U1&& __a1, _U2&& __a2)
931 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
932
933 template<typename _U1, typename _U2, typename
934 enable_if<_TMC::template
935 _MoveConstructibleTuple<_U1, _U2>()
936 && !_TMC::template
937 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
938 && !is_same<typename decay<_U1>::type,
939 allocator_arg_t>::value,
940 bool>::type = false>
941 explicit constexpr tuple(_U1&& __a1, _U2&& __a2)
942 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
943
944 constexpr tuple(const tuple&) = default;
945
946 constexpr tuple(tuple&&) = default;
947
948 template<typename _U1, typename _U2, typename
949 enable_if<_TMC::template
950 _ConstructibleTuple<_U1, _U2>()
951 && _TMC::template
952 _ImplicitlyConvertibleTuple<_U1, _U2>(),
953 bool>::type = true>
954 constexpr tuple(const tuple<_U1, _U2>& __in)
955 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
956
957 template<typename _U1, typename _U2, typename
958 enable_if<_TMC::template
959 _ConstructibleTuple<_U1, _U2>()
960 && !_TMC::template
961 _ImplicitlyConvertibleTuple<_U1, _U2>(),
962 bool>::type = false>
963 explicit constexpr tuple(const tuple<_U1, _U2>& __in)
964 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
965
966 template<typename _U1, typename _U2, typename
967 enable_if<_TMC::template
968 _MoveConstructibleTuple<_U1, _U2>()
969 && _TMC::template
970 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
971 bool>::type = true>
972 constexpr tuple(tuple<_U1, _U2>&& __in)
973 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
974
975 template<typename _U1, typename _U2, typename
976 enable_if<_TMC::template
977 _MoveConstructibleTuple<_U1, _U2>()
978 && !_TMC::template
979 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
980 bool>::type = false>
981 explicit constexpr tuple(tuple<_U1, _U2>&& __in)
982 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
983
984 template<typename _U1, typename _U2, typename
985 enable_if<_TMC::template
986 _ConstructibleTuple<_U1, _U2>()
987 && _TMC::template
988 _ImplicitlyConvertibleTuple<_U1, _U2>(),
989 bool>::type = true>
990 constexpr tuple(const pair<_U1, _U2>& __in)
991 : _Inherited(__in.first, __in.second) { }
992
993 template<typename _U1, typename _U2, typename
994 enable_if<_TMC::template
995 _ConstructibleTuple<_U1, _U2>()
996 && !_TMC::template
997 _ImplicitlyConvertibleTuple<_U1, _U2>(),
998 bool>::type = false>
999 explicit constexpr tuple(const pair<_U1, _U2>& __in)
1000 : _Inherited(__in.first, __in.second) { }
1001
1002 template<typename _U1, typename _U2, typename
1003 enable_if<_TMC::template
1004 _MoveConstructibleTuple<_U1, _U2>()
1005 && _TMC::template
1006 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1007 bool>::type = true>
1008 constexpr tuple(pair<_U1, _U2>&& __in)
1009 : _Inherited(std::forward<_U1>(__in.first),
1010 std::forward<_U2>(__in.second)) { }
1011
1012 template<typename _U1, typename _U2, typename
1013 enable_if<_TMC::template
1014 _MoveConstructibleTuple<_U1, _U2>()
1015 && !_TMC::template
1016 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1017 bool>::type = false>
1018 explicit constexpr tuple(pair<_U1, _U2>&& __in)
1019 : _Inherited(std::forward<_U1>(__in.first),
1020 std::forward<_U2>(__in.second)) { }
1021
1022 // Allocator-extended constructors.
1023
1024 template<typename _Alloc>
1025 tuple(allocator_arg_t __tag, const _Alloc& __a)
1026 : _Inherited(__tag, __a) { }
1027
1028 template<typename _Alloc, typename _Dummy = void,
1029 typename enable_if<
1030 _TCC<_Dummy>::template
1031 _ConstructibleTuple<_T1, _T2>()
1032 && _TCC<_Dummy>::template
1033 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1034 bool>::type=true>
1035
1036 tuple(allocator_arg_t __tag, const _Alloc& __a,
1037 const _T1& __a1, const _T2& __a2)
1038 : _Inherited(__tag, __a, __a1, __a2) { }
1039
1040 template<typename _Alloc, typename _Dummy = void,
1041 typename enable_if<
1042 _TCC<_Dummy>::template
1043 _ConstructibleTuple<_T1, _T2>()
1044 && !_TCC<_Dummy>::template
1045 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1046 bool>::type=false>
1047
1048 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1049 const _T1& __a1, const _T2& __a2)
1050 : _Inherited(__tag, __a, __a1, __a2) { }
1051
1052 template<typename _Alloc, typename _U1, typename _U2, typename
1053 enable_if<_TMC::template
1054 _MoveConstructibleTuple<_U1, _U2>()
1055 && _TMC::template
1056 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1057 bool>::type = true>
1058 tuple(allocator_arg_t __tag, const _Alloc& __a, _U1&& __a1, _U2&& __a2)
1059 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1060 std::forward<_U2>(__a2)) { }
1061
1062 template<typename _Alloc, typename _U1, typename _U2, typename
1063 enable_if<_TMC::template
1064 _MoveConstructibleTuple<_U1, _U2>()
1065 && !_TMC::template
1066 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1067 bool>::type = false>
1068 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1069 _U1&& __a1, _U2&& __a2)
1070 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1071 std::forward<_U2>(__a2)) { }
1072
1073 template<typename _Alloc>
1074 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
1075 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
1076
1077 template<typename _Alloc>
1078 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
1079 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
1080
1081 template<typename _Alloc, typename _U1, typename _U2, typename
1082 enable_if<_TMC::template
1083 _ConstructibleTuple<_U1, _U2>()
1084 && _TMC::template
1085 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1086 bool>::type = true>
1087 tuple(allocator_arg_t __tag, const _Alloc& __a,
1088 const tuple<_U1, _U2>& __in)
1089 : _Inherited(__tag, __a,
1090 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1091 { }
1092
1093 template<typename _Alloc, typename _U1, typename _U2, typename
1094 enable_if<_TMC::template
1095 _ConstructibleTuple<_U1, _U2>()
1096 && !_TMC::template
1097 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1098 bool>::type = false>
1099 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1100 const tuple<_U1, _U2>& __in)
1101 : _Inherited(__tag, __a,
1102 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1103 { }
1104
1105 template<typename _Alloc, typename _U1, typename _U2, typename
1106 enable_if<_TMC::template
1107 _MoveConstructibleTuple<_U1, _U2>()
1108 && _TMC::template
1109 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1110 bool>::type = true>
1111 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple<_U1, _U2>&& __in)
1112 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1113 { }
1114
1115 template<typename _Alloc, typename _U1, typename _U2, typename
1116 enable_if<_TMC::template
1117 _MoveConstructibleTuple<_U1, _U2>()
1118 && !_TMC::template
1119 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1120 bool>::type = false>
1121 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1122 tuple<_U1, _U2>&& __in)
1123 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1124 { }
1125
1126 template<typename _Alloc, typename _U1, typename _U2, typename
1127 enable_if<_TMC::template
1128 _ConstructibleTuple<_U1, _U2>()
1129 && _TMC::template
1130 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1131 bool>::type = true>
1132 tuple(allocator_arg_t __tag, const _Alloc& __a,
1133 const pair<_U1, _U2>& __in)
1134 : _Inherited(__tag, __a, __in.first, __in.second) { }
1135
1136 template<typename _Alloc, typename _U1, typename _U2, typename
1137 enable_if<_TMC::template
1138 _ConstructibleTuple<_U1, _U2>()
1139 && !_TMC::template
1140 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1141 bool>::type = false>
1142 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1143 const pair<_U1, _U2>& __in)
1144 : _Inherited(__tag, __a, __in.first, __in.second) { }
1145
1146 template<typename _Alloc, typename _U1, typename _U2, typename
1147 enable_if<_TMC::template
1148 _MoveConstructibleTuple<_U1, _U2>()
1149 && _TMC::template
1150 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1151 bool>::type = true>
1152 tuple(allocator_arg_t __tag, const _Alloc& __a, pair<_U1, _U2>&& __in)
1153 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1154 std::forward<_U2>(__in.second)) { }
1155
1156 template<typename _Alloc, typename _U1, typename _U2, typename
1157 enable_if<_TMC::template
1158 _MoveConstructibleTuple<_U1, _U2>()
1159 && !_TMC::template
1160 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1161 bool>::type = false>
1162 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1163 pair<_U1, _U2>&& __in)
1164 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1165 std::forward<_U2>(__in.second)) { }
1166
1167 tuple&
1168 operator=(const tuple& __in)
1169 {
1170 static_cast<_Inherited&>(*this) = __in;
1171 return *this;
1172 }
1173
1174 tuple&
1175 operator=(tuple&& __in)
1176 noexcept(is_nothrow_move_assignable<_Inherited>::value)
1177 {
1178 static_cast<_Inherited&>(*this) = std::move(__in);
1179 return *this;
1180 }
1181
1182 template<typename _U1, typename _U2>
1183 tuple&
1184 operator=(const tuple<_U1, _U2>& __in)
1185 {
1186 static_cast<_Inherited&>(*this) = __in;
1187 return *this;
1188 }
1189
1190 template<typename _U1, typename _U2>
1191 tuple&
1192 operator=(tuple<_U1, _U2>&& __in)
1193 {
1194 static_cast<_Inherited&>(*this) = std::move(__in);
1195 return *this;
1196 }
1197
1198 template<typename _U1, typename _U2>
1199 tuple&
1200 operator=(const pair<_U1, _U2>& __in)
1201 {
1202 this->_M_head(*this) = __in.first;
1203 this->_M_tail(*this)._M_head(*this) = __in.second;
1204 return *this;
1205 }
1206
1207 template<typename _U1, typename _U2>
1208 tuple&
1209 operator=(pair<_U1, _U2>&& __in)
1210 {
1211 this->_M_head(*this) = std::forward<_U1>(__in.first);
1212 this->_M_tail(*this)._M_head(*this) = std::forward<_U2>(__in.second);
1213 return *this;
1214 }
1215
1216 void
1217 swap(tuple& __in)
1218 noexcept(noexcept(__in._M_swap(__in)))
1219 { _Inherited::_M_swap(__in); }
1220 };
1221
1222
1223 /**
1224 * Recursive case for tuple_element: strip off the first element in
1225 * the tuple and retrieve the (i-1)th element of the remaining tuple.
1226 */
1227 template<std::size_t __i, typename _Head, typename... _Tail>
1228 struct tuple_element<__i, tuple<_Head, _Tail...> >
1229 : tuple_element<__i - 1, tuple<_Tail...> > { };
1230
1231 /**
1232 * Basis case for tuple_element: The first element is the one we're seeking.
1233 */
1234 template<typename _Head, typename... _Tail>
1235 struct tuple_element<0, tuple<_Head, _Tail...> >
1236 {
1237 typedef _Head type;
1238 };
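
(A short illustration of the recursion: asking for element 1 strips one head and asks the tail tuple for element 0.)

    #include <tuple>
    #include <type_traits>

    static_assert(std::is_same<
                    std::tuple_element<1, std::tuple<char, int, long>>::type,
                    int>::value,
                  "one head stripped per recursion step");
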
1239
1240 /// class tuple_size
1241 template<typename... _Elements>
1242 struct tuple_size<tuple<_Elements...>>
1243 : public integral_constant<std::size_t, sizeof...(_Elements)> { };
1244
1245 template<std::size_t __i, typename _Head, typename... _Tail>
1246 constexpr _Head&
1247 __get_helper(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1248 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1249
1250 template<std::size_t __i, typename _Head, typename... _Tail>
1251 constexpr const _Head&
1252 __get_helper(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1253 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
[Step 16] Calling '_Tuple_impl::_M_head'
[Step 21] Returning from '_Tuple_impl::_M_head'
[Step 22] Returning pointer (reference to field '_M_head_impl')
1254
1255 /// Return a reference to the ith element of a tuple.
1256 template<std::size_t __i, typename... _Elements>
1257 constexpr __tuple_element_t<__i, tuple<_Elements...>>&
1258 get(tuple<_Elements...>& __t) noexcept
1259 { return std::__get_helper<__i>(__t); }
1260
1261 /// Return a const reference to the ith element of a const tuple.
1262 template<std::size_t __i, typename... _Elements>
1263 constexpr const __tuple_element_t<__i, tuple<_Elements...>>&
1264 get(const tuple<_Elements...>& __t) noexcept
1265 { return std::__get_helper<__i>(__t); }
[Step 15] Calling '__get_helper<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
[Step 23] Returning from '__get_helper<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
[Step 24] Returning pointer (reference to field '_M_head_impl')
1266
1267 /// Return an rvalue reference to the ith element of a tuple rvalue.
1268 template<std::size_t __i, typename... _Elements>
1269 constexpr __tuple_element_t<__i, tuple<_Elements...>>&&
1270 get(tuple<_Elements...>&& __t) noexcept
1271 {
1272 typedef __tuple_element_t<__i, tuple<_Elements...>> __element_type;
1273 return std::forward<__element_type&&>(std::get<__i>(__t));
1274 }
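
(The three get overloads dispatch purely on the value category of the tuple argument; a small usage sketch:)

    #include <tuple>
    #include <utility>

    int main() {
      std::tuple<int, const char*> t(7, "seven");
      std::get<0>(t) = 8;                     // lvalue tuple: mutable int&
      const auto& ct = t;
      int c = std::get<0>(ct);                // const tuple: const int&
      int m = std::get<0>(std::move(t));      // rvalue tuple: int&&
      return c + m == 16 ? 0 : 1;
    }
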
1275
1276#if __cplusplus /* expands to 201103L */ > 201103L
1277
1278#define __cpp_lib_tuples_by_type 201304
1279
1280 template<typename _Head, size_t __i, typename... _Tail>
1281 constexpr _Head&
1282 __get_helper2(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1283 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1284
1285 template<typename _Head, size_t __i, typename... _Tail>
1286 constexpr const _Head&
1287 __get_helper2(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1288 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1289
1290 /// Return a reference to the unique element of type _Tp of a tuple.
1291 template <typename _Tp, typename... _Types>
1292 constexpr _Tp&
1293 get(tuple<_Types...>& __t) noexcept
1294 { return std::__get_helper2<_Tp>(__t); }
1295
1296 /// Return a reference to the unique element of type _Tp of a tuple rvalue.
1297 template <typename _Tp, typename... _Types>
1298 constexpr _Tp&&
1299 get(tuple<_Types...>&& __t) noexcept
1300 { return std::forward<_Tp&&>(std::__get_helper2<_Tp>(__t)); }
1301
1302 /// Return a const reference to the unique element of type _Tp of a tuple.
1303 template <typename _Tp, typename... _Types>
1304 constexpr const _Tp&
1305 get(const tuple<_Types...>& __t) noexcept
1306 { return std::__get_helper2<_Tp>(__t); }
1307#endif
1308
1309 // This class performs the comparison operations on tuples
1310 template<typename _Tp, typename _Up, size_t __i, size_t __size>
1311 struct __tuple_compare
1312 {
1313 static constexpr bool
1314 __eq(const _Tp& __t, const _Up& __u)
1315 {
1316 return bool(std::get<__i>(__t) == std::get<__i>(__u))
1317 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__eq(__t, __u);
1318 }
1319
1320 static constexpr bool
1321 __less(const _Tp& __t, const _Up& __u)
1322 {
1323 return bool(std::get<__i>(__t) < std::get<__i>(__u))
1324 || (!bool(std::get<__i>(__u) < std::get<__i>(__t))
1325 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__less(__t, __u));
1326 }
1327 };
1328
1329 template<typename _Tp, typename _Up, size_t __size>
1330 struct __tuple_compare<_Tp, _Up, __size, __size>
1331 {
1332 static constexpr bool
1333 __eq(const _Tp&, const _Up&) { return true; }
1334
1335 static constexpr bool
1336 __less(const _Tp&, const _Up&) { return false; }
1337 };
1338
1339 template<typename... _TElements, typename... _UElements>
1340 constexpr bool
1341 operator==(const tuple<_TElements...>& __t,
1342 const tuple<_UElements...>& __u)
1343 {
1344 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1345 "tuple objects can only be compared if they have equal sizes.");
1346 using __compare = __tuple_compare<tuple<_TElements...>,
1347 tuple<_UElements...>,
1348 0, sizeof...(_TElements)>;
1349 return __compare::__eq(__t, __u);
1350 }
1351
1352 template<typename... _TElements, typename... _UElements>
1353 constexpr bool
1354 operator<(const tuple<_TElements...>& __t,
1355 const tuple<_UElements...>& __u)
1356 {
1357 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1358 "tuple objects can only be compared if they have equal sizes.");
1359 using __compare = __tuple_compare<tuple<_TElements...>,
1360 tuple<_UElements...>,
1361 0, sizeof...(_TElements)>;
1362 return __compare::__less(__t, __u);
1363 }
1364
1365 template<typename... _TElements, typename... _UElements>
1366 constexpr bool
1367 operator!=(const tuple<_TElements...>& __t,
1368 const tuple<_UElements...>& __u)
1369 { return !(__t == __u); }
1370
1371 template<typename... _TElements, typename... _UElements>
1372 constexpr bool
1373 operator>(const tuple<_TElements...>& __t,
1374 const tuple<_UElements...>& __u)
1375 { return __u < __t; }
1376
1377 template<typename... _TElements, typename... _UElements>
1378 constexpr bool
1379 operator<=(const tuple<_TElements...>& __t,
1380 const tuple<_UElements...>& __u)
1381 { return !(__u < __t); }
1382
1383 template<typename... _TElements, typename... _UElements>
1384 constexpr bool
1385 operator>=(const tuple<_TElements...>& __t,
1386 const tuple<_UElements...>& __u)
1387 { return !(__t < __u); }
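
(Together, __tuple_compare and the operators above give tuples elementwise equality and lexicographic ordering. For instance:)

    #include <cassert>
    #include <tuple>

    int main() {
      // Element 0 ties, so __less falls through to element 1.
      assert(std::make_tuple(1, 2) < std::make_tuple(1, 3));
      // Element 0 already decides; element 1 never matters.
      assert(!(std::make_tuple(2, 0) < std::make_tuple(1, 9)));
    }
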
1388
1389 // NB: DR 705.
1390 template<typename... _Elements>
1391 constexpr tuple<typename __decay_and_strip<_Elements>::__type...>
1392 make_tuple(_Elements&&... __args)
1393 {
1394 typedef tuple<typename __decay_and_strip<_Elements>::__type...>
1395 __result_type;
1396 return __result_type(std::forward<_Elements>(__args)...);
1397 }
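
(__decay_and_strip decays each argument type the way by-value passing would and additionally unwraps reference_wrapper, which is how std::ref smuggles a real reference into the result:)

    #include <functional>
    #include <tuple>
    #include <type_traits>

    int main() {
      int x = 0;
      auto t = std::make_tuple("hi", std::ref(x));
      static_assert(std::is_same<decltype(t),
                      std::tuple<const char*, int&>>::value,
                    "array decays, reference_wrapper unwraps");
      std::get<1>(t) = 42;                    // writes through to x
      return x == 42 ? 0 : 1;
    }
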
1398
1399 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1400 // 2275. Why is forward_as_tuple not constexpr?
1401 template<typename... _Elements>
1402 constexpr tuple<_Elements&&...>
1403 forward_as_tuple(_Elements&&... __args) noexcept
1404 { return tuple<_Elements&&...>(std::forward<_Elements>(__args)...); }
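
(Unlike make_tuple, forward_as_tuple preserves value categories, so lvalues become T& and rvalues become T&&; the result is meant to be consumed within the same full expression:)

    #include <tuple>
    #include <type_traits>

    void check(int& lv) {
      static_assert(std::is_same<
                      decltype(std::forward_as_tuple(lv, 2)),
                      std::tuple<int&, int&&>>::value,
                    "lvalue -> T&, prvalue -> T&&");
    }
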
1405
1406 template<typename... _Tps>
1407 struct __is_tuple_like_impl<tuple<_Tps...>> : true_type
1408 { };
1409
1410 // Internal type trait that allows us to sfinae-protect tuple_cat.
1411 template<typename _Tp>
1412 struct __is_tuple_like
1413 : public __is_tuple_like_impl<typename std::remove_cv
1414 <typename std::remove_reference<_Tp>::type>::type>::type
1415 { };
1416
1417 template<size_t, typename, typename, size_t>
1418 struct __make_tuple_impl;
1419
1420 template<size_t _Idx, typename _Tuple, typename... _Tp, size_t _Nm>
1421 struct __make_tuple_impl<_Idx, tuple<_Tp...>, _Tuple, _Nm>
1422 : __make_tuple_impl<_Idx + 1,
1423 tuple<_Tp..., __tuple_element_t<_Idx, _Tuple>>,
1424 _Tuple, _Nm>
1425 { };
1426
1427 template<std::size_t _Nm, typename _Tuple, typename... _Tp>
1428 struct __make_tuple_impl<_Nm, tuple<_Tp...>, _Tuple, _Nm>
1429 {
1430 typedef tuple<_Tp...> __type;
1431 };
1432
1433 template<typename _Tuple>
1434 struct __do_make_tuple
1435 : __make_tuple_impl<0, tuple<>, _Tuple, std::tuple_size<_Tuple>::value>
1436 { };
1437
1438 // Returns the std::tuple equivalent of a tuple-like type.
1439 template<typename _Tuple>
1440 struct __make_tuple
1441 : public __do_make_tuple<typename std::remove_cv
1442 <typename std::remove_reference<_Tuple>::type>::type>
1443 { };
1444
1445 // Combines several std::tuple's into a single one.
1446 template<typename...>
1447 struct __combine_tuples;
1448
1449 template<>
1450 struct __combine_tuples<>
1451 {
1452 typedef tuple<> __type;
1453 };
1454
1455 template<typename... _Ts>
1456 struct __combine_tuples<tuple<_Ts...>>
1457 {
1458 typedef tuple<_Ts...> __type;
1459 };
1460
1461 template<typename... _T1s, typename... _T2s, typename... _Rem>
1462 struct __combine_tuples<tuple<_T1s...>, tuple<_T2s...>, _Rem...>
1463 {
1464 typedef typename __combine_tuples<tuple<_T1s..., _T2s...>,
1465 _Rem...>::__type __type;
1466 };
1467
1468 // Computes the result type of tuple_cat given a set of tuple-like types.
1469 template<typename... _Tpls>
1470 struct __tuple_cat_result
1471 {
1472 typedef typename __combine_tuples
1473 <typename __make_tuple<_Tpls>::__type...>::__type __type;
1474 };
1475
1476 // Helper to determine the index set for the first tuple-like
1477 // type of a given set.
1478 template<typename...>
1479 struct __make_1st_indices;
1480
1481 template<>
1482 struct __make_1st_indices<>
1483 {
1484 typedef std::_Index_tuple<> __type;
1485 };
1486
1487 template<typename _Tp, typename... _Tpls>
1488 struct __make_1st_indices<_Tp, _Tpls...>
1489 {
1490 typedef typename std::_Build_index_tuple<std::tuple_size<
1491 typename std::remove_reference<_Tp>::type>::value>::__type __type;
1492 };
1493
1494 // Performs the actual concatenation by step-wise expanding tuple-like
1495 // objects into the elements, which are finally forwarded into the
1496 // result tuple.
1497 template<typename _Ret, typename _Indices, typename... _Tpls>
1498 struct __tuple_concater;
1499
1500 template<typename _Ret, std::size_t... _Is, typename _Tp, typename... _Tpls>
1501 struct __tuple_concater<_Ret, std::_Index_tuple<_Is...>, _Tp, _Tpls...>
1502 {
1503 template<typename... _Us>
1504 static constexpr _Ret
1505 _S_do(_Tp&& __tp, _Tpls&&... __tps, _Us&&... __us)
1506 {
1507 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1508 typedef __tuple_concater<_Ret, __idx, _Tpls...> __next;
1509 return __next::_S_do(std::forward<_Tpls>(__tps)...,
1510 std::forward<_Us>(__us)...,
1511 std::get<_Is>(std::forward<_Tp>(__tp))...);
1512 }
1513 };
1514
1515 template<typename _Ret>
1516 struct __tuple_concater<_Ret, std::_Index_tuple<>>
1517 {
1518 template<typename... _Us>
1519 static constexpr _Ret
1520 _S_do(_Us&&... __us)
1521 {
1522 return _Ret(std::forward<_Us>(__us)...);
1523 }
1524 };
1525
1526 /// tuple_cat
1527 template<typename... _Tpls, typename = typename
1528 enable_if<__and_<__is_tuple_like<_Tpls>...>::value>::type>
1529 constexpr auto
1530 tuple_cat(_Tpls&&... __tpls)
1531 -> typename __tuple_cat_result<_Tpls...>::__type
1532 {
1533 typedef typename __tuple_cat_result<_Tpls...>::__type __ret;
1534 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1535 typedef __tuple_concater<__ret, __idx, _Tpls...> __concater;
1536 return __concater::_S_do(std::forward<_Tpls>(__tpls)...);
1537 }
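
(The concatenation machinery flattens any mix of tuple-like arguments left to right; besides tuples, pairs and arrays also qualify via __is_tuple_like specializations in their own headers:)

    #include <cassert>
    #include <tuple>
    #include <utility>

    int main() {
      auto r = std::tuple_cat(std::make_pair(1, 2.0),
                              std::make_tuple('x'),
                              std::tuple<>());
      static_assert(std::tuple_size<decltype(r)>::value == 3,
                    "pair + 1-tuple + 0-tuple flatten to 3 elements");
      assert(std::get<2>(r) == 'x');
    }
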
1538
1539 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1540 // 2301. Why is tie not constexpr?
1541 /// tie
1542 template<typename... _Elements>
1543 constexpr tuple<_Elements&...>
1544 tie(_Elements&... __args) noexcept
1545 { return tuple<_Elements&...>(__args...); }
1546
1547 /// swap
1548 template<typename... _Elements>
1549 inline void
1550 swap(tuple<_Elements...>& __x, tuple<_Elements...>& __y)
1551 noexcept(noexcept(__x.swap(__y)))
1552 { __x.swap(__y); }
1553
1554 // A class (and instance) which can be used in 'tie' when an element
1555 // of a tuple is not required
1556 struct _Swallow_assign
1557 {
1558 template<class _Tp>
1559 const _Swallow_assign&
1560 operator=(const _Tp&) const
1561 { return *this; }
1562 };
1563
1564 const _Swallow_assign ignore{};
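
(tie builds a tuple of lvalue references, so assigning to it scatters a tuple's elements into existing variables; the ignore object above swallows any element it lines up with:)

    #include <cassert>
    #include <tuple>

    int main() {
      int lo, hi;
      std::tie(lo, std::ignore, hi) = std::make_tuple(1, 99, 3);
      assert(lo == 1 && hi == 3);             // 99 went to std::ignore
    }
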
1565
1566 /// Partial specialization for tuples
1567 template<typename... _Types, typename _Alloc>
1568 struct uses_allocator<tuple<_Types...>, _Alloc> : true_type { };
1569
1570 // See stl_pair.h...
1571 template<class _T1, class _T2>
1572 template<typename... _Args1, typename... _Args2>
1573 inline
1574 pair<_T1, _T2>::
1575 pair(piecewise_construct_t,
1576 tuple<_Args1...> __first, tuple<_Args2...> __second)
1577 : pair(__first, __second,
1578 typename _Build_index_tuple<sizeof...(_Args1)>::__type(),
1579 typename _Build_index_tuple<sizeof...(_Args2)>::__type())
1580 { }
1581
1582 template<class _T1, class _T2>
1583 template<typename... _Args1, std::size_t... _Indexes1,
1584 typename... _Args2, std::size_t... _Indexes2>
1585 inline
1586 pair<_T1, _T2>::
1587 pair(tuple<_Args1...>& __tuple1, tuple<_Args2...>& __tuple2,
1588 _Index_tuple<_Indexes1...>, _Index_tuple<_Indexes2...>)
1589 : first(std::forward<_Args1>(std::get<_Indexes1>(__tuple1))...),
1590 second(std::forward<_Args2>(std::get<_Indexes2>(__tuple2))...)
1591 { }
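
(These two constructors implement std::pair's piecewise construction: each member is built in place from its own argument tuple, expanded via the index packs, with no intermediate temporaries of _T1 or _T2:)

    #include <string>
    #include <tuple>
    #include <utility>

    int main() {
      std::pair<std::string, std::string> p(
          std::piecewise_construct,
          std::forward_as_tuple(3, 'x'),      // first  = string(3, 'x')
          std::forward_as_tuple("tail"));     // second = string("tail")
      return p.first == "xxx" && p.second == "tail" ? 0 : 1;
    }
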
1592
1593 /// @}
1594
1595_GLIBCXX_END_NAMESPACE_VERSION
1596} // namespace std
1597
1598#endif // C++11
1599
1600#endif // _GLIBCXX_TUPLE