Bug Summary

File: lib/CodeGen/InlineSpiller.cpp
Warning: line 1238, column 14
Called C++ object pointer is null
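
For context: the analyzer raises this warning class when it finds a feasible path on which a member function is invoked through a pointer it has constrained to null. A minimal, hypothetical example of the pattern (illustrative only, unrelated to the LLVM code below):

  struct S { void f(); };

  void g(S *p) {
    if (!p) {
      // error path that forgets to return; p is null past this point
    }
    p->f(); // warning: Called C++ object pointer is null (on the !p path)
  }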

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InlineSpiller.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-7~svn338205/lib/CodeGen -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn338205/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/lib/gcc/x86_64-linux-gnu/8/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/CodeGen -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-07-29-043837-17923-1 -x c++ /build/llvm-toolchain-snapshot-7~svn338205/lib/CodeGen/InlineSpiller.cpp -faddrsig

/build/llvm-toolchain-snapshot-7~svn338205/lib/CodeGen/InlineSpiller.cpp

1//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// The inline spiller modifies the machine function directly instead of
11// inserting spills and restores in VirtRegMap.
12//
13//===----------------------------------------------------------------------===//
14
15#include "LiveRangeCalc.h"
16#include "Spiller.h"
17#include "SplitKit.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/SmallPtrSet.h"
25#include "llvm/ADT/SmallVector.h"
26#include "llvm/ADT/Statistic.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/CodeGen/LiveInterval.h"
29#include "llvm/CodeGen/LiveIntervals.h"
30#include "llvm/CodeGen/LiveRangeEdit.h"
31#include "llvm/CodeGen/LiveStacks.h"
32#include "llvm/CodeGen/MachineBasicBlock.h"
33#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
34#include "llvm/CodeGen/MachineDominators.h"
35#include "llvm/CodeGen/MachineFunction.h"
36#include "llvm/CodeGen/MachineFunctionPass.h"
37#include "llvm/CodeGen/MachineInstr.h"
38#include "llvm/CodeGen/MachineInstrBuilder.h"
39#include "llvm/CodeGen/MachineInstrBundle.h"
40#include "llvm/CodeGen/MachineLoopInfo.h"
41#include "llvm/CodeGen/MachineOperand.h"
42#include "llvm/CodeGen/MachineRegisterInfo.h"
43#include "llvm/CodeGen/SlotIndexes.h"
44#include "llvm/CodeGen/TargetInstrInfo.h"
45#include "llvm/CodeGen/TargetOpcodes.h"
46#include "llvm/CodeGen/TargetRegisterInfo.h"
47#include "llvm/CodeGen/TargetSubtargetInfo.h"
48#include "llvm/CodeGen/VirtRegMap.h"
49#include "llvm/Config/llvm-config.h"
50#include "llvm/Support/BlockFrequency.h"
51#include "llvm/Support/BranchProbability.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Compiler.h"
54#include "llvm/Support/Debug.h"
55#include "llvm/Support/ErrorHandling.h"
56#include "llvm/Support/raw_ostream.h"
57#include <cassert>
58#include <iterator>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65#define DEBUG_TYPE "regalloc"
66
67STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
68STATISTIC(NumSnippets, "Number of spilled snippets");
69STATISTIC(NumSpills, "Number of spills inserted");
70STATISTIC(NumSpillsRemoved, "Number of spills removed");
71STATISTIC(NumReloads, "Number of reloads inserted");
72STATISTIC(NumReloadsRemoved, "Number of reloads removed");
73STATISTIC(NumFolded, "Number of folded stack accesses");
74STATISTIC(NumFoldedLoads, "Number of folded loads");
75STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
76
77static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
78 cl::desc("Disable inline spill hoisting"));
79
80namespace {
81
82class HoistSpillHelper : private LiveRangeEdit::Delegate {
83 MachineFunction &MF;
84 LiveIntervals &LIS;
85 LiveStacks &LSS;
86 AliasAnalysis *AA;
87 MachineDominatorTree &MDT;
88 MachineLoopInfo &Loops;
89 VirtRegMap &VRM;
90 MachineRegisterInfo &MRI;
91 const TargetInstrInfo &TII;
92 const TargetRegisterInfo &TRI;
93 const MachineBlockFrequencyInfo &MBFI;
94
95 InsertPointAnalysis IPA;
96
97 // Map from StackSlot to the LiveInterval of the original register.
98 // Note the LiveInterval of the original register may have been deleted
99 // after it is spilled. We keep a copy here to track the range where
100 // spills can be moved.
101 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
102
103 // Map from pair of (StackSlot and Original VNI) to a set of spills which
104 // have the same stackslot and have equal values defined by Original VNI.
105 // These spills are mergeable and are hoist candidates.
106 using MergeableSpillsMap =
107 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
108 MergeableSpillsMap MergeableSpills;
109
110 /// This is the map from original register to a set containing all its
111 /// siblings. To hoist a spill to another BB, we need to find out a live
112 /// sibling there and use it as the source of the new spill.
113 DenseMap<unsigned, SmallSetVector<unsigned, 16>> Virt2SiblingsMap;
114
115 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
116 MachineBasicBlock &BB, unsigned &LiveReg);
117
118 void rmRedundantSpills(
119 SmallPtrSet<MachineInstr *, 16> &Spills,
120 SmallVectorImpl<MachineInstr *> &SpillsToRm,
121 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
122
123 void getVisitOrders(
124 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
125 SmallVectorImpl<MachineDomTreeNode *> &Orders,
126 SmallVectorImpl<MachineInstr *> &SpillsToRm,
127 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
128 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
129
130 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
131 SmallPtrSet<MachineInstr *, 16> &Spills,
132 SmallVectorImpl<MachineInstr *> &SpillsToRm,
133 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
134
135public:
136 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
137 VirtRegMap &vrm)
138 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
139 LSS(pass.getAnalysis<LiveStacks>()),
140 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
141 MDT(pass.getAnalysis<MachineDominatorTree>()),
142 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
143 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
144 TRI(*mf.getSubtarget().getRegisterInfo()),
145 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
146 IPA(LIS, mf.getNumBlockIDs()) {}
147
148 void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
149 unsigned Original);
150 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
151 void hoistAllSpills();
152 void LRE_DidCloneVirtReg(unsigned, unsigned) override;
153};
154
155class InlineSpiller : public Spiller {
156 MachineFunction &MF;
157 LiveIntervals &LIS;
158 LiveStacks &LSS;
159 AliasAnalysis *AA;
160 MachineDominatorTree &MDT;
161 MachineLoopInfo &Loops;
162 VirtRegMap &VRM;
163 MachineRegisterInfo &MRI;
164 const TargetInstrInfo &TII;
165 const TargetRegisterInfo &TRI;
166 const MachineBlockFrequencyInfo &MBFI;
167
168 // Variables that are valid during spill(), but used by multiple methods.
169 LiveRangeEdit *Edit;
170 LiveInterval *StackInt;
171 int StackSlot;
172 unsigned Original;
173
174 // All registers to spill to StackSlot, including the main register.
175 SmallVector<unsigned, 8> RegsToSpill;
176
177 // All COPY instructions to/from snippets.
178 // They are ignored since both operands refer to the same stack slot.
179 SmallPtrSet<MachineInstr*, 8> SnippetCopies;
180
181 // Values that failed to remat at some point.
182 SmallPtrSet<VNInfo*, 8> UsedValues;
183
184 // Dead defs generated during spilling.
185 SmallVector<MachineInstr*, 8> DeadDefs;
186
187 // Object records spills information and does the hoisting.
188 HoistSpillHelper HSpiller;
189
190 ~InlineSpiller() override = default;
191
192public:
193 InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
194 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
195 LSS(pass.getAnalysis<LiveStacks>()),
196 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
197 MDT(pass.getAnalysis<MachineDominatorTree>()),
198 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
199 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
200 TRI(*mf.getSubtarget().getRegisterInfo()),
201 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
202 HSpiller(pass, mf, vrm) {}
203
204 void spill(LiveRangeEdit &) override;
205 void postOptimization() override;
206
207private:
208 bool isSnippet(const LiveInterval &SnipLI);
209 void collectRegsToSpill();
210
211 bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }
212
213 bool isSibling(unsigned Reg);
214 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
215 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
216
217 void markValueUsed(LiveInterval*, VNInfo*);
218 bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
219 void reMaterializeAll();
220
221 bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
222 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
223 MachineInstr *LoadMI = nullptr);
224 void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
225 void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
226
227 void spillAroundUses(unsigned Reg);
228 void spillAll();
229};
230
231} // end anonymous namespace
232
233Spiller::~Spiller() = default;
234
235void Spiller::anchor() {}
236
237Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
238 MachineFunction &mf,
239 VirtRegMap &vrm) {
240 return new InlineSpiller(pass, mf, vrm);
241}
242
243//===----------------------------------------------------------------------===//
244// Snippets
245//===----------------------------------------------------------------------===//
246
247// When spilling a virtual register, we also spill any snippets it is connected
248// to. The snippets are small live ranges that only have a single real use,
249// leftovers from live range splitting. Spilling them enables memory operand
250// folding or tightens the live range around the single use.
251//
252// This minimizes register pressure and maximizes the store-to-load distance for
253// spill slots which can be important in tight loops.
254
255/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
256/// otherwise return 0.
257static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
258 if (!MI.isFullCopy())
259 return 0;
260 if (MI.getOperand(0).getReg() == Reg)
261 return MI.getOperand(1).getReg();
262 if (MI.getOperand(1).getReg() == Reg)
263 return MI.getOperand(0).getReg();
264 return 0;
265}
266
267/// isSnippet - Identify if a live interval is a snippet that should be spilled.
268/// It is assumed that SnipLI is a virtual register with the same original as
269/// Edit->getReg().
270bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
271 unsigned Reg = Edit->getReg();
272
273 // A snippet is a tiny live range with only a single instruction using it
274 // besides copies to/from Reg or spills/fills. We accept:
275 //
276 // %snip = COPY %Reg / FILL fi#
277 // %snip = USE %snip
278 // %Reg = COPY %snip / SPILL %snip, fi#
279 //
280 if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
281 return false;
282
283 MachineInstr *UseMI = nullptr;
284
285 // Check that all uses satisfy our criteria.
286 for (MachineRegisterInfo::reg_instr_nodbg_iterator
287 RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
288 E = MRI.reg_instr_nodbg_end(); RI != E; ) {
289 MachineInstr &MI = *RI++;
290
291 // Allow copies to/from Reg.
292 if (isFullCopyOf(MI, Reg))
293 continue;
294
295 // Allow stack slot loads.
296 int FI;
297 if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
298 continue;
299
300 // Allow stack slot stores.
301 if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
302 continue;
303
304 // Allow a single additional instruction.
305 if (UseMI && &MI != UseMI)
306 return false;
307 UseMI = &MI;
308 }
309 return true;
310}
311
312/// collectRegsToSpill - Collect live range snippets that only have a single
313/// real use.
314void InlineSpiller::collectRegsToSpill() {
315 unsigned Reg = Edit->getReg();
316
317 // Main register always spills.
318 RegsToSpill.assign(1, Reg);
319 SnippetCopies.clear();
320
321 // Snippets all have the same original, so there can't be any for an original
322 // register.
323 if (Original == Reg)
324 return;
325
326 for (MachineRegisterInfo::reg_instr_iterator
327 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
328 MachineInstr &MI = *RI++;
329 unsigned SnipReg = isFullCopyOf(MI, Reg);
330 if (!isSibling(SnipReg))
331 continue;
332 LiveInterval &SnipLI = LIS.getInterval(SnipReg);
333 if (!isSnippet(SnipLI))
334 continue;
335 SnippetCopies.insert(&MI);
336 if (isRegToSpill(SnipReg))
337 continue;
338 RegsToSpill.push_back(SnipReg);
339 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
340 ++NumSnippets;
341 }
342}
343
344bool InlineSpiller::isSibling(unsigned Reg) {
345 return TargetRegisterInfo::isVirtualRegister(Reg) &&
346 VRM.getOriginal(Reg) == Original;
347}
348
349/// It is beneficial to spill to an earlier place in the same BB in cases
350/// such as the following:
351/// There is an alternative def earlier in the same MBB.
352/// Hoist the spill as far as possible in SpillMBB. This can ease
353/// register pressure:
354///
355/// x = def
356/// y = use x
357/// s = copy x
358///
359/// Hoisting the spill of s to immediately after the def removes the
360/// interference between x and y:
361///
362/// x = def
363/// spill x
364/// y = use killed x
365///
366/// This hoist only helps when the copy kills its source.
367///
368bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
369 MachineInstr &CopyMI) {
370 SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
371#ifndef NDEBUG
372 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
373 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
374#endif
375
376 unsigned SrcReg = CopyMI.getOperand(1).getReg();
377 LiveInterval &SrcLI = LIS.getInterval(SrcReg);
378 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
379 LiveQueryResult SrcQ = SrcLI.Query(Idx);
380 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
381 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
382 return false;
383
384 // Conservatively extend the stack slot range to the range of the original
385 // value. We may be able to do better with stack slot coloring by being more
386 // careful here.
387 assert(StackInt && "No stack slot assigned yet.");
388 LiveInterval &OrigLI = LIS.getInterval(Original);
389 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
390 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
391 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
392 << *StackInt << '\n');
393
394 // We are going to spill SrcVNI immediately after its def, so clear out
395 // any later spills of the same value.
396 eliminateRedundantSpills(SrcLI, SrcVNI);
397
398 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
399 MachineBasicBlock::iterator MII;
400 if (SrcVNI->isPHIDef())
401 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
402 else {
403 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
404 assert(DefMI && "Defining instruction disappeared");
405 MII = DefMI;
406 ++MII;
407 }
408 // Insert spill without kill flag immediately after def.
409 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
410 MRI.getRegClass(SrcReg), &TRI);
411 --MII; // Point to store instruction.
412 LIS.InsertMachineInstrInMaps(*MII);
413 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
414
415 HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
416 ++NumSpills;
417 return true;
418}
419
420/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
421/// redundant spills of this value in SLI.reg and sibling copies.
422void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
423 assert(VNI && "Missing value");
424 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
425 WorkList.push_back(std::make_pair(&SLI, VNI));
426 assert(StackInt && "No stack slot assigned yet.");
427
428 do {
429 LiveInterval *LI;
430 std::tie(LI, VNI) = WorkList.pop_back_val();
431 unsigned Reg = LI->reg;
432 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
433 << VNI->def << " in " << *LI << '\n');
434
435 // Regs to spill are taken care of.
436 if (isRegToSpill(Reg))
437 continue;
438
439 // Add all of VNI's live range to StackInt.
440 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
441 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
442
443 // Find all spills and copies of VNI.
444 for (MachineRegisterInfo::use_instr_nodbg_iterator
445 UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
446 UI != E; ) {
447 MachineInstr &MI = *UI++;
448 if (!MI.isCopy() && !MI.mayStore())
449 continue;
450 SlotIndex Idx = LIS.getInstructionIndex(MI);
451 if (LI->getVNInfoAt(Idx) != VNI)
452 continue;
453
454 // Follow sibling copies down the dominator tree.
455 if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
456 if (isSibling(DstReg)) {
457 LiveInterval &DstLI = LIS.getInterval(DstReg);
458 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
459 assert(DstVNI && "Missing defined value");
460 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
461 WorkList.push_back(std::make_pair(&DstLI, DstVNI));
462 }
463 continue;
464 }
465
466 // Erase spills.
467 int FI;
468 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
469 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
470 // eliminateDeadDefs won't normally remove stores, so switch opcode.
471 MI.setDesc(TII.get(TargetOpcode::KILL));
472 DeadDefs.push_back(&MI);
473 ++NumSpillsRemoved;
474 if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
475 --NumSpills;
476 }
477 }
478 } while (!WorkList.empty());
479}
480
481//===----------------------------------------------------------------------===//
482// Rematerialization
483//===----------------------------------------------------------------------===//
484
485/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
486/// instruction cannot be eliminated. See through snippet copies
487void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
488 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
489 WorkList.push_back(std::make_pair(LI, VNI));
490 do {
491 std::tie(LI, VNI) = WorkList.pop_back_val();
492 if (!UsedValues.insert(VNI).second)
493 continue;
494
495 if (VNI->isPHIDef()) {
496 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
497 for (MachineBasicBlock *P : MBB->predecessors()) {
498 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
499 if (PVNI)
500 WorkList.push_back(std::make_pair(LI, PVNI));
501 }
502 continue;
503 }
504
505 // Follow snippet copies.
506 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
507 if (!SnippetCopies.count(MI))
508 continue;
509 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
510 assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
511 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
512 assert(SnipVNI && "Snippet undefined before copy");
513 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
514 } while (!WorkList.empty());
515}
516
517/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
518bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
519 // Analyze instruction
520 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
521 MIBundleOperands::VirtRegInfo RI =
522 MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
523
524 if (!RI.Reads)
525 return false;
526
527 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
528 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
529
530 if (!ParentVNI) {
531 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
532 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
533 MachineOperand &MO = MI.getOperand(i);
534 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
535 MO.setIsUndef();
536 }
537 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
538 return true;
539 }
540
541 if (SnippetCopies.count(&MI))
542 return false;
543
544 LiveInterval &OrigLI = LIS.getInterval(Original);
545 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
546 LiveRangeEdit::Remat RM(ParentVNI);
547 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
548
549 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
550 markValueUsed(&VirtReg, ParentVNI);
551 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
552 return false;
553 }
554
555 // If the instruction also writes VirtReg.reg, it had better not require the
556 // same register for uses and defs.
557 if (RI.Tied) {
558 markValueUsed(&VirtReg, ParentVNI);
559 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
560 return false;
561 }
562
563 // Before rematerializing into a register for a single instruction, try to
564 // fold a load into the instruction. That avoids allocating a new register.
565 if (RM.OrigMI->canFoldAsLoad() &&
566 foldMemoryOperand(Ops, RM.OrigMI)) {
567 Edit->markRematerialized(RM.ParentVNI);
568 ++NumFoldedLoads;
569 return true;
570 }
571
572 // Allocate a new register for the remat.
573 unsigned NewVReg = Edit->createFrom(Original);
574
575 // Finally we can rematerialize OrigMI before MI.
576 SlotIndex DefIdx =
577 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
578
579 // We take the DebugLoc from MI, since OrigMI may be attributed to a
580 // different source location.
581 auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
582 NewMI->setDebugLoc(MI.getDebugLoc());
583
584 (void)DefIdx;
585 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
586 << *LIS.getInstructionFromIndex(DefIdx));
587
588 // Replace operands
589 for (const auto &OpPair : Ops) {
590 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
591 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
592 MO.setReg(NewVReg);
593 MO.setIsKill();
594 }
595 }
596 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
597
598 ++NumRemats;
599 return true;
600}
601
602/// reMaterializeAll - Try to rematerialize as many uses as possible,
603/// and trim the live ranges after.
604void InlineSpiller::reMaterializeAll() {
605 if (!Edit->anyRematerializable(AA))
606 return;
607
608 UsedValues.clear();
609
610 // Try to remat before all uses of snippets.
611 bool anyRemat = false;
612 for (unsigned Reg : RegsToSpill) {
613 LiveInterval &LI = LIS.getInterval(Reg);
614 for (MachineRegisterInfo::reg_bundle_iterator
615 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
616 RegI != E; ) {
617 MachineInstr &MI = *RegI++;
618
619 // Debug values are not allowed to affect codegen.
620 if (MI.isDebugValue())
621 continue;
622
623 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
624 "instruction that isn't a DBG_VALUE");
625
626 anyRemat |= reMaterializeFor(LI, MI);
627 }
628 }
629 if (!anyRemat)
630 return;
631
632 // Remove any values that were completely rematted.
633 for (unsigned Reg : RegsToSpill) {
634 LiveInterval &LI = LIS.getInterval(Reg);
635 for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
636 I != E; ++I) {
637 VNInfo *VNI = *I;
638 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
639 continue;
640 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
641 MI->addRegisterDead(Reg, &TRI);
642 if (!MI->allDefsAreDead())
643 continue;
644 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
645 DeadDefs.push_back(MI);
646 }
647 }
648
649 // Eliminate dead code after remat. Note that some snippet copies may be
650 // deleted here.
651 if (DeadDefs.empty())
652 return;
653 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
654 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
655
656 // LiveRangeEdit::eliminateDeadDef is used to remove dead defining instructions
657 // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
658 // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
659 // removed, PHI VNIs are still left in the LiveInterval.
660 // So to get rid of an unused reg, we need to check whether it has a non-dbg
661 // reference instead of whether it has a non-empty interval.
662 unsigned ResultPos = 0;
663 for (unsigned Reg : RegsToSpill) {
664 if (MRI.reg_nodbg_empty(Reg)) {
665 Edit->eraseVirtReg(Reg);
666 continue;
667 }
668
669 assert(LIS.hasInterval(Reg) &&
670 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
671 "Empty and not used live-range?!");
672
673 RegsToSpill[ResultPos++] = Reg;
674 }
675 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
676 LLVM_DEBUG(dbgs() << RegsToSpill.size()
677 << " registers to spill after remat.\n");
678}
679
680//===----------------------------------------------------------------------===//
681// Spilling
682//===----------------------------------------------------------------------===//
683
684/// If MI is a load or store of StackSlot, it can be removed.
685bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
686 int FI = 0;
687 unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
688 bool IsLoad = InstrReg;
689 if (!IsLoad)
690 InstrReg = TII.isStoreToStackSlot(*MI, FI);
691
692 // We have a stack access. Is it the right register and slot?
693 if (InstrReg != Reg || FI != StackSlot)
694 return false;
695
696 if (!IsLoad)
697 HSpiller.rmFromMergeableSpills(*MI, StackSlot);
698
699 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
700 LIS.RemoveMachineInstrFromMaps(*MI);
701 MI->eraseFromParent();
702
703 if (IsLoad) {
704 ++NumReloadsRemoved;
705 --NumReloads;
706 } else {
707 ++NumSpillsRemoved;
708 --NumSpills;
709 }
710
711 return true;
712}
713
714#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
715LLVM_DUMP_METHOD
716// Dump the range of instructions from B to E with their slot indexes.
717static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
718 MachineBasicBlock::iterator E,
719 LiveIntervals const &LIS,
720 const char *const header,
721 unsigned VReg = 0) {
722 char NextLine = '\n';
723 char SlotIndent = '\t';
724
725 if (std::next(B) == E) {
726 NextLine = ' ';
727 SlotIndent = ' ';
728 }
729
730 dbgs() << '\t' << header << ": " << NextLine;
731
732 for (MachineBasicBlock::iterator I = B; I != E; ++I) {
733 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
734
735 // If a register was passed in and this instruction has it as a
736 // destination that is marked as an early clobber, print the
737 // early-clobber slot index.
738 if (VReg) {
739 MachineOperand *MO = I->findRegisterDefOperand(VReg);
740 if (MO && MO->isEarlyClobber())
741 Idx = Idx.getRegSlot(true);
742 }
743
744 dbgs() << SlotIndent << Idx << '\t' << *I;
745 }
746}
747#endif
748
749/// foldMemoryOperand - Try folding stack slot references in Ops into their
750/// instructions.
751///
752/// @param Ops Operand indices from analyzeVirtReg().
753/// @param LoadMI Load instruction to use instead of stack slot when non-null.
754/// @return True on success.
755bool InlineSpiller::
756foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
757 MachineInstr *LoadMI) {
758 if (Ops.empty())
759 return false;
760 // Don't attempt folding in bundles.
761 MachineInstr *MI = Ops.front().first;
762 if (Ops.back().first != MI || MI->isBundled())
763 return false;
764
765 bool WasCopy = MI->isCopy();
766 unsigned ImpReg = 0;
767
768 // Spill subregs if the target allows it.
769 // We always want to spill subregs for stackmap/patchpoint pseudos.
770 bool SpillSubRegs = TII.isSubregFoldable() ||
771 MI->getOpcode() == TargetOpcode::STATEPOINT ||
772 MI->getOpcode() == TargetOpcode::PATCHPOINT ||
773 MI->getOpcode() == TargetOpcode::STACKMAP;
774
775 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
776 // operands.
777 SmallVector<unsigned, 8> FoldOps;
778 for (const auto &OpPair : Ops) {
779 unsigned Idx = OpPair.second;
780 assert(MI == OpPair.first && "Instruction conflict during operand folding");
781 MachineOperand &MO = MI->getOperand(Idx);
782 if (MO.isImplicit()) {
783 ImpReg = MO.getReg();
784 continue;
785 }
786
787 if (!SpillSubRegs && MO.getSubReg())
788 return false;
789 // We cannot fold a load instruction into a def.
790 if (LoadMI && MO.isDef())
791 return false;
792 // Tied use operands should not be passed to foldMemoryOperand.
793 if (!MI->isRegTiedToDefOperand(Idx))
794 FoldOps.push_back(Idx);
795 }
796
797 // If we only have implicit uses, we won't be able to fold that.
798 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
799 if (FoldOps.empty())
800 return false;
801
802 MachineInstrSpan MIS(MI);
803
804 MachineInstr *FoldMI =
805 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
806 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS);
807 if (!FoldMI)
808 return false;
809
810 // Remove LIS for any dead defs in the original MI not in FoldMI.
811 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
812 if (!MO->isReg())
813 continue;
814 unsigned Reg = MO->getReg();
815 if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
816 MRI.isReserved(Reg)) {
817 continue;
818 }
819 // Skip non-Defs, including undef uses and internal reads.
820 if (MO->isUse())
821 continue;
822 MIBundleOperands::PhysRegInfo RI =
823 MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
824 if (RI.FullyDefined)
825 continue;
826 // FoldMI does not define this physreg. Remove the LI segment.
827 assert(MO->isDead() && "Cannot fold physreg def");
828 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
829 LIS.removePhysRegDefAt(Reg, Idx);
830 }
831
832 int FI;
833 if (TII.isStoreToStackSlot(*MI, FI) &&
834 HSpiller.rmFromMergeableSpills(*MI, FI))
835 --NumSpills;
836 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
837 MI->eraseFromParent();
838
839 // Insert any new instructions other than FoldMI into the LIS maps.
840 assert(!MIS.empty() && "Unexpected empty span of instructions!");
841 for (MachineInstr &MI : MIS)
842 if (&MI != FoldMI)
843 LIS.InsertMachineInstrInMaps(MI);
844
845 // TII.foldMemoryOperand may have left some implicit operands on the
846 // instruction. Strip them.
847 if (ImpReg)
848 for (unsigned i = FoldMI->getNumOperands(); i; --i) {
849 MachineOperand &MO = FoldMI->getOperand(i - 1);
850 if (!MO.isReg() || !MO.isImplicit())
851 break;
852 if (MO.getReg() == ImpReg)
853 FoldMI->RemoveOperand(i - 1);
854 }
855
856 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
857 "folded"));
858
859 if (!WasCopy)
860 ++NumFolded;
861 else if (Ops.front().second == 0) {
862 ++NumSpills;
863 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
864 } else
865 ++NumReloads;
866 return true;
867}
868
869void InlineSpiller::insertReload(unsigned NewVReg,
870 SlotIndex Idx,
871 MachineBasicBlock::iterator MI) {
872 MachineBasicBlock &MBB = *MI->getParent();
873
874 MachineInstrSpan MIS(MI);
875 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
876 MRI.getRegClass(NewVReg), &TRI);
877
878 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
879
880 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
881 NewVReg));
882 ++NumReloads;
883}
884
885/// Check if \p Def fully defines a VReg with an undefined value.
886/// If that's the case, that means the value of VReg is actually
887/// not relevant.
888static bool isFullUndefDef(const MachineInstr &Def) {
889 if (!Def.isImplicitDef())
890 return false;
891 assert(Def.getNumOperands() == 1 &&
892 "Implicit def with more than one definition");
893 // We can say that the VReg defined by Def is undef, only if it is
894 // fully defined by Def. Otherwise, some of the lanes may not be
895 // undef and the value of the VReg matters.
896 return !Def.getOperand(0).getSubReg();
897}
898
899/// insertSpill - Insert a spill of NewVReg after MI.
900void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
901 MachineBasicBlock::iterator MI) {
902 MachineBasicBlock &MBB = *MI->getParent();
903
904 MachineInstrSpan MIS(MI);
905 bool IsRealSpill = true;
906 if (isFullUndefDef(*MI)) {
907 // Don't spill undef value.
908 // Anything works for undef, in particular keeping the memory
909 // uninitialized is a viable option and it saves code size and
910 // run time.
911 BuildMI(MBB, std::next(MI), MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
912 .addReg(NewVReg, getKillRegState(isKill));
913 IsRealSpill = false;
914 } else
915 TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
916 MRI.getRegClass(NewVReg), &TRI);
917
918 LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
919
920 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
921 "spill"));
922 ++NumSpills;
923 if (IsRealSpill)
924 HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
925}
926
927/// spillAroundUses - insert spill code around each use of Reg.
928void InlineSpiller::spillAroundUses(unsigned Reg) {
929 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
930 LiveInterval &OldLI = LIS.getInterval(Reg);
931
932 // Iterate over instructions using Reg.
933 for (MachineRegisterInfo::reg_bundle_iterator
934 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
935 RegI != E; ) {
936 MachineInstr *MI = &*(RegI++);
937
938 // Debug values are not allowed to affect codegen.
939 if (MI->isDebugValue()) {
940 // Modify DBG_VALUE now that the value is in a spill slot.
941 MachineBasicBlock *MBB = MI->getParent();
942 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
943 buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
944 MBB->erase(MI);
945 continue;
946 }
947
948 assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
949 "instruction that isn't a DBG_VALUE");
950
951 // Ignore copies to/from snippets. We'll delete them.
952 if (SnippetCopies.count(MI))
953 continue;
954
955 // Stack slot accesses may coalesce away.
956 if (coalesceStackAccess(MI, Reg))
957 continue;
958
959 // Analyze instruction.
960 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
961 MIBundleOperands::VirtRegInfo RI =
962 MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);
963
964 // Find the slot index where this instruction reads and writes OldLI.
965 // This is usually the def slot, except for tied early clobbers.
966 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
967 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
968 if (SlotIndex::isSameInstr(Idx, VNI->def))
969 Idx = VNI->def;
970
971 // Check for a sibling copy.
972 unsigned SibReg = isFullCopyOf(*MI, Reg);
973 if (SibReg && isSibling(SibReg)) {
974 // This may actually be a copy between snippets.
975 if (isRegToSpill(SibReg)) {
976 LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
977 SnippetCopies.insert(MI);
978 continue;
979 }
980 if (RI.Writes) {
981 if (hoistSpillInsideBB(OldLI, *MI)) {
982 // This COPY is now dead, the value is already in the stack slot.
983 MI->getOperand(0).setIsDead();
984 DeadDefs.push_back(MI);
985 continue;
986 }
987 } else {
988 // This is a reload for a sib-reg copy. Drop spills downstream.
989 LiveInterval &SibLI = LIS.getInterval(SibReg);
990 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
991 // The COPY will fold to a reload below.
992 }
993 }
994
995 // Attempt to fold memory ops.
996 if (foldMemoryOperand(Ops))
997 continue;
998
999 // Create a new virtual register for spill/fill.
1000 // FIXME: Infer regclass from instruction alone.
1001 unsigned NewVReg = Edit->createFrom(Reg);
1002
1003 if (RI.Reads)
1004 insertReload(NewVReg, Idx, MI);
1005
1006 // Rewrite instruction operands.
1007 bool hasLiveDef = false;
1008 for (const auto &OpPair : Ops) {
1009 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1010 MO.setReg(NewVReg);
1011 if (MO.isUse()) {
1012 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1013 MO.setIsKill();
1014 } else {
1015 if (!MO.isDead())
1016 hasLiveDef = true;
1017 }
1018 }
1019 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
1020
1021 // FIXME: Use a second vreg if instruction has no tied ops.
1022 if (RI.Writes)
1023 if (hasLiveDef)
1024 insertSpill(NewVReg, true, MI);
1025 }
1026}
1027
1028/// spillAll - Spill all registers remaining after rematerialization.
1029void InlineSpiller::spillAll() {
1030 // Update LiveStacks now that we are committed to spilling.
1031 if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1032 StackSlot = VRM.assignVirt2StackSlot(Original);
1033 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1034 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1035 } else
1036 StackInt = &LSS.getInterval(StackSlot);
1037
1038 if (Original != Edit->getReg())
1039 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1040
1041 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1042 for (unsigned Reg : RegsToSpill)
1043 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1044 StackInt->getValNumInfo(0));
1045 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1046
1047 // Spill around uses of all RegsToSpill.
1048 for (unsigned Reg : RegsToSpill)
1049 spillAroundUses(Reg);
1050
1051 // Hoisted spills may cause dead code.
1052 if (!DeadDefs.empty()) {
1053 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1054 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1055 }
1056
1057 // Finally delete the SnippetCopies.
1058 for (unsigned Reg : RegsToSpill) {
1059 for (MachineRegisterInfo::reg_instr_iterator
1060 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
1061 RI != E; ) {
1062 MachineInstr &MI = *(RI++);
1063 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1064 // FIXME: Do this with a LiveRangeEdit callback.
1065 LIS.RemoveMachineInstrFromMaps(MI);
1066 MI.eraseFromParent();
1067 }
1068 }
1069
1070 // Delete all spilled registers.
1071 for (unsigned Reg : RegsToSpill)
1072 Edit->eraseVirtReg(Reg);
1073}
1074
1075void InlineSpiller::spill(LiveRangeEdit &edit) {
1076 ++NumSpilledRanges;
1077 Edit = &edit;
1078 assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
1079 && "Trying to spill a stack slot.");
1080 // Share a stack slot among all descendants of Original.
1081 Original = VRM.getOriginal(edit.getReg());
1082 StackSlot = VRM.getStackSlot(Original);
1083 StackInt = nullptr;
1084
1085 LLVM_DEBUG(dbgs() << "Inline spilling "
1086 << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1087 << ':' << edit.getParent() << "\nFrom original "
1088 << printReg(Original) << '\n');
1089 assert(edit.getParent().isSpillable() &&
1090 "Attempting to spill already spilled value.");
1091 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1092
1093 collectRegsToSpill();
1094 reMaterializeAll();
1095
1096 // Remat may handle everything.
1097 if (!RegsToSpill.empty())
1098 spillAll();
1099
1100 Edit->calculateRegClassAndHint(MF, Loops, MBFI);
1101}
1102
1103/// Optimizations after all the reg selections and spills are done.
1104void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1. Calling 'HoistSpillHelper::hoistAllSpills'
1105
1106/// When a spill is inserted, add the spill to MergeableSpills map.
1107void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1108 unsigned Original) {
1109 BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1110 LiveInterval &OrigLI = LIS.getInterval(Original);
1111 // Save a copy of the LiveInterval in StackSlotToOrigLI because the
1112 // original LiveInterval may be cleared after all its references are spilled.
1113 if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1114 auto LI = llvm::make_unique<LiveInterval>(OrigLI.reg, OrigLI.weight);
1115 LI->assign(OrigLI, Allocator);
1116 StackSlotToOrigLI[StackSlot] = std::move(LI);
1117 }
1118 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1119 VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1120 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1121 MergeableSpills[MIdx].insert(&Spill);
1122}
1123
1124/// When a spill is removed, remove the spill from MergeableSpills map.
1125/// Return true if the spill is removed successfully.
1126bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1127 int StackSlot) {
1128 auto It = StackSlotToOrigLI.find(StackSlot);
1129 if (It == StackSlotToOrigLI.end())
1130 return false;
1131 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1132 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1133 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1134 return MergeableSpills[MIdx].erase(&Spill);
1135}
1136
1137/// Check BB to see if it is a possible target BB to place a hoisted spill,
1138/// i.e., there should be a live sibling of OrigReg at the insert point.
1139bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1140 MachineBasicBlock &BB, unsigned &LiveReg) {
1141 SlotIndex Idx;
1142 unsigned OrigReg = OrigLI.reg;
1143 MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
1144 if (MI != BB.end())
1145 Idx = LIS.getInstructionIndex(*MI);
1146 else
1147 Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
1148 SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1149 assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1150
1151 for (auto const SibReg : Siblings) {
1152 LiveInterval &LI = LIS.getInterval(SibReg);
1153 VNInfo *VNI = LI.getVNInfoAt(Idx);
1154 if (VNI) {
1155 LiveReg = SibReg;
1156 return true;
1157 }
1158 }
1159 return false;
1160}
1161
1162/// Remove redundant spills in the same BB. Save those redundant spills in
1163/// SpillsToRm, and save the spill to keep and its BB in the SpillBBToSpill map.
1164void HoistSpillHelper::rmRedundantSpills(
1165 SmallPtrSet<MachineInstr *, 16> &Spills,
1166 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1167 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1168 // For each spill seen, check SpillBBToSpill[] to see if its BB already has
1169 // another spill inside. If a BB contains more than one spill, keep only the
1170 // earlier spill, i.e., the one with the smaller SlotIndex.
1171 for (const auto CurrentSpill : Spills) {
1172 MachineBasicBlock *Block = CurrentSpill->getParent();
1173 MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1174 MachineInstr *PrevSpill = SpillBBToSpill[Node];
1175 if (PrevSpill) {
1176 SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1177 SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1178 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1179 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1180 SpillsToRm.push_back(SpillToRm);
1181 SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1182 } else {
1183 SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1184 }
1185 }
1186 for (const auto SpillToRm : SpillsToRm)
1187 Spills.erase(SpillToRm);
1188}
1189
1190/// Starting from \p Root find a top-down traversal order of the dominator
1191/// tree to visit all basic blocks containing the elements of \p Spills.
1192/// Redundant spills will be found and put into \p SpillsToRm at the same
1193/// time. \p SpillBBToSpill will be populated as part of the process and
1194/// maps a basic block to the first store occurring in the basic block.
1195/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1196void HoistSpillHelper::getVisitOrders(
1197 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1198 SmallVectorImpl<MachineDomTreeNode *> &Orders,
1199 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1200 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1201 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1202 // The set contains all the possible BB nodes to which we may hoist
1203 // original spills.
1204 SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1205 // Save the BB nodes on the path from the first BB node containing
1206 // a non-redundant spill to the Root node.
1207 SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1208 // All the spills to be hoisted must originate from a single def instruction
1209 // of OrigReg. This means the def instruction should dominate all the spills
1210 // to be hoisted. We choose the BB where the def instruction is located as
1211 // the Root.
1212 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1213 // For every node in the dominator tree that contains a spill, walk up the
1214 // dominator tree towards the Root node until it is reached. If another node
1215 // containing a spill lies in the middle of the path, the spill seen earlier
1216 // is redundant and the node containing it will be removed. All the nodes on
1217 // the path from the first node with a non-redundant spill to the Root node
1218 // will be added to the WorkSet, which will contain all the possible
1219 // locations to which spills may be hoisted once the loop below is done.
1220 for (const auto Spill : Spills) {
1221 MachineBasicBlock *Block = Spill->getParent();
1222 MachineDomTreeNode *Node = MDT[Block];
7. Calling 'MachineDominatorTree::operator[]'
24. Returning from 'MachineDominatorTree::operator[]'
25. 'Node' initialized here
1223 MachineInstr *SpillToRm = nullptr;
1224 while (Node != RootIDomNode) {
26. Assuming 'Node' is not equal to 'RootIDomNode'
27. Loop condition is true. Entering loop body
1225 // If Node dominates Block, and it already contains a spill, the spill in
1226 // Block will be redundant.
1227 if (Node != MDT[Block] && SpillBBToSpill[Node]) {
28. Assuming the condition is false
29. Assuming pointer value is null
1228 SpillToRm = SpillBBToSpill[MDT[Block]];
1229 break;
1230 /// If we see the Node already in the WorkSet, the path from the Node to
1231 /// the Root node must already have been traversed by another spill, so
1232 /// there is no need to repeat it.
1233 } else if (WorkSet.count(Node)) {
30. Assuming the condition is false
31. Taking false branch
1234 break;
1235 } else {
1236 NodesOnPath.insert(Node);
1237 }
1238 Node = Node->getIDom();
32. Called C++ object pointer is null
1239 }
1240 if (SpillToRm) {
1241 SpillsToRm.push_back(SpillToRm);
1242 } else {
1243 // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1244 // set the initial status before hoisting starts. The value for BBs
1245 // containing original spills is set to 0, in order to distinguish them
1246 // from BBs containing hoisted spills, which will be inserted into
1247 // SpillsToKeep later during hoisting.
1248 SpillsToKeep[MDT[Block]] = 0;
1249 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1250 }
1251 NodesOnPath.clear();
1252 }
1253
1254 // Sort the nodes in WorkSet in top-down order and save the nodes
1255 // in Orders. Orders will be used for hoisting in runHoistSpills.
1256 unsigned idx = 0;
1257 Orders.push_back(MDT.getBase().getNode(Root));
1258 do {
1259 MachineDomTreeNode *Node = Orders[idx++];
1260 const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
1261 unsigned NumChildren = Children.size();
1262 for (unsigned i = 0; i != NumChildren; ++i) {
1263 MachineDomTreeNode *Child = Children[i];
1264 if (WorkSet.count(Child))
1265 Orders.push_back(Child);
1266 }
1267 } while (idx != Orders.size());
1268 assert(Orders.size() == WorkSet.size() &&
1269 "Orders have different size with WorkSet");
1270
1271#ifndef NDEBUG
1272 LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1273 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1274 for (; RIt != Orders.rend(); RIt++)
1275 LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1276 LLVM_DEBUG(dbgs() << "\n");
1277#endif
1278}
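
The path the analyzer reports boils down to this: MDT[Block] forwards to DominatorTreeBase::getNode(), which (per its documentation further below) may return null for a statically unreachable block, and getVisitOrders() never checks the returned Node before the walk reaches 'Node = Node->getIDom()' at line 1238. The following is a minimal hardening sketch of one possible guard, assuming the surrounding getVisitOrders() context; it is an illustration, not the upstream fix:

    for (const auto Spill : Spills) {
      MachineBasicBlock *Block = Spill->getParent();
      MachineDomTreeNode *Node = MDT[Block];
      // Guard: an unreachable block has no dominator tree node; skip it.
      if (!Node)
        continue;
      MachineInstr *SpillToRm = nullptr;
      // Tolerating a null Node in the loop condition also stops the walk
      // once getIDom() returns null at the dominator tree root.
      while (Node && Node != RootIDomNode) {
        if (Node != MDT[Block] && SpillBBToSpill[Node]) {
          SpillToRm = SpillBBToSpill[MDT[Block]];
          break;
        } else if (WorkSet.count(Node)) {
          break;
        } else {
          NodesOnPath.insert(Node);
        }
        Node = Node->getIDom(); // Non-null here by the loop condition.
      }
      // ... rest of the loop body unchanged from getVisitOrders above ...
    }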
1279
1280/// Try to hoist spills according to BB hotness. The spills to be removed will
1281/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1282/// \p SpillsToIns.
1283void HoistSpillHelper::runHoistSpills(
1284 LiveInterval &OrigLI, VNInfo &OrigVNI,
1285 SmallPtrSet<MachineInstr *, 16> &Spills,
1286 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1287 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1288 // Visit order of dominator tree nodes.
1289 SmallVector<MachineDomTreeNode *, 32> Orders;
1290 // SpillsToKeep contains all the nodes where spills are to be inserted
1291 // during hoisting. If the spill to be inserted is an original spill
1292 // (not a hoisted one), the value of the map entry is 0. If the spill
1293 // is a hoisted spill, the value of the map entry is the VReg to be used
1294 // as the source of the spill.
1295 DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1296 // Map from BB to the first spill inside of it.
1297 DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1298
1299 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1300
1301 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1302 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
6. Calling 'HoistSpillHelper::getVisitOrders'
1303 SpillBBToSpill);
1304
1305 // SpillsInSubTreeMap keeps a map from a dom tree node to a pair of a
1306 // node set and the cost of all the spills inside those nodes.
1307 // The node set holds the locations where spills are to be inserted
1308 // in the subtree of the current node.
1309 using NodesCostPair =
1310 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1311 DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1312
1313 // Iterate Orders set in reverse order, which will be a bottom-up order
1314 // in the dominator tree. Once we visit a dom tree node, we know its
1315 // children have already been visited and the spill locations in the
1316 // subtrees of all the children have been determined.
1317 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1318 for (; RIt != Orders.rend(); RIt++) {
1319 MachineBasicBlock *Block = (*RIt)->getBlock();
1320
1321 // If Block contains an original spill, simply continue.
1322 if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1323 SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1324 // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1325 SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1326 continue;
1327 }
1328
1329 // Collect spills in subtree of current node (*RIt) to
1330 // SpillsInSubTreeMap[*RIt].first.
1331 const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();
1332 unsigned NumChildren = Children.size();
1333 for (unsigned i = 0; i != NumChildren; ++i) {
1334 MachineDomTreeNode *Child = Children[i];
1335 if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1336 continue;
1337 // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1338 // should be placed before getting the begin and end iterators of
1339 // SpillsInSubTreeMap[Child].first, or else the iterators may be
1340 // invalidated when SpillsInSubTreeMap[*RIt] is inserted for the first
1341 // time, which grows the map and moves its original buckets.
1342 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1343 SpillsInSubTreeMap[*RIt].first;
1344 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1345 SubTreeCost += SpillsInSubTreeMap[Child].second;
1346 auto BI = SpillsInSubTreeMap[Child].first.begin();
1347 auto EI = SpillsInSubTreeMap[Child].first.end();
1348 SpillsInSubTree.insert(BI, EI);
1349 SpillsInSubTreeMap.erase(Child);
1350 }
1351
1352 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1353 SpillsInSubTreeMap[*RIt].first;
1354 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1355 // No spills in subtree, simply continue.
1356 if (SpillsInSubTree.empty())
1357 continue;
1358
1359 // Check whether Block is a possible candidate to insert spill.
1360 unsigned LiveReg = 0;
1361 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1362 continue;
1363
1364 // If there are multiple spills that could be merged, bias a little
1365 // to hoist the spill.
1366 BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1367 ? BranchProbability(9, 10)
1368 : BranchProbability(1, 1);
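// Worked example (illustrative numbers): with SubTreeCost == 100 and
// MBFI.getBlockFreq(Block) == 105, a subtree holding more than one spill
// is hoisted, since 100 > 105 * 9/10 == 94.5, while a subtree with a
// single spill is not, since 100 < 105 * 1/1.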
1369 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1370 // Hoist: Move spills to current Block.
1371 for (const auto SpillBB : SpillsInSubTree) {
1372 // When SpillBB is a BB containing an original spill, add that spill
1373 // to SpillsToRm.
1374 if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1375 !SpillsToKeep[SpillBB]) {
1376 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1377 SpillsToRm.push_back(SpillToRm);
1378 }
1379 // SpillBB will no longer contain a spill, so remove it from SpillsToKeep.
1380 SpillsToKeep.erase(SpillBB);
1381 }
1382 // Current Block is the BB containing the new hoisted spill. Add it to
1383 // SpillsToKeep. LiveReg is the source of the new spill.
1384 SpillsToKeep[*RIt] = LiveReg;
1385 LLVM_DEBUG({
1386 dbgs() << "spills in BB: ";
1387 for (const auto Rspill : SpillsInSubTree)
1388 dbgs() << Rspill->getBlock()->getNumber() << " ";
1389 dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1390 << "\n";
1391 });
1392 SpillsInSubTree.clear();
1393 SpillsInSubTree.insert(*RIt);
1394 SubTreeCost = MBFI.getBlockFreq(Block);
1395 }
1396 }
1397 // For spills in SpillsToKeep with LiveReg set (i.e., not an original spill),
1398 // save them to SpillsToIns.
1399 for (const auto Ent : SpillsToKeep) {
1400 if (Ent.second)
1401 SpillsToIns[Ent.first->getBlock()] = Ent.second;
1402 }
1403}
1404
1405/// For spills with equal values, remove redundant spills and hoist those left
1406/// to less hot spots.
1407///
1408/// Spills with equal values are collected into the same set in
1409/// MergeableSpills when each spill is inserted. These equal spills originate
1410/// from the same defining instruction and are dominated by that instruction.
1411/// Before hoisting all the equal spills, redundant spills inside the same
1412/// BB are first marked to be deleted. Then, starting from the spills left, walk
1413/// up the dominator tree towards the Root node where the defining instruction
1414/// is located, mark the dominated spills to be deleted along the way, and
1415/// collect the BB nodes on the path from non-dominated spills to the defining
1416/// instruction into a WorkSet. The nodes in the WorkSet are the candidate
1417/// places where we consider hoisting the spills. We iterate the WorkSet in
1418/// bottom-up order, and for each node we decide whether to hoist the spills
1419/// inside its subtree to that node. In this way, we can benefit locally
1420/// even if hoisting all the equal spills to one cold place is impossible.
1421void HoistSpillHelper::hoistAllSpills() {
1422 SmallVector<unsigned, 4> NewVRegs;
1423 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1424
1425 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
2. Assuming 'i' is equal to 'e'
3. Loop condition is false. Execution continues on line 1433
1426 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
1427 unsigned Original = VRM.getPreSplitReg(Reg);
1428 if (!MRI.def_empty(Reg))
1429 Virt2SiblingsMap[Original].insert(Reg);
1430 }
1431
1432 // Each entry in MergeableSpills contains a spill set with equal values.
1433 for (auto &Ent : MergeableSpills) {
1434 int Slot = Ent.first.first;
1435 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1436 VNInfo *OrigVNI = Ent.first.second;
1437 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1438 if (Ent.second.empty())
4. Taking false branch
1439 continue;
1440
1441 LLVM_DEBUG({
1442 dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1443 << "Equal spills in BB: ";
1444 for (const auto spill : EqValSpills)
1445 dbgs() << spill->getParent()->getNumber() << " ";
1446 dbgs() << "\n";
1447 });
1448
1449 // SpillsToRm is the spill set to be removed from EqValSpills.
1450 SmallVector<MachineInstr *, 16> SpillsToRm;
1451 // SpillsToIns is the spill set to be newly inserted after hoisting.
1452 DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1453
1454 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
5. Calling 'HoistSpillHelper::runHoistSpills'
1455
1456 LLVM_DEBUG({
1457 dbgs() << "Finally inserted spills in BB: ";
1458 for (const auto Ispill : SpillsToIns)
1459 dbgs() << Ispill.first->getNumber() << " ";
1460 dbgs() << "\nFinally removed spills in BB: ";
1461 for (const auto Rspill : SpillsToRm)
1462 dbgs() << Rspill->getParent()->getNumber() << " ";
1463 dbgs() << "\n";
1464 });
1465
1466 // Stack live range update.
1467 LiveInterval &StackIntvl = LSS.getInterval(Slot);
1468 if (!SpillsToIns.empty() || !SpillsToRm.empty())
1469 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1470 StackIntvl.getValNumInfo(0));
1471
1472 // Insert hoisted spills.
1473 for (auto const Insert : SpillsToIns) {
1474 MachineBasicBlock *BB = Insert.first;
1475 unsigned LiveReg = Insert.second;
1476 MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
1477 TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
1478 MRI.getRegClass(LiveReg), &TRI);
1479 LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
1480 ++NumSpills;
1481 }
1482
1483 // Remove redundant spills or change them to dead instructions.
1484 NumSpills -= SpillsToRm.size();
1485 for (auto const RMEnt : SpillsToRm) {
1486 RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1487 for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1488 MachineOperand &MO = RMEnt->getOperand(i - 1);
1489 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1490 RMEnt->RemoveOperand(i - 1);
1491 }
1492 }
1493 Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1494 }
1495}
1496
1497/// When a virtual register is cloned, the \p New register should have the
1498/// same physreg or stack slot as the \p Old register.
1499void HoistSpillHelper::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
1500 if (VRM.hasPhys(Old))
1501 VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1502 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1503 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1504 else
1505 llvm_unreachable("VReg should be assigned either physreg or stackslot");
1506}

/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/CodeGen/MachineDominators.h

1//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
11// but for target-specific code rather than target-independent IR.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
16#define LLVM_CODEGEN_MACHINEDOMINATORS_H
17
18#include "llvm/ADT/SmallSet.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/CodeGen/MachineBasicBlock.h"
21#include "llvm/CodeGen/MachineFunctionPass.h"
22#include "llvm/CodeGen/MachineInstr.h"
23#include "llvm/Support/GenericDomTree.h"
24#include "llvm/Support/GenericDomTreeConstruction.h"
25#include <cassert>
26#include <memory>
27#include <vector>
28
29namespace llvm {
30
31template <>
32inline void DominatorTreeBase<MachineBasicBlock, false>::addRoot(
33 MachineBasicBlock *MBB) {
34 this->Roots.push_back(MBB);
35}
36
37extern template class DomTreeNodeBase<MachineBasicBlock>;
38extern template class DominatorTreeBase<MachineBasicBlock, false>; // DomTree
39extern template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTree
40
41using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
42
43//===-------------------------------------
44/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
45/// compute a normal dominator tree.
46///
47class MachineDominatorTree : public MachineFunctionPass {
48 /// Helper structure used to hold all the basic blocks
49 /// involved in the split of a critical edge.
50 struct CriticalEdge {
51 MachineBasicBlock *FromBB;
52 MachineBasicBlock *ToBB;
53 MachineBasicBlock *NewBB;
54 };
55
56 /// Pile up all the critical edges to be split.
57 /// The splitting of a critical edge is local and thus it is possible
58 /// to apply several of those changes at the same time.
59 mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
60
61 /// Remember all the basic blocks that are inserted during
62 /// edge splitting.
63 /// Invariant: NewBBs == all the basic blocks contained in the NewBB
64 /// field of all the elements of CriticalEdgesToSplit.
65 /// I.e., for all elt in CriticalEdgesToSplit, there exists BB in NewBBs
66 /// such that BB == elt.NewBB.
67 mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
68
69 /// The DominatorTreeBase that is used to compute a normal dominator tree
70 std::unique_ptr<DomTreeBase<MachineBasicBlock>> DT;
71
72 /// Apply all the recorded critical edges to the DT.
73 /// This updates the underlying DT information in a way that uses
74 /// the fast query path of DT as much as possible.
75 ///
76 /// \post CriticalEdgesToSplit.empty().
77 void applySplitCriticalEdges() const;
78
79public:
80 static char ID; // Pass ID, replacement for typeid
81
82 MachineDominatorTree();
83
84 DomTreeBase<MachineBasicBlock> &getBase() {
85 if (!DT) DT.reset(new DomTreeBase<MachineBasicBlock>());
86 applySplitCriticalEdges();
87 return *DT;
88 }
89
90 void getAnalysisUsage(AnalysisUsage &AU) const override;
91
92 /// getRoots - Return the root blocks of the current CFG. This may include
93 /// multiple blocks if we are computing post dominators. For forward
94 /// dominators, this will always be a single block (the entry node).
95 ///
96 inline const SmallVectorImpl<MachineBasicBlock*> &getRoots() const {
97 applySplitCriticalEdges();
98 return DT->getRoots();
99 }
100
101 inline MachineBasicBlock *getRoot() const {
102 applySplitCriticalEdges();
103 return DT->getRoot();
104 }
105
106 inline MachineDomTreeNode *getRootNode() const {
107 applySplitCriticalEdges();
108 return DT->getRootNode();
109 }
110
111 bool runOnMachineFunction(MachineFunction &F) override;
112
113 inline bool dominates(const MachineDomTreeNode* A,
114 const MachineDomTreeNode* B) const {
115 applySplitCriticalEdges();
116 return DT->dominates(A, B);
117 }
118
119 inline bool dominates(const MachineBasicBlock* A,
120 const MachineBasicBlock* B) const {
121 applySplitCriticalEdges();
122 return DT->dominates(A, B);
123 }
124
125 // dominates - Return true if A dominates B. This performs the
126 // special checks necessary if A and B are in the same basic block.
127 bool dominates(const MachineInstr *A, const MachineInstr *B) const {
128 applySplitCriticalEdges();
129 const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
130 if (BBA != BBB) return DT->dominates(BBA, BBB);
131
132 // Loop through the basic block until we find A or B.
133 MachineBasicBlock::const_iterator I = BBA->begin();
134 for (; &*I != A && &*I != B; ++I)
135 /*empty*/ ;
136
137 //if(!DT.IsPostDominators) {
138 // A dominates B if it is found first in the basic block.
139 return &*I == A;
140 //} else {
141 // // A post-dominates B if B is found first in the basic block.
142 // return &*I == B;
143 //}
144 }
145
146 inline bool properlyDominates(const MachineDomTreeNode* A,
147 const MachineDomTreeNode* B) const {
148 applySplitCriticalEdges();
149 return DT->properlyDominates(A, B);
150 }
151
152 inline bool properlyDominates(const MachineBasicBlock* A,
153 const MachineBasicBlock* B) const {
154 applySplitCriticalEdges();
155 return DT->properlyDominates(A, B);
156 }
157
158 /// findNearestCommonDominator - Find nearest common dominator basic block
159 /// for basic block A and B. If there is no such block then return NULL.
160 inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
161 MachineBasicBlock *B) {
162 applySplitCriticalEdges();
163 return DT->findNearestCommonDominator(A, B);
164 }
165
166 inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
167 applySplitCriticalEdges();
168 return DT->getNode(BB);
8. Calling 'DominatorTreeBase::getNode'
23. Returning from 'DominatorTreeBase::getNode'
169 }
170
171 /// getNode - return the (Post)DominatorTree node for the specified basic
172 /// block. This is the same as using operator[] on this class.
173 ///
174 inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
175 applySplitCriticalEdges();
176 return DT->getNode(BB);
177 }
178
179 /// addNewBlock - Add a new node to the dominator tree information. This
180 /// creates a new node as a child of the DomBB dominator node, linking it into
181 /// the children list of the immediate dominator.
182 inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
183 MachineBasicBlock *DomBB) {
184 applySplitCriticalEdges();
185 return DT->addNewBlock(BB, DomBB);
186 }
187
188 /// changeImmediateDominator - This method is used to update the dominator
189 /// tree information when a node's immediate dominator changes.
190 ///
191 inline void changeImmediateDominator(MachineBasicBlock *N,
192 MachineBasicBlock* NewIDom) {
193 applySplitCriticalEdges();
194 DT->changeImmediateDominator(N, NewIDom);
195 }
196
197 inline void changeImmediateDominator(MachineDomTreeNode *N,
198 MachineDomTreeNode* NewIDom) {
199 applySplitCriticalEdges();
200 DT->changeImmediateDominator(N, NewIDom);
201 }
202
203 /// eraseNode - Removes a node from the dominator tree. Block must not
204 /// dominate any other blocks. Removes node from its immediate dominator's
205 /// children list. Deletes dominator node associated with basic block BB.
206 inline void eraseNode(MachineBasicBlock *BB) {
207 applySplitCriticalEdges();
208 DT->eraseNode(BB);
209 }
210
211 /// splitBlock - BB is split and now it has one successor. Update dominator
212 /// tree to reflect this change.
213 inline void splitBlock(MachineBasicBlock* NewBB) {
214 applySplitCriticalEdges();
215 DT->splitBlock(NewBB);
216 }
217
218 /// isReachableFromEntry - Return true if A is dominated by the entry
219 /// block of the function containing it.
220 bool isReachableFromEntry(const MachineBasicBlock *A) {
221 applySplitCriticalEdges();
222 return DT->isReachableFromEntry(A);
223 }
224
225 void releaseMemory() override;
226
227 void verifyAnalysis() const override;
228
229 void print(raw_ostream &OS, const Module*) const override;
230
231 /// Record that the critical edge (FromBB, ToBB) has been
232 /// split with NewBB.
233 /// It is best to use this method instead of directly updating the
234 /// underlying information, because it helps reduce the number of
235 /// times the DT information is invalidated.
236 ///
237 /// \note Do not use this method with regular edges.
238 ///
239 /// \note To benefit from the compile time improvement provided by this
240 /// method, its users have to limit their queries to the DT interface
241 /// between two edge splittings. In other words, they have to batch
242 /// the splitting of critical edges as much as possible.
243 void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
244 MachineBasicBlock *ToBB,
245 MachineBasicBlock *NewBB) {
246 bool Inserted = NewBBs.insert(NewBB).second;
247 (void)Inserted;
248 assert(Inserted &&
249 "A basic block inserted via edge splitting cannot appear twice");
250 CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
251 }
252};
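
As a usage note for the batching contract spelled out in recordSplitCriticalEdge() above, the sketch below shows the intended call pattern: record every split first, then query, so applySplitCriticalEdges() runs once for the whole group. The Split struct and the function name are hypothetical, not part of this header:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/CodeGen/MachineDominators.h"

    // Hypothetical helper describing one split critical edge.
    struct Split {
      llvm::MachineBasicBlock *FromBB, *ToBB, *NewBB;
    };

    void splitEdgesThenQuery(llvm::MachineDominatorTree &MDT,
                             llvm::ArrayRef<Split> Splits,
                             llvm::MachineBasicBlock *A,
                             llvm::MachineBasicBlock *B) {
      // Record all splits without touching the DT query interface.
      for (const Split &S : Splits)
        MDT.recordSplitCriticalEdge(S.FromBB, S.ToBB, S.NewBB);
      // The first query applies every recorded split in one batch.
      if (MDT.dominates(A, B)) {
        // ...
      }
    }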
253
254//===-------------------------------------
255/// DominatorTree GraphTraits specialization so the DominatorTree can be
256/// iterable by generic graph iterators.
257///
258
259template <class Node, class ChildIterator>
260struct MachineDomTreeGraphTraitsBase {
261 using NodeRef = Node *;
262 using ChildIteratorType = ChildIterator;
263
264 static NodeRef getEntryNode(NodeRef N) { return N; }
265 static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
266 static ChildIteratorType child_end(NodeRef N) { return N->end(); }
267};
268
269template <class T> struct GraphTraits;
270
271template <>
272struct GraphTraits<MachineDomTreeNode *>
273 : public MachineDomTreeGraphTraitsBase<MachineDomTreeNode,
274 MachineDomTreeNode::iterator> {};
275
276template <>
277struct GraphTraits<const MachineDomTreeNode *>
278 : public MachineDomTreeGraphTraitsBase<const MachineDomTreeNode,
279 MachineDomTreeNode::const_iterator> {
280};
281
282template <> struct GraphTraits<MachineDominatorTree*>
283 : public GraphTraits<MachineDomTreeNode *> {
284 static NodeRef getEntryNode(MachineDominatorTree *DT) {
285 return DT->getRootNode();
286 }
287};
288
289} // end namespace llvm
290
291#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H
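
The nullability that matters for the warning above is documented on DominatorTreeBase::getNode() in the header below: the returned node may be null for a statically unreachable block, and getIDom() returns null at the tree root. A small defensive pattern for callers, as a sketch (visitBlock is a hypothetical name):

    #include "llvm/CodeGen/MachineDominators.h"

    void visitBlock(const llvm::MachineDominatorTree &MDT,
                    llvm::MachineBasicBlock *MBB) {
      llvm::MachineDomTreeNode *Node = MDT[MBB];
      if (!Node)
        return; // Unreachable block: no dominator tree node exists for it.
      // Walk towards the root; getIDom() yields null at the root, which
      // ends the loop instead of being dereferenced.
      for (llvm::MachineDomTreeNode *N = Node; N; N = N->getIDom()) {
        // Process N here.
      }
    }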

/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/Support/GenericDomTree.h

1//===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9/// \file
10///
11/// This file defines a set of templates that efficiently compute a dominator
12/// tree over a generic graph. This is used typically in LLVM for fast
13/// dominance queries on the CFG, but is fully generic w.r.t. the underlying
14/// graph types.
15///
16/// Unlike ADT/* graph algorithms, the generic dominator tree has more requirements
17/// on the graph's NodeRef. The NodeRef should be a pointer and,
18/// NodeRef->getParent() must return the parent node that is also a pointer.
19///
20/// FIXME: Maybe GenericDomTree needs a TreeTraits, instead of GraphTraits.
21///
22//===----------------------------------------------------------------------===//
23
24#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
25#define LLVM_SUPPORT_GENERICDOMTREE_H
26
27#include <algorithm>
28#include <cassert>
29#include <cstddef>
30#include <iterator>
31#include <memory>
32#include <type_traits>
33#include <utility>
34#include <vector>
35#include "llvm/ADT/DenseMap.h"
36#include "llvm/ADT/GraphTraits.h"
37#include "llvm/ADT/PointerIntPair.h"
38#include "llvm/ADT/STLExtras.h"
39#include "llvm/ADT/SmallPtrSet.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Support/raw_ostream.h"
42
43namespace llvm {
44
45template <typename NodeT, bool IsPostDom>
46class DominatorTreeBase;
47
48namespace DomTreeBuilder {
49template <typename DomTreeT>
50struct SemiNCAInfo;
51} // namespace DomTreeBuilder
52
53/// Base class for the actual dominator tree node.
54template <class NodeT> class DomTreeNodeBase {
55 friend class PostDominatorTree;
56 friend class DominatorTreeBase<NodeT, false>;
57 friend class DominatorTreeBase<NodeT, true>;
58 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, false>>;
59 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, true>>;
60
61 NodeT *TheBB;
62 DomTreeNodeBase *IDom;
63 unsigned Level;
64 std::vector<DomTreeNodeBase *> Children;
65 mutable unsigned DFSNumIn = ~0;
66 mutable unsigned DFSNumOut = ~0;
67
68 public:
69 DomTreeNodeBase(NodeT *BB, DomTreeNodeBase *iDom)
70 : TheBB(BB), IDom(iDom), Level(IDom ? IDom->Level + 1 : 0) {}
71
72 using iterator = typename std::vector<DomTreeNodeBase *>::iterator;
73 using const_iterator =
74 typename std::vector<DomTreeNodeBase *>::const_iterator;
75
76 iterator begin() { return Children.begin(); }
77 iterator end() { return Children.end(); }
78 const_iterator begin() const { return Children.begin(); }
79 const_iterator end() const { return Children.end(); }
80
81 NodeT *getBlock() const { return TheBB; }
82 DomTreeNodeBase *getIDom() const { return IDom; }
83 unsigned getLevel() const { return Level; }
84
85 const std::vector<DomTreeNodeBase *> &getChildren() const { return Children; }
86
87 std::unique_ptr<DomTreeNodeBase> addChild(
88 std::unique_ptr<DomTreeNodeBase> C) {
89 Children.push_back(C.get());
90 return C;
91 }
92
93 size_t getNumChildren() const { return Children.size(); }
94
95 void clearAllChildren() { Children.clear(); }
96
97 bool compare(const DomTreeNodeBase *Other) const {
98 if (getNumChildren() != Other->getNumChildren())
99 return true;
100
101 if (Level != Other->Level) return true;
102
103 SmallPtrSet<const NodeT *, 4> OtherChildren;
104 for (const DomTreeNodeBase *I : *Other) {
105 const NodeT *Nd = I->getBlock();
106 OtherChildren.insert(Nd);
107 }
108
109 for (const DomTreeNodeBase *I : *this) {
110 const NodeT *N = I->getBlock();
111 if (OtherChildren.count(N) == 0)
112 return true;
113 }
114 return false;
115 }
116
117 void setIDom(DomTreeNodeBase *NewIDom) {
118 assert(IDom && "No immediate dominator?");
119 if (IDom == NewIDom) return;
120
121 auto I = find(IDom->Children, this);
122 assert(I != IDom->Children.end() &&
123 "Not in immediate dominator children set!");
124 // I am no longer your child...
125 IDom->Children.erase(I);
126
127 // Switch to new dominator
128 IDom = NewIDom;
129 IDom->Children.push_back(this);
130
131 UpdateLevel();
132 }
133
134 /// getDFSNumIn/getDFSNumOut - These return the DFS visitation order for nodes
135 /// in the dominator tree. They are only guaranteed valid if
136 /// updateDFSNumbers() has been called.
137 unsigned getDFSNumIn() const { return DFSNumIn; }
138 unsigned getDFSNumOut() const { return DFSNumOut; }
139
140private:
141 // Return true if this node is dominated by other. Use this only if DFS info
142 // is valid.
143 bool DominatedBy(const DomTreeNodeBase *other) const {
144 return this->DFSNumIn >= other->DFSNumIn &&
145 this->DFSNumOut <= other->DFSNumOut;
146 }
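  // For example, with DFS numbers A == {2, 9} and B == {4, 5},
  // B.DominatedBy(&A) is true because 4 >= 2 and 5 <= 9: B's DFS
  // interval nests inside A's.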
147
148 void UpdateLevel() {
149 assert(IDom);
150 if (Level == IDom->Level + 1) return;
151
152 SmallVector<DomTreeNodeBase *, 64> WorkStack = {this};
153
154 while (!WorkStack.empty()) {
155 DomTreeNodeBase *Current = WorkStack.pop_back_val();
156 Current->Level = Current->IDom->Level + 1;
157
158 for (DomTreeNodeBase *C : *Current) {
159 assert(C->IDom);
160 if (C->Level != C->IDom->Level + 1) WorkStack.push_back(C);
161 }
162 }
163 }
164};
165
166template <class NodeT>
167raw_ostream &operator<<(raw_ostream &O, const DomTreeNodeBase<NodeT> *Node) {
168 if (Node->getBlock())
169 Node->getBlock()->printAsOperand(O, false);
170 else
171 O << " <<exit node>>";
172
173 O << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "} ["
174 << Node->getLevel() << "]\n";
175
176 return O;
177}
178
179template <class NodeT>
180void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &O,
181 unsigned Lev) {
182 O.indent(2 * Lev) << "[" << Lev << "] " << N;
183 for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
184 E = N->end();
185 I != E; ++I)
186 PrintDomTree<NodeT>(*I, O, Lev + 1);
187}
188
189namespace DomTreeBuilder {
190// The routines below are provided in a separate header but referenced here.
191template <typename DomTreeT>
192void Calculate(DomTreeT &DT);
193
194template <typename DomTreeT>
195void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
196 typename DomTreeT::NodePtr To);
197
198template <typename DomTreeT>
199void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
200 typename DomTreeT::NodePtr To);
201
202// UpdateKind and Update are used by the batch update API and it's easiest to
203// define them here.
204enum class UpdateKind : unsigned char { Insert, Delete };
205
206template <typename NodePtr>
207struct Update {
208 using NodeKindPair = PointerIntPair<NodePtr, 1, UpdateKind>;
209
210 NodePtr From;
211 NodeKindPair ToAndKind;
212
213 Update(UpdateKind Kind, NodePtr From, NodePtr To)
214 : From(From), ToAndKind(To, Kind) {}
215
216 UpdateKind getKind() const { return ToAndKind.getInt(); }
217 NodePtr getFrom() const { return From; }
218 NodePtr getTo() const { return ToAndKind.getPointer(); }
219 bool operator==(const Update &RHS) const {
220 return From == RHS.From && ToAndKind == RHS.ToAndKind;
221 }
222
223 friend raw_ostream &operator<<(raw_ostream &OS, const Update &U) {
224 OS << (U.getKind() == UpdateKind::Insert ? "Insert " : "Delete ");
225 U.getFrom()->printAsOperand(OS, false);
226 OS << " -> ";
227 U.getTo()->printAsOperand(OS, false);
228 return OS;
229 }
230};
231
232template <typename DomTreeT>
233void ApplyUpdates(DomTreeT &DT,
234 ArrayRef<typename DomTreeT::UpdateType> Updates);
235
236template <typename DomTreeT>
237bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL);
238} // namespace DomTreeBuilder
239
240/// Core dominator tree base class.
241///
242/// This class is a generic template over graph nodes. It is instantiated for
243/// various graphs in the LLVM IR or in the code generator.
244template <typename NodeT, bool IsPostDom>
245class DominatorTreeBase {
246 public:
247 static_assert(std::is_pointer<typename GraphTraits<NodeT *>::NodeRef>::value,
248 "Currently DominatorTreeBase supports only pointer nodes");
249 using NodeType = NodeT;
250 using NodePtr = NodeT *;
251 using ParentPtr = decltype(std::declval<NodeT *>()->getParent());
252 static_assert(std::is_pointer<ParentPtr>::value,
253 "Currently NodeT's parent must be a pointer type");
254 using ParentType = typename std::remove_pointer<ParentPtr>::type;
255 static constexpr bool IsPostDominator = IsPostDom;
256
257 using UpdateType = DomTreeBuilder::Update<NodePtr>;
258 using UpdateKind = DomTreeBuilder::UpdateKind;
259 static constexpr UpdateKind Insert = UpdateKind::Insert;
260 static constexpr UpdateKind Delete = UpdateKind::Delete;
261
262 enum class VerificationLevel { Fast, Basic, Full };
263
264protected:
265 // Dominators always have a single root; postdominators can have more.
266 SmallVector<NodeT *, IsPostDom ? 4 : 1> Roots;
267
268 using DomTreeNodeMapType =
269 DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>;
270 DomTreeNodeMapType DomTreeNodes;
271 DomTreeNodeBase<NodeT> *RootNode;
272 ParentPtr Parent = nullptr;
273
274 mutable bool DFSInfoValid = false;
275 mutable unsigned int SlowQueries = 0;
276
277 friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>;
278
279 public:
280 DominatorTreeBase() {}
281
282 DominatorTreeBase(DominatorTreeBase &&Arg)
283 : Roots(std::move(Arg.Roots)),
284 DomTreeNodes(std::move(Arg.DomTreeNodes)),
285 RootNode(Arg.RootNode),
286 Parent(Arg.Parent),
287 DFSInfoValid(Arg.DFSInfoValid),
288 SlowQueries(Arg.SlowQueries) {
289 Arg.wipe();
290 }
291
292 DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
293 Roots = std::move(RHS.Roots);
294 DomTreeNodes = std::move(RHS.DomTreeNodes);
295 RootNode = RHS.RootNode;
296 Parent = RHS.Parent;
297 DFSInfoValid = RHS.DFSInfoValid;
298 SlowQueries = RHS.SlowQueries;
299 RHS.wipe();
300 return *this;
301 }
302
303 DominatorTreeBase(const DominatorTreeBase &) = delete;
304 DominatorTreeBase &operator=(const DominatorTreeBase &) = delete;
305
306 /// getRoots - Return the root blocks of the current CFG. This may include
307 /// multiple blocks if we are computing post dominators. For forward
308 /// dominators, this will always be a single block (the entry node).
309 ///
310 const SmallVectorImpl<NodeT *> &getRoots() const { return Roots; }
311
312 /// isPostDominator - Returns true if this analysis is based on postdoms.
313 ///
314 bool isPostDominator() const { return IsPostDominator; }
315
316 /// compare - Return false if the other dominator tree base matches this
317 /// dominator tree base. Otherwise return true.
318 bool compare(const DominatorTreeBase &Other) const {
319 if (Parent != Other.Parent) return true;
320
321 if (Roots.size() != Other.Roots.size())
322 return true;
323
324 if (!std::is_permutation(Roots.begin(), Roots.end(), Other.Roots.begin()))
325 return true;
326
327 const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
328 if (DomTreeNodes.size() != OtherDomTreeNodes.size())
329 return true;
330
331 for (const auto &DomTreeNode : DomTreeNodes) {
332 NodeT *BB = DomTreeNode.first;
333 typename DomTreeNodeMapType::const_iterator OI =
334 OtherDomTreeNodes.find(BB);
335 if (OI == OtherDomTreeNodes.end())
336 return true;
337
338 DomTreeNodeBase<NodeT> &MyNd = *DomTreeNode.second;
339 DomTreeNodeBase<NodeT> &OtherNd = *OI->second;
340
341 if (MyNd.compare(&OtherNd))
342 return true;
343 }
344
345 return false;
346 }
347
348 void releaseMemory() { reset(); }
349
350 /// getNode - return the (Post)DominatorTree node for the specified basic
351 /// block. This is the same as using operator[] on this class. The result
352 /// may (but is not required to) be null for a forward (backwards)
353 /// statically unreachable block.
354 DomTreeNodeBase<NodeT> *getNode(const NodeT *BB) const {
355 auto I = DomTreeNodes.find(BB);
356 if (I != DomTreeNodes.end())
9. Assuming the condition is true
10. Taking true branch
357 return I->second.get();
11. Calling 'unique_ptr::get'
22. Returning from 'unique_ptr::get'
358 return nullptr;
359 }
360
361 /// See getNode.
362 DomTreeNodeBase<NodeT> *operator[](const NodeT *BB) const {
363 return getNode(BB);
364 }
365
366 /// getRootNode - This returns the entry node for the CFG of the function. If
367 /// this tree represents the post-dominance relations for a function, however,
368 /// this root may be a node with the block == NULL. This is the case when
369 /// there are multiple exit nodes from a particular function. Consumers of
370 /// post-dominance information must be capable of dealing with this
371 /// possibility.
372 ///
373 DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; }
374 const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; }
375
376 /// Get all nodes dominated by R, including R itself.
377 void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const {
378 Result.clear();
379 const DomTreeNodeBase<NodeT> *RN = getNode(R);
380 if (!RN)
381 return; // If R is unreachable, it will not be present in the DOM tree.
382 SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL;
383 WL.push_back(RN);
384
385 while (!WL.empty()) {
386 const DomTreeNodeBase<NodeT> *N = WL.pop_back_val();
387 Result.push_back(N->getBlock());
388 WL.append(N->begin(), N->end());
389 }
390 }
391
392 /// properlyDominates - Returns true iff A dominates B and A != B.
393 /// Note that this is not a constant time operation!
394 ///
395 bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
396 const DomTreeNodeBase<NodeT> *B) const {
397 if (!A || !B)
398 return false;
399 if (A == B)
400 return false;
401 return dominates(A, B);
402 }
403
404 bool properlyDominates(const NodeT *A, const NodeT *B) const;
405
406 /// isReachableFromEntry - Return true if A is dominated by the entry
407 /// block of the function containing it.
408 bool isReachableFromEntry(const NodeT *A) const {
409 assert(!this->isPostDominator() &&
410 "This is not implemented for post dominators");
411 return isReachableFromEntry(getNode(const_cast<NodeT *>(A)));
412 }
413
414 bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; }
415
416 /// dominates - Returns true iff A dominates B. Note that this is not a
417 /// constant time operation!
418 ///
419 bool dominates(const DomTreeNodeBase<NodeT> *A,
420 const DomTreeNodeBase<NodeT> *B) const {
421 // A node trivially dominates itself.
422 if (B == A)
423 return true;
424
425 // An unreachable node is dominated by anything.
426 if (!isReachableFromEntry(B))
427 return true;
428
429 // And dominates nothing.
430 if (!isReachableFromEntry(A))
431 return false;
432
433 if (B->getIDom() == A) return true;
434
435 if (A->getIDom() == B) return false;
436
437 // A can only dominate B if it is higher in the tree.
438 if (A->getLevel() >= B->getLevel()) return false;
439
440 // Compare the result of the tree walk and the dfs numbers, if expensive
441 // checks are enabled.
442#ifdef EXPENSIVE_CHECKS
443 assert((!DFSInfoValid ||
444         (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) &&
445        "Tree walk disagrees with dfs numbers!");
446#endif
447
448 if (DFSInfoValid)
449 return B->DominatedBy(A);
450
451 // If we end up with too many slow queries, just update the
452 // DFS numbers on the theory that we are going to keep querying.
453 SlowQueries++;
454 if (SlowQueries > 32) {
455 updateDFSNumbers();
456 return B->DominatedBy(A);
457 }
458
459 return dominatedBySlowTreeWalk(A, B);
460 }
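
Note: once DFSInfoValid is set, B->DominatedBy(A) reduces to an interval test
on the numbers assigned by updateDFSNumbers() below. A sketch of that
predicate, assuming the getDFSNumIn/getDFSNumOut accessors on DomTreeNodeBase:

    bool dominatedByViaDFS(const DomTreeNodeBase<BasicBlock> *A,
                           const DomTreeNodeBase<BasicBlock> *B) {
      return A->getDFSNumIn() <= B->getDFSNumIn() &&
             B->getDFSNumOut() <= A->getDFSNumOut();
    }
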
461
462 bool dominates(const NodeT *A, const NodeT *B) const;
463
464 NodeT *getRoot() const {
465 assert(this->Roots.size() == 1 && "Should always have entry node!");
466 return this->Roots[0];
467 }
468
469 /// findNearestCommonDominator - Find nearest common dominator basic block
470 /// for basic block A and B. If there is no such block then return nullptr.
471 NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) const {
472 assert(A && B && "Pointers are not valid");
473 assert(A->getParent() == B->getParent() &&
474        "Two blocks are not in same function");
475
476 // If either A or B is the entry block then it is the nearest common
477 // dominator (for forward dominators).
478 if (!isPostDominator()) {
479 NodeT &Entry = A->getParent()->front();
480 if (A == &Entry || B == &Entry)
481 return &Entry;
482 }
483
484 DomTreeNodeBase<NodeT> *NodeA = getNode(A);
485 DomTreeNodeBase<NodeT> *NodeB = getNode(B);
486
487 if (!NodeA || !NodeB) return nullptr;
488
489 // Use level information to go up the tree until the levels match. Then
490 // continue going up until we arrive at the same node.
491 while (NodeA && NodeA != NodeB) {
492 if (NodeA->getLevel() < NodeB->getLevel()) std::swap(NodeA, NodeB);
493
494 NodeA = NodeA->IDom;
495 }
496
497 return NodeA ? NodeA->getBlock() : nullptr;
498 }
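
Note: usage sketch (hypothetical blocks A and B in the same function, with a
built DomTreeBase<BasicBlock> DT):

    BasicBlock *NCD = DT.findNearestCommonDominator(A, B);
    if (NCD) {
      // NCD dominates both A and B -- e.g. a legal point to hoist code
      // that is common to the two blocks.
    }
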
499
500 const NodeT *findNearestCommonDominator(const NodeT *A,
501 const NodeT *B) const {
502 // Cast away the const qualifiers here. This is ok since
503 // const is re-introduced on the return type.
504 return findNearestCommonDominator(const_cast<NodeT *>(A),
505 const_cast<NodeT *>(B));
506 }
507
508 bool isVirtualRoot(const DomTreeNodeBase<NodeT> *A) const {
509 return isPostDominator() && !A->getBlock();
510 }
511
512 //===--------------------------------------------------------------------===//
513 // API to update (Post)DominatorTree information based on modifications to
514 // the CFG...
515
516 /// Inform the dominator tree about a sequence of CFG edge insertions and
517 /// deletions and perform a batch update on the tree.
518 ///
519 /// This function should be used when there were multiple CFG updates after
520 /// the last dominator tree update. It takes care of performing the updates
521 /// in sync with the CFG and optimizes away the redundant operations that
522 /// cancel each other.
523 /// The function expects the sequence of updates to be balanced, e.g.:
524 ///  - {{Insert, A, B}, {Delete, A, B}, {Insert, A, B}} is fine, because
525 ///    logically it results in a single insertion.
526 /// - {{Insert, A, B}, {Insert, A, B}} is invalid, because it doesn't make
527 /// sense to insert the same edge twice.
528 ///
529 /// What's more, the function assumes that it's safe to ask every node in the
530 /// CFG about its children and inverse children. This implies that deletions
531 /// of CFG edges must not delete the CFG nodes before calling this function.
532 ///
533 /// The applyUpdates function can reorder the updates and remove redundant
534 /// ones internally. The batch updater is also able to detect sequences of
535 /// zero and exactly one update -- it's optimized to do less work in these
536 /// cases.
537 ///
538 /// Note that for postdominators it automatically takes care of applying
539 /// updates on reverse edges internally (so there's no need to swap the
540 /// From and To pointers when constructing DominatorTree::UpdateType).
541 /// The type of updates is the same for DomTreeBase<T> and PostDomTreeBase<T>
542 /// with the same template parameter T.
543 ///
544 /// \param Updates An unordered sequence of updates to perform.
545 ///
546 void applyUpdates(ArrayRef<UpdateType> Updates) {
547 DomTreeBuilder::ApplyUpdates(*this, Updates);
548 }
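
Note: a balanced batch, per the contract above, sketched against the IR-level
DominatorTree wrapper (assumed here to share this UpdateType; A, B, C are
hypothetical blocks):

    DT.applyUpdates({{DominatorTree::Delete, A, C},
                     {DominatorTree::Insert, A, B}});
    // The builder may reorder these and drop pairs that cancel out.
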
549
550 /// Inform the dominator tree about a CFG edge insertion and update the tree.
551 ///
552 /// This function has to be called just before or just after making the update
553 /// on the actual CFG. There cannot be any other updates that the dominator
554 /// tree doesn't know about.
555 ///
556 /// Note that for postdominators it automatically takes care of inserting
557 /// a reverse edge internally (so there's no need to swap the parameters).
558 ///
559 void insertEdge(NodeT *From, NodeT *To) {
560 assert(From);
561 assert(To);
562 assert(From->getParent() == Parent);
563 assert(To->getParent() == Parent);
564 DomTreeBuilder::InsertEdge(*this, From, To);
565 }
566
567 /// Inform the dominator tree about a CFG edge deletion and update the tree.
568 ///
569 /// This function has to be called just after making the update on the actual
570 /// CFG. An internal function checks, in DEBUG mode, that the edge really is
571 /// absent from the CFG. There cannot be any other updates that the
572 /// dominator tree doesn't know about.
573 ///
574 /// Note that for postdominators it automatically takes care of deleting
575 /// a reverse edge internally (so there's no need to swap the parameters).
576 ///
577 void deleteEdge(NodeT *From, NodeT *To) {
578 assert(From);
579 assert(To);
580 assert(From->getParent() == Parent);
581 assert(To->getParent() == Parent);
582 DomTreeBuilder::DeleteEdge(*this, From, To);
583 }
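
Note: the intended call pattern for the two incremental entry points above,
assuming the CFG edit has just been performed and no other edits are pending
(From/To are hypothetical blocks):

    // after physically adding the edge From -> To:
    DT.insertEdge(From, To);
    // after physically removing the edge From -> To:
    DT.deleteEdge(From, To);
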
584
585 /// Add a new node to the dominator tree information.
586 ///
587 /// This creates a new node as a child of DomBB dominator node, linking it
588 /// into the children list of the immediate dominator.
589 ///
590 /// \param BB New node in CFG.
591 /// \param DomBB CFG node that is dominator for BB.
592 /// \returns New dominator tree node that represents new CFG node.
593 ///
594 DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
595 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
596 DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
597 assert(IDomNode && "Not immediate dominator specified for block!");
598 DFSInfoValid = false;
599 return (DomTreeNodes[BB] = IDomNode->addChild(
600 llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get();
601 }
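
Note: sketch for a freshly created block NewBB whose sole predecessor (and
hence immediate dominator) is DomBB; both names are hypothetical:

    DomTreeNodeBase<BasicBlock> *N = DT.addNewBlock(NewBB, DomBB);
    assert(N->getIDom() == DT.getNode(DomBB));
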
602
603 /// Add a new node to the forward dominator tree and make it a new root.
604 ///
605 /// \param BB New node in CFG.
606 /// \returns New dominator tree node that represents new CFG node.
607 ///
608 DomTreeNodeBase<NodeT> *setNewRoot(NodeT *BB) {
609 assert(getNode(BB) == nullptr && "Block already in dominator tree!");
610 assert(!this->isPostDominator() &&
611        "Cannot change root of post-dominator tree");
612 DFSInfoValid = false;
613 DomTreeNodeBase<NodeT> *NewNode = (DomTreeNodes[BB] =
614 llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, nullptr)).get();
615 if (Roots.empty()) {
616 addRoot(BB);
617 } else {
618   assert(Roots.size() == 1);
619 NodeT *OldRoot = Roots.front();
620 auto &OldNode = DomTreeNodes[OldRoot];
621 OldNode = NewNode->addChild(std::move(DomTreeNodes[OldRoot]));
622 OldNode->IDom = NewNode;
623 OldNode->UpdateLevel();
624 Roots[0] = BB;
625 }
626 return RootNode = NewNode;
627 }
628
629 /// changeImmediateDominator - This method is used to update the dominator
630 /// tree information when a node's immediate dominator changes.
631 ///
632 void changeImmediateDominator(DomTreeNodeBase<NodeT> *N,
633 DomTreeNodeBase<NodeT> *NewIDom) {
634 assert(N && NewIDom && "Cannot change null node pointers!");
635 DFSInfoValid = false;
636 N->setIDom(NewIDom);
637 }
638
639 void changeImmediateDominator(NodeT *BB, NodeT *NewBB) {
640 changeImmediateDominator(getNode(BB), getNode(NewBB));
641 }
642
643 /// eraseNode - Removes a node from the dominator tree. Block must not
644 /// dominate any other blocks. Removes node from its immediate dominator's
645 /// children list. Deletes dominator node associated with basic block BB.
646 void eraseNode(NodeT *BB) {
647 DomTreeNodeBase<NodeT> *Node = getNode(BB);
648 assert(Node && "Removing node that isn't in dominator tree.");
649 assert(Node->getChildren().empty() && "Node is not a leaf node.");
650
651 DFSInfoValid = false;
652
653 // Remove node from immediate dominator's children list.
654 DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
655 if (IDom) {
656 const auto I = find(IDom->Children, Node);
657   assert(I != IDom->Children.end() &&
658          "Not in immediate dominator children set!");
659 // I am no longer your child...
660 IDom->Children.erase(I);
661 }
662
663 DomTreeNodes.erase(BB);
664
665 if (!IsPostDom) return;
666
667 // Remember to update PostDominatorTree roots.
668 auto RIt = llvm::find(Roots, BB);
669 if (RIt != Roots.end()) {
670 std::swap(*RIt, Roots.back());
671 Roots.pop_back();
672 }
673 }
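
Note: only dom-tree leaves may be erased; callers re-parent or erase the
children first. Sketch (BB is a hypothetical block known to be a leaf):

    assert(DT.getNode(BB)->getChildren().empty());
    DT.eraseNode(BB);
    assert(DT.getNode(BB) == nullptr);   // BB is no longer in the tree
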
674
675 /// splitBlock - BB is split and now it has one successor. Update dominator
676 /// tree to reflect this change.
677 void splitBlock(NodeT *NewBB) {
678 if (IsPostDominator)
679 Split<Inverse<NodeT *>>(NewBB);
680 else
681 Split<NodeT *>(NewBB);
682 }
683
684 /// print - Convert to human readable form
685 ///
686 void print(raw_ostream &O) const {
687 O << "=============================--------------------------------\n";
688 if (IsPostDominator)
689 O << "Inorder PostDominator Tree: ";
690 else
691 O << "Inorder Dominator Tree: ";
692 if (!DFSInfoValid)
693 O << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
694 O << "\n";
695
696 // The postdom tree can have a null root if there are no returns.
697 if (getRootNode()) PrintDomTree<NodeT>(getRootNode(), O, 1);
698 if (IsPostDominator) {
699 O << "Roots: ";
700 for (const NodePtr Block : Roots) {
701 Block->printAsOperand(O, false);
702 O << " ";
703 }
704 O << "\n";
705 }
706 }
707
708public:
709 /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking
710 /// the dominator tree in DFS order.
711 void updateDFSNumbers() const {
712 if (DFSInfoValid) {
713 SlowQueries = 0;
714 return;
715 }
716
717 SmallVector<std::pair<const DomTreeNodeBase<NodeT> *,
718 typename DomTreeNodeBase<NodeT>::const_iterator>,
719 32> WorkStack;
720
721 const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode();
722 assert((!Parent || ThisRoot) && "Empty constructed DomTree");
723 if (!ThisRoot)
724 return;
725
726 // Both dominators and postdominators have a single root node. In the
727 // case of PostDominatorTree, this node is a virtual root.
728 WorkStack.push_back({ThisRoot, ThisRoot->begin()});
729
730 unsigned DFSNum = 0;
731 ThisRoot->DFSNumIn = DFSNum++;
732
733 while (!WorkStack.empty()) {
734 const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first;
735 const auto ChildIt = WorkStack.back().second;
736
737 // If we visited all of the children of this node, "recurse" back up the
738 // stack, setting the DFSNumOut.
739 if (ChildIt == Node->end()) {
740 Node->DFSNumOut = DFSNum++;
741 WorkStack.pop_back();
742 } else {
743 // Otherwise, recursively visit this child.
744 const DomTreeNodeBase<NodeT> *Child = *ChildIt;
745 ++WorkStack.back().second;
746
747 WorkStack.push_back({Child, Child->begin()});
748 Child->DFSNumIn = DFSNum++;
749 }
750 }
751
752 SlowQueries = 0;
753 DFSInfoValid = true;
754 }
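
Note: a worked numbering for a small tree, to make the interval property
concrete (one valid assignment; actual numbers depend on visit order):

    //   Entry: in=0                out=7
    //     A:   in=1       out=4    (C's interval [2,3] nests inside A's)
    //       C: in=2 out=3
    //     B:   in=5 out=6
    // "X dominates Y" iff Y's [in,out] nests inside X's [in,out].
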
755
756 /// recalculate - compute a dominator tree for the given function
757 void recalculate(ParentType &Func) {
758 Parent = &Func;
759 DomTreeBuilder::Calculate(*this);
760 }
761
762 /// verify - checks if the tree is correct. There are three levels of verification:
763 /// - Full -- verifies if the tree is correct by making sure all the
764 /// properties (including the parent and the sibling property)
765 /// hold.
766 /// Takes O(N^3) time.
767 ///
768 /// - Basic -- checks if the tree is correct, but compares it to a freshly
769 /// constructed tree instead of checking the sibling property.
770 /// Takes O(N^2) time.
771 ///
772 /// - Fast -- checks basic tree structure and compares it with a freshly
773 /// constructed tree.
774 ///           Takes O(N^2) time worst case, but is faster in practice (same
775 /// as tree construction).
776 bool verify(VerificationLevel VL = VerificationLevel::Full) const {
777 return DomTreeBuilder::Verify(*this, VL);
778 }
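
Note: usage sketch; VerificationLevel is the member enum referenced above, and
F is a hypothetical function being checked:

    DomTreeBase<BasicBlock> DT;
    DT.recalculate(F);   // build the tree from F
    assert(DT.verify(DomTreeBase<BasicBlock>::VerificationLevel::Fast) &&
           "dominator tree out of sync with the CFG");
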
779
780protected:
781 void addRoot(NodeT *BB) { this->Roots.push_back(BB); }
782
783 void reset() {
784 DomTreeNodes.clear();
785 Roots.clear();
786 RootNode = nullptr;
787 Parent = nullptr;
788 DFSInfoValid = false;
789 SlowQueries = 0;
790 }
791
792 // NewBB is split and now it has one successor. Update dominator tree to
793 // reflect this change.
794 template <class N>
795 void Split(typename GraphTraits<N>::NodeRef NewBB) {
796 using GraphT = GraphTraits<N>;
797 using NodeRef = typename GraphT::NodeRef;
798 assert(std::distance(GraphT::child_begin(NewBB),
799                      GraphT::child_end(NewBB)) == 1 &&
800        "NewBB should have a single successor!");
801 NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
802
803 std::vector<NodeRef> PredBlocks;
804 for (const auto &Pred : children<Inverse<N>>(NewBB))
805 PredBlocks.push_back(Pred);
806
807 assert(!PredBlocks.empty() && "No predblocks?");
808
809 bool NewBBDominatesNewBBSucc = true;
810 for (const auto &Pred : children<Inverse<N>>(NewBBSucc)) {
811 if (Pred != NewBB && !dominates(NewBBSucc, Pred) &&
812 isReachableFromEntry(Pred)) {
813 NewBBDominatesNewBBSucc = false;
814 break;
815 }
816 }
817
818 // Find NewBB's immediate dominator and create new dominator tree node for
819 // NewBB.
820 NodeT *NewBBIDom = nullptr;
821 unsigned i = 0;
822 for (i = 0; i < PredBlocks.size(); ++i)
823 if (isReachableFromEntry(PredBlocks[i])) {
824 NewBBIDom = PredBlocks[i];
825 break;
826 }
827
828 // It's possible that none of the predecessors of NewBB are reachable;
829 // in that case, NewBB itself is unreachable, so nothing needs to be
830 // changed.
831 if (!NewBBIDom) return;
832
833 for (i = i + 1; i < PredBlocks.size(); ++i) {
834 if (isReachableFromEntry(PredBlocks[i]))
835 NewBBIDom = findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
836 }
837
838 // Create the new dominator tree node... and set the idom of NewBB.
839 DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom);
840
841 // If NewBB strictly dominates other blocks, then it is now the immediate
842 // dominator of NewBBSucc. Update the dominator tree as appropriate.
843 if (NewBBDominatesNewBBSucc) {
844 DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc);
845 changeImmediateDominator(NewBBSuccNode, NewBBNode);
846 }
847 }
848
849 private:
850 bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
851 const DomTreeNodeBase<NodeT> *B) const {
852 assert(A != B);
853 assert(isReachableFromEntry(B));
854 assert(isReachableFromEntry(A));
855
856 const DomTreeNodeBase<NodeT> *IDom;
857 while ((IDom = B->getIDom()) != nullptr && IDom != A && IDom != B)
858 B = IDom; // Walk up the tree
859 return IDom != nullptr;
860 }
861
862 /// Wipe this tree's state without releasing any resources.
863 ///
864 /// This is essentially a post-move helper only. It leaves the object in an
865 /// assignable and destroyable state, but otherwise invalid.
866 void wipe() {
867 DomTreeNodes.clear();
868 RootNode = nullptr;
869 Parent = nullptr;
870 }
871};
872
873template <typename T>
874using DomTreeBase = DominatorTreeBase<T, false>;
875
876template <typename T>
877using PostDomTreeBase = DominatorTreeBase<T, true>;
878
879// These two functions are declared out of line as a workaround for building
880// with old (< r147295) versions of clang because of pr11642.
881template <typename NodeT, bool IsPostDom>
882bool DominatorTreeBase<NodeT, IsPostDom>::dominates(const NodeT *A,
883 const NodeT *B) const {
884 if (A == B)
885 return true;
886
887 // Cast away the const qualifiers here. This is ok since
888 // this function doesn't actually return the values returned
889 // from getNode.
890 return dominates(getNode(const_cast<NodeT *>(A)),
891 getNode(const_cast<NodeT *>(B)));
892}
893template <typename NodeT, bool IsPostDom>
894bool DominatorTreeBase<NodeT, IsPostDom>::properlyDominates(
895 const NodeT *A, const NodeT *B) const {
896 if (A == B)
897 return false;
898
899 // Cast away the const qualifiers here. This is ok since
900 // this function doesn't actually return the values returned
901 // from getNode.
902 return dominates(getNode(const_cast<NodeT *>(A)),
903 getNode(const_cast<NodeT *>(B)));
904}
905
906} // end namespace llvm
907
908#endif // LLVM_SUPPORT_GENERICDOMTREE_H

/usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/bits/unique_ptr.h

1// unique_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2008-2018 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/unique_ptr.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30#ifndef _UNIQUE_PTR_H
31#define _UNIQUE_PTR_H 1
32
33#include <bits/c++config.h>
34#include <debug/assertions.h>
35#include <type_traits>
36#include <utility>
37#include <tuple>
38#include <bits/stl_function.h>
39#include <bits/functional_hash.h>
40
41namespace std _GLIBCXX_VISIBILITY(default)
42{
43_GLIBCXX_BEGIN_NAMESPACE_VERSION
44
45 /**
46 * @addtogroup pointer_abstractions
47 * @{
48 */
49
50#if _GLIBCXX_USE_DEPRECATED
51#pragma GCC diagnostic push
52#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
53 template<typename> class auto_ptr;
54#pragma GCC diagnostic pop
55#endif
56
57 /// Primary template of default_delete, used by unique_ptr
58 template<typename _Tp>
59 struct default_delete
60 {
61 /// Default constructor
62 constexpr default_delete() noexcept = default;
63
64 /** @brief Converting constructor.
65 *
66 * Allows conversion from a deleter for another type, @p _Up,
67 * only if @p _Up* is convertible to @p _Tp*.
68 */
69 template<typename _Up, typename = typename
70 enable_if<is_convertible<_Up*, _Tp*>::value>::type>
71 default_delete(const default_delete<_Up>&) noexcept { }
72
73 /// Calls @c delete @p __ptr
74 void
75 operator()(_Tp* __ptr) const
76 {
77 static_assert(!is_void<_Tp>::value,
78 "can't delete pointer to incomplete type");
79 static_assert(sizeof(_Tp)>0,
80 "can't delete pointer to incomplete type");
81 delete __ptr;
82 }
83 };
84
85 // _GLIBCXX_RESOLVE_LIB_DEFECTS
86 // DR 740 - omit specialization for array objects with a compile time length
87 /// Specialization for arrays, default_delete.
88 template<typename _Tp>
89 struct default_delete<_Tp[]>
90 {
91 public:
92 /// Default constructor
93 constexpr default_delete() noexcept = default;
94
95 /** @brief Converting constructor.
96 *
97 * Allows conversion from a deleter for arrays of another type, such as
98 * a const-qualified version of @p _Tp.
99 *
100 * Conversions from types derived from @c _Tp are not allowed because
101 * it is unsafe to @c delete[] an array of derived types through a
102 * pointer to the base type.
103 */
104 template<typename _Up, typename = typename
105 enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type>
106 default_delete(const default_delete<_Up[]>&) noexcept { }
107
108 /// Calls @c delete[] @p __ptr
109 template<typename _Up>
110 typename enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type
111 operator()(_Up* __ptr) const
112 {
113 static_assert(sizeof(_Tp)>0,
114 "can't delete pointer to incomplete type");
115 delete [] __ptr;
116 }
117 };
118
119 template <typename _Tp, typename _Dp>
120 class __uniq_ptr_impl
121 {
122 template <typename _Up, typename _Ep, typename = void>
123 struct _Ptr
124 {
125 using type = _Up*;
126 };
127
128 template <typename _Up, typename _Ep>
129 struct
130 _Ptr<_Up, _Ep, __void_t<typename remove_reference<_Ep>::type::pointer>>
131 {
132 using type = typename remove_reference<_Ep>::type::pointer;
133 };
134
135 public:
136 using _DeleterConstraint = enable_if<
137 __and_<__not_<is_pointer<_Dp>>,
138 is_default_constructible<_Dp>>::value>;
139
140 using pointer = typename _Ptr<_Tp, _Dp>::type;
141
142 __uniq_ptr_impl() = default;
143 __uniq_ptr_impl(pointer __p) : _M_t() { _M_ptr() = __p; }
144
145 template<typename _Del>
146 __uniq_ptr_impl(pointer __p, _Del&& __d)
147 : _M_t(__p, std::forward<_Del>(__d)) { }
148
149 pointer& _M_ptr() { return std::get<0>(_M_t); }
150 pointer _M_ptr() const { return std::get<0>(_M_t); }
13: Calling 'get<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
20: Returning from 'get<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
151 _Dp& _M_deleter() { return std::get<1>(_M_t); }
152 const _Dp& _M_deleter() const { return std::get<1>(_M_t); }
153
154 private:
155 tuple<pointer, _Dp> _M_t;
156 };
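
Note: keeping the pointer and deleter in a tuple<pointer, _Dp> lets a
stateless deleter occupy no storage, via the empty-base optimization in
_Tuple_impl (shown further below). Observable effect, a sketch that holds for
this libstdc++ layout:

    static_assert(sizeof(std::unique_ptr<int>) == sizeof(int *),
                  "default_delete<int> is empty, so it adds no storage");
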
157
158 /// 20.7.1.2 unique_ptr for single objects.
159 template <typename _Tp, typename _Dp = default_delete<_Tp>>
160 class unique_ptr
161 {
162 template <class _Up>
163 using _DeleterConstraint =
164 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
165
166 __uniq_ptr_impl<_Tp, _Dp> _M_t;
167
168 public:
169 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
170 using element_type = _Tp;
171 using deleter_type = _Dp;
172
173 // helper template for detecting a safe conversion from another
174 // unique_ptr
175 template<typename _Up, typename _Ep>
176 using __safe_conversion_up = __and_<
177 is_convertible<typename unique_ptr<_Up, _Ep>::pointer, pointer>,
178 __not_<is_array<_Up>>,
179 __or_<__and_<is_reference<deleter_type>,
180 is_same<deleter_type, _Ep>>,
181 __and_<__not_<is_reference<deleter_type>>,
182 is_convertible<_Ep, deleter_type>>
183 >
184 >;
185
186 // Constructors.
187
188 /// Default constructor, creates a unique_ptr that owns nothing.
189 template <typename _Up = _Dp,
190 typename = _DeleterConstraint<_Up>>
191 constexpr unique_ptr() noexcept
192 : _M_t()
193 { }
194
195 /** Takes ownership of a pointer.
196 *
197 * @param __p A pointer to an object of @c element_type
198 *
199 * The deleter will be value-initialized.
200 */
201 template <typename _Up = _Dp,
202 typename = _DeleterConstraint<_Up>>
203 explicit
204 unique_ptr(pointer __p) noexcept
205 : _M_t(__p)
206 { }
207
208 /** Takes ownership of a pointer.
209 *
210 * @param __p A pointer to an object of @c element_type
211 * @param __d A reference to a deleter.
212 *
213 * The deleter will be initialized with @p __d
214 */
215 unique_ptr(pointer __p,
216 typename conditional<is_reference<deleter_type>::value,
217 deleter_type, const deleter_type&>::type __d) noexcept
218 : _M_t(__p, __d) { }
219
220 /** Takes ownership of a pointer.
221 *
222 * @param __p A pointer to an object of @c element_type
223 * @param __d An rvalue reference to a deleter.
224 *
225 * The deleter will be initialized with @p std::move(__d)
226 */
227 unique_ptr(pointer __p,
228 typename remove_reference<deleter_type>::type&& __d) noexcept
229 : _M_t(std::move(__p), std::move(__d))
230 { static_assert(!std::is_reference<deleter_type>::value,
231 "rvalue deleter bound to reference"); }
232
233 /// Creates a unique_ptr that owns nothing.
234 template <typename _Up = _Dp,
235 typename = _DeleterConstraint<_Up>>
236 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
237
238 // Move constructors.
239
240 /// Move constructor.
241 unique_ptr(unique_ptr&& __u) noexcept
242 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
243
244 /** @brief Converting constructor from another type
245 *
246 * Requires that the pointer owned by @p __u is convertible to the
247 * type of pointer owned by this object, @p __u does not own an array,
248 * and @p __u has a compatible deleter type.
249 */
250 template<typename _Up, typename _Ep, typename = _Require<
251 __safe_conversion_up<_Up, _Ep>,
252 typename conditional<is_reference<_Dp>::value,
253 is_same<_Ep, _Dp>,
254 is_convertible<_Ep, _Dp>>::type>>
255 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
256 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
257 { }
258
259#if _GLIBCXX_USE_DEPRECATED
260#pragma GCC diagnostic push
261#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
262 /// Converting constructor from @c auto_ptr
263 template<typename _Up, typename = _Require<
264 is_convertible<_Up*, _Tp*>, is_same<_Dp, default_delete<_Tp>>>>
265 unique_ptr(auto_ptr<_Up>&& __u) noexcept;
266#pragma GCC diagnostic pop
267#endif
268
269 /// Destructor, invokes the deleter if the stored pointer is not null.
270 ~unique_ptr() noexcept
271 {
272 auto& __ptr = _M_t._M_ptr();
273 if (__ptr != nullptr)
274 get_deleter()(__ptr);
275 __ptr = pointer();
276 }
277
278 // Assignment.
279
280 /** @brief Move assignment operator.
281 *
282 * @param __u The object to transfer ownership from.
283 *
284 * Invokes the deleter first if this object owns a pointer.
285 */
286 unique_ptr&
287 operator=(unique_ptr&& __u) noexcept
288 {
289 reset(__u.release());
290 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
291 return *this;
292 }
293
294 /** @brief Assignment from another type.
295 *
296 * @param __u The object to transfer ownership from, which owns a
297 * convertible pointer to a non-array object.
298 *
299 * Invokes the deleter first if this object owns a pointer.
300 */
301 template<typename _Up, typename _Ep>
302 typename enable_if< __and_<
303 __safe_conversion_up<_Up, _Ep>,
304 is_assignable<deleter_type&, _Ep&&>
305 >::value,
306 unique_ptr&>::type
307 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
308 {
309 reset(__u.release());
310 get_deleter() = std::forward<_Ep>(__u.get_deleter());
311 return *this;
312 }
313
314 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
315 unique_ptr&
316 operator=(nullptr_t) noexcept
317 {
318 reset();
319 return *this;
320 }
321
322 // Observers.
323
324 /// Dereference the stored pointer.
325 typename add_lvalue_reference<element_type>::type
326 operator*() const
327 {
328 __glibcxx_assert(get() != pointer());
329 return *get();
330 }
331
332 /// Return the stored pointer.
333 pointer
334 operator->() const noexcept
335 {
336 _GLIBCXX_DEBUG_PEDASSERT(get() != pointer());
337 return get();
338 }
339
340 /// Return the stored pointer.
341 pointer
342 get() const noexcept
343 { return _M_t._M_ptr(); }
12: Calling '__uniq_ptr_impl::_M_ptr'
21: Returning from '__uniq_ptr_impl::_M_ptr'
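
Note: steps 11-22 of the bug path run through here. get() returns the stored
pointer unconditionally, null or not; the burden of checking falls on the
caller, which is precisely what the flagged line in InlineSpiller.cpp omits
before dereferencing. Illustration:

    #include <memory>
    std::unique_ptr<int> P;    // owns nothing
    int *Raw = P.get();        // Raw == nullptr, and no diagnostic here
    if (Raw)                   // this test is the caller's responsibility
      *Raw = 42;
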
344
345 /// Return a reference to the stored deleter.
346 deleter_type&
347 get_deleter() noexcept
348 { return _M_t._M_deleter(); }
349
350 /// Return a reference to the stored deleter.
351 const deleter_type&
352 get_deleter() const noexcept
353 { return _M_t._M_deleter(); }
354
355 /// Return @c true if the stored pointer is not null.
356 explicit operator bool() const noexcept
357 { return get() == pointer() ? false : true; }
358
359 // Modifiers.
360
361 /// Release ownership of any stored pointer.
362 pointer
363 release() noexcept
364 {
365 pointer __p = get();
366 _M_t._M_ptr() = pointer();
367 return __p;
368 }
369
370 /** @brief Replace the stored pointer.
371 *
372 * @param __p The new pointer to store.
373 *
374 * The deleter will be invoked if a pointer is already owned.
375 */
376 void
377 reset(pointer __p = pointer()) noexcept
378 {
379 using std::swap;
380 swap(_M_t._M_ptr(), __p);
381 if (__p != pointer())
382 get_deleter()(__p);
383 }
384
385 /// Exchange the pointer and deleter with another object.
386 void
387 swap(unique_ptr& __u) noexcept
388 {
389 using std::swap;
390 swap(_M_t, __u._M_t);
391 }
392
393 // Disable copy from lvalue.
394 unique_ptr(const unique_ptr&) = delete;
395 unique_ptr& operator=(const unique_ptr&) = delete;
396 };
397
398 /// 20.7.1.3 unique_ptr for array objects with a runtime length
399 // [unique.ptr.runtime]
400 // _GLIBCXX_RESOLVE_LIB_DEFECTS
401 // DR 740 - omit specialization for array objects with a compile time length
402 template<typename _Tp, typename _Dp>
403 class unique_ptr<_Tp[], _Dp>
404 {
405 template <typename _Up>
406 using _DeleterConstraint =
407 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
408
409 __uniq_ptr_impl<_Tp, _Dp> _M_t;
410
411 template<typename _Up>
412 using __remove_cv = typename remove_cv<_Up>::type;
413
414 // like is_base_of<_Tp, _Up> but false if unqualified types are the same
415 template<typename _Up>
416 using __is_derived_Tp
417 = __and_< is_base_of<_Tp, _Up>,
418 __not_<is_same<__remove_cv<_Tp>, __remove_cv<_Up>>> >;
419
420 public:
421 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
422 using element_type = _Tp;
423 using deleter_type = _Dp;
424
425 // helper template for detecting a safe conversion from another
426 // unique_ptr
427 template<typename _Up, typename _Ep,
428 typename _Up_up = unique_ptr<_Up, _Ep>,
429 typename _Up_element_type = typename _Up_up::element_type>
430 using __safe_conversion_up = __and_<
431 is_array<_Up>,
432 is_same<pointer, element_type*>,
433 is_same<typename _Up_up::pointer, _Up_element_type*>,
434 is_convertible<_Up_element_type(*)[], element_type(*)[]>,
435 __or_<__and_<is_reference<deleter_type>, is_same<deleter_type, _Ep>>,
436 __and_<__not_<is_reference<deleter_type>>,
437 is_convertible<_Ep, deleter_type>>>
438 >;
439
440 // helper template for detecting a safe conversion from a raw pointer
441 template<typename _Up>
442 using __safe_conversion_raw = __and_<
443 __or_<__or_<is_same<_Up, pointer>,
444 is_same<_Up, nullptr_t>>,
445 __and_<is_pointer<_Up>,
446 is_same<pointer, element_type*>,
447 is_convertible<
448 typename remove_pointer<_Up>::type(*)[],
449 element_type(*)[]>
450 >
451 >
452 >;
453
454 // Constructors.
455
456 /// Default constructor, creates a unique_ptr that owns nothing.
457 template <typename _Up = _Dp,
458 typename = _DeleterConstraint<_Up>>
459 constexpr unique_ptr() noexcept
460 : _M_t()
461 { }
462
463 /** Takes ownership of a pointer.
464 *
465 * @param __p A pointer to an array of a type safely convertible
466 * to an array of @c element_type
467 *
468 * The deleter will be value-initialized.
469 */
470 template<typename _Up,
471 typename _Vp = _Dp,
472 typename = _DeleterConstraint<_Vp>,
473 typename = typename enable_if<
474 __safe_conversion_raw<_Up>::value, bool>::type>
475 explicit
476 unique_ptr(_Up __p) noexcept
477 : _M_t(__p)
478 { }
479
480 /** Takes ownership of a pointer.
481 *
482 * @param __p A pointer to an array of a type safely convertible
483 * to an array of @c element_type
484 * @param __d A reference to a deleter.
485 *
486 * The deleter will be initialized with @p __d
487 */
488 template<typename _Up,
489 typename = typename enable_if<
490 __safe_conversion_raw<_Up>::value, bool>::type>
491 unique_ptr(_Up __p,
492 typename conditional<is_reference<deleter_type>::value,
493 deleter_type, const deleter_type&>::type __d) noexcept
494 : _M_t(__p, __d) { }
495
496 /** Takes ownership of a pointer.
497 *
498 * @param __p A pointer to an array of a type safely convertible
499 * to an array of @c element_type
500        * @param __d An rvalue reference to a deleter.
501 *
502 * The deleter will be initialized with @p std::move(__d)
503 */
504 template<typename _Up,
505 typename = typename enable_if<
506 __safe_conversion_raw<_Up>::value, bool>::type>
507 unique_ptr(_Up __p, typename
508 remove_reference<deleter_type>::type&& __d) noexcept
509 : _M_t(std::move(__p), std::move(__d))
510 { static_assert(!is_reference<deleter_type>::value,
511 "rvalue deleter bound to reference"); }
512
513 /// Move constructor.
514 unique_ptr(unique_ptr&& __u) noexcept
515 : _M_t(__u.release(), std::forward<deleter_type>(__u.get_deleter())) { }
516
517 /// Creates a unique_ptr that owns nothing.
518 template <typename _Up = _Dp,
519 typename = _DeleterConstraint<_Up>>
520 constexpr unique_ptr(nullptr_t) noexcept : unique_ptr() { }
521
522 template<typename _Up, typename _Ep,
523 typename = _Require<__safe_conversion_up<_Up, _Ep>>>
524 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
525 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
526 { }
527
528 /// Destructor, invokes the deleter if the stored pointer is not null.
529 ~unique_ptr()
530 {
531 auto& __ptr = _M_t._M_ptr();
532 if (__ptr != nullptr)
533 get_deleter()(__ptr);
534 __ptr = pointer();
535 }
536
537 // Assignment.
538
539 /** @brief Move assignment operator.
540 *
541 * @param __u The object to transfer ownership from.
542 *
543 * Invokes the deleter first if this object owns a pointer.
544 */
545 unique_ptr&
546 operator=(unique_ptr&& __u) noexcept
547 {
548 reset(__u.release());
549 get_deleter() = std::forward<deleter_type>(__u.get_deleter());
550 return *this;
551 }
552
553 /** @brief Assignment from another type.
554 *
555 * @param __u The object to transfer ownership from, which owns a
556 * convertible pointer to an array object.
557 *
558 * Invokes the deleter first if this object owns a pointer.
559 */
560 template<typename _Up, typename _Ep>
561 typename
562 enable_if<__and_<__safe_conversion_up<_Up, _Ep>,
563 is_assignable<deleter_type&, _Ep&&>
564 >::value,
565 unique_ptr&>::type
566 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
567 {
568 reset(__u.release());
569 get_deleter() = std::forward<_Ep>(__u.get_deleter());
570 return *this;
571 }
572
573 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
574 unique_ptr&
575 operator=(nullptr_t) noexcept
576 {
577 reset();
578 return *this;
579 }
580
581 // Observers.
582
583 /// Access an element of owned array.
584 typename std::add_lvalue_reference<element_type>::type
585 operator[](size_t __i) const
586 {
587 __glibcxx_assert(get() != pointer());
588 return get()[__i];
589 }
590
591 /// Return the stored pointer.
592 pointer
593 get() const noexcept
594 { return _M_t._M_ptr(); }
595
596 /// Return a reference to the stored deleter.
597 deleter_type&
598 get_deleter() noexcept
599 { return _M_t._M_deleter(); }
600
601 /// Return a reference to the stored deleter.
602 const deleter_type&
603 get_deleter() const noexcept
604 { return _M_t._M_deleter(); }
605
606 /// Return @c true if the stored pointer is not null.
607 explicit operator bool() const noexcept
608 { return get() == pointer() ? false : true; }
609
610 // Modifiers.
611
612 /// Release ownership of any stored pointer.
613 pointer
614 release() noexcept
615 {
616 pointer __p = get();
617 _M_t._M_ptr() = pointer();
618 return __p;
619 }
620
621 /** @brief Replace the stored pointer.
622 *
623 * @param __p The new pointer to store.
624 *
625 * The deleter will be invoked if a pointer is already owned.
626 */
627 template <typename _Up,
628 typename = _Require<
629 __or_<is_same<_Up, pointer>,
630 __and_<is_same<pointer, element_type*>,
631 is_pointer<_Up>,
632 is_convertible<
633 typename remove_pointer<_Up>::type(*)[],
634 element_type(*)[]
635 >
636 >
637 >
638 >>
639 void
640 reset(_Up __p) noexcept
641 {
642 pointer __ptr = __p;
643 using std::swap;
644 swap(_M_t._M_ptr(), __ptr);
645 if (__ptr != nullptr)
646 get_deleter()(__ptr);
647 }
648
649 void reset(nullptr_t = nullptr) noexcept
650 {
651 reset(pointer());
652 }
653
654 /// Exchange the pointer and deleter with another object.
655 void
656 swap(unique_ptr& __u) noexcept
657 {
658 using std::swap;
659 swap(_M_t, __u._M_t);
660 }
661
662 // Disable copy from lvalue.
663 unique_ptr(const unique_ptr&) = delete;
664 unique_ptr& operator=(const unique_ptr&) = delete;
665 };
666
667 template<typename _Tp, typename _Dp>
668 inline
669#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
670 // Constrained free swap overload, see p0185r1
671 typename enable_if<__is_swappable<_Dp>::value>::type
672#else
673 void
674#endif
675 swap(unique_ptr<_Tp, _Dp>& __x,
676 unique_ptr<_Tp, _Dp>& __y) noexcept
677 { __x.swap(__y); }
678
679#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
680 template<typename _Tp, typename _Dp>
681 typename enable_if<!__is_swappable<_Dp>::value>::type
682 swap(unique_ptr<_Tp, _Dp>&,
683 unique_ptr<_Tp, _Dp>&) = delete;
684#endif
685
686 template<typename _Tp, typename _Dp,
687 typename _Up, typename _Ep>
688 inline bool
689 operator==(const unique_ptr<_Tp, _Dp>& __x,
690 const unique_ptr<_Up, _Ep>& __y)
691 { return __x.get() == __y.get(); }
692
693 template<typename _Tp, typename _Dp>
694 inline bool
695 operator==(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
696 { return !__x; }
697
698 template<typename _Tp, typename _Dp>
699 inline bool
700 operator==(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
701 { return !__x; }
702
703 template<typename _Tp, typename _Dp,
704 typename _Up, typename _Ep>
705 inline bool
706 operator!=(const unique_ptr<_Tp, _Dp>& __x,
707 const unique_ptr<_Up, _Ep>& __y)
708 { return __x.get() != __y.get(); }
709
710 template<typename _Tp, typename _Dp>
711 inline bool
712 operator!=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
713 { return (bool)__x; }
714
715 template<typename _Tp, typename _Dp>
716 inline bool
717 operator!=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
718 { return (bool)__x; }
719
720 template<typename _Tp, typename _Dp,
721 typename _Up, typename _Ep>
722 inline bool
723 operator<(const unique_ptr<_Tp, _Dp>& __x,
724 const unique_ptr<_Up, _Ep>& __y)
725 {
726 typedef typename
727 std::common_type<typename unique_ptr<_Tp, _Dp>::pointer,
728 typename unique_ptr<_Up, _Ep>::pointer>::type _CT;
729 return std::less<_CT>()(__x.get(), __y.get());
730 }
731
732 template<typename _Tp, typename _Dp>
733 inline bool
734 operator<(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
735 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
736 nullptr); }
737
738 template<typename _Tp, typename _Dp>
739 inline bool
740 operator<(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
741 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
742 __x.get()); }
743
744 template<typename _Tp, typename _Dp,
745 typename _Up, typename _Ep>
746 inline bool
747 operator<=(const unique_ptr<_Tp, _Dp>& __x,
748 const unique_ptr<_Up, _Ep>& __y)
749 { return !(__y < __x); }
750
751 template<typename _Tp, typename _Dp>
752 inline bool
753 operator<=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
754 { return !(nullptr < __x); }
755
756 template<typename _Tp, typename _Dp>
757 inline bool
758 operator<=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
759 { return !(__x < nullptr); }
760
761 template<typename _Tp, typename _Dp,
762 typename _Up, typename _Ep>
763 inline bool
764 operator>(const unique_ptr<_Tp, _Dp>& __x,
765 const unique_ptr<_Up, _Ep>& __y)
766 { return (__y < __x); }
767
768 template<typename _Tp, typename _Dp>
769 inline bool
770 operator>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
771 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
772 __x.get()); }
773
774 template<typename _Tp, typename _Dp>
775 inline bool
776 operator>(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
777 { return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
778 nullptr); }
779
780 template<typename _Tp, typename _Dp,
781 typename _Up, typename _Ep>
782 inline bool
783 operator>=(const unique_ptr<_Tp, _Dp>& __x,
784 const unique_ptr<_Up, _Ep>& __y)
785 { return !(__x < __y); }
786
787 template<typename _Tp, typename _Dp>
788 inline bool
789 operator>=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
790 { return !(__x < nullptr); }
791
792 template<typename _Tp, typename _Dp>
793 inline bool
794 operator>=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
795 { return !(nullptr < __x); }
796
797 /// std::hash specialization for unique_ptr.
798 template<typename _Tp, typename _Dp>
799 struct hash<unique_ptr<_Tp, _Dp>>
800 : public __hash_base<size_t, unique_ptr<_Tp, _Dp>>,
801 private __poison_hash<typename unique_ptr<_Tp, _Dp>::pointer>
802 {
803 size_t
804 operator()(const unique_ptr<_Tp, _Dp>& __u) const noexcept
805 {
806 typedef unique_ptr<_Tp, _Dp> _UP;
807 return std::hash<typename _UP::pointer>()(__u.get());
808 }
809 };
810
811#if __cplusplus > 201103L
812
813#define __cpp_lib_make_unique 201304
814
815 template<typename _Tp>
816 struct _MakeUniq
817 { typedef unique_ptr<_Tp> __single_object; };
818
819 template<typename _Tp>
820 struct _MakeUniq<_Tp[]>
821 { typedef unique_ptr<_Tp[]> __array; };
822
823 template<typename _Tp, size_t _Bound>
824 struct _MakeUniq<_Tp[_Bound]>
825 { struct __invalid_type { }; };
826
827 /// std::make_unique for single objects
828 template<typename _Tp, typename... _Args>
829 inline typename _MakeUniq<_Tp>::__single_object
830 make_unique(_Args&&... __args)
831 { return unique_ptr<_Tp>(new _Tp(std::forward<_Args>(__args)...)); }
832
833 /// std::make_unique for arrays of unknown bound
834 template<typename _Tp>
835 inline typename _MakeUniq<_Tp>::__array
836 make_unique(size_t __num)
837 { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__num]()); }
838
839 /// Disable std::make_unique for arrays of known bound
840 template<typename _Tp, typename... _Args>
841 inline typename _MakeUniq<_Tp>::__invalid_type
842 make_unique(_Args&&...) = delete;
843#endif
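
Note: make_unique is C++14; the guard above keeps it out of the C++11 build
this report was generated with. A quick sketch of the three overloads:

    auto S = std::make_unique<int>(42);    // single object
    auto A = std::make_unique<int[]>(8);   // unknown bound, value-initialized
    // std::make_unique<int[8]>() would pick the deleted __invalid_type
    // overload and fail to compile.
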
844
845 // @} group pointer_abstractions
846
847_GLIBCXX_END_NAMESPACE_VERSION
848} // namespace
849
850#endif /* _UNIQUE_PTR_H */

/usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/tuple

1// <tuple> -*- C++ -*-
2
3// Copyright (C) 2007-2018 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/tuple
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_TUPLE
30#define _GLIBCXX_TUPLE 1
31
32#pragma GCC system_header
33
34#if __cplusplus < 201103L
35# include <bits/c++0x_warning.h>
36#else
37
38#include <utility>
39#include <array>
40#include <bits/uses_allocator.h>
41#include <bits/invoke.h>
42
43namespace std _GLIBCXX_VISIBILITY(default)
44{
45_GLIBCXX_BEGIN_NAMESPACE_VERSION
46
47 /**
48 * @addtogroup utilities
49 * @{
50 */
51
52 template<typename... _Elements>
53 class tuple;
54
55 template<typename _Tp>
56 struct __is_empty_non_tuple : is_empty<_Tp> { };
57
58 // Using EBO for elements that are tuples causes ambiguous base errors.
59 template<typename _El0, typename... _El>
60 struct __is_empty_non_tuple<tuple<_El0, _El...>> : false_type { };
61
62 // Use the Empty Base-class Optimization for empty, non-final types.
63 template<typename _Tp>
64 using __empty_not_final
65 = typename conditional<__is_final(_Tp), false_type,
66 __is_empty_non_tuple<_Tp>>::type;
67
68 template<std::size_t _Idx, typename _Head,
69 bool = __empty_not_final<_Head>::value>
70 struct _Head_base;
71
72 template<std::size_t _Idx, typename _Head>
73 struct _Head_base<_Idx, _Head, true>
74 : public _Head
75 {
76 constexpr _Head_base()
77 : _Head() { }
78
79 constexpr _Head_base(const _Head& __h)
80 : _Head(__h) { }
81
82 constexpr _Head_base(const _Head_base&) = default;
83 constexpr _Head_base(_Head_base&&) = default;
84
85 template<typename _UHead>
86 constexpr _Head_base(_UHead&& __h)
87 : _Head(std::forward<_UHead>(__h)) { }
88
89 _Head_base(allocator_arg_t, __uses_alloc0)
90 : _Head() { }
91
92 template<typename _Alloc>
93 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
94 : _Head(allocator_arg, *__a._M_a) { }
95
96 template<typename _Alloc>
97 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
98 : _Head(*__a._M_a) { }
99
100 template<typename _UHead>
101 _Head_base(__uses_alloc0, _UHead&& __uhead)
102 : _Head(std::forward<_UHead>(__uhead)) { }
103
104 template<typename _Alloc, typename _UHead>
105 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
106 : _Head(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead)) { }
107
108 template<typename _Alloc, typename _UHead>
109 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
110 : _Head(std::forward<_UHead>(__uhead), *__a._M_a) { }
111
112 static constexpr _Head&
113 _M_head(_Head_base& __b) noexcept { return __b; }
114
115 static constexpr const _Head&
116 _M_head(const _Head_base& __b) noexcept { return __b; }
117 };
118
119 template<std::size_t _Idx, typename _Head>
120 struct _Head_base<_Idx, _Head, false>
121 {
122 constexpr _Head_base()
123 : _M_head_impl() { }
124
125 constexpr _Head_base(const _Head& __h)
126 : _M_head_impl(__h) { }
127
128 constexpr _Head_base(const _Head_base&) = default;
129 constexpr _Head_base(_Head_base&&) = default;
130
131 template<typename _UHead>
132 constexpr _Head_base(_UHead&& __h)
133 : _M_head_impl(std::forward<_UHead>(__h)) { }
134
135 _Head_base(allocator_arg_t, __uses_alloc0)
136 : _M_head_impl() { }
137
138 template<typename _Alloc>
139 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
140 : _M_head_impl(allocator_arg, *__a._M_a) { }
141
142 template<typename _Alloc>
143 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
144 : _M_head_impl(*__a._M_a) { }
145
146 template<typename _UHead>
147 _Head_base(__uses_alloc0, _UHead&& __uhead)
148 : _M_head_impl(std::forward<_UHead>(__uhead)) { }
149
150 template<typename _Alloc, typename _UHead>
151 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
152 : _M_head_impl(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead))
153 { }
154
155 template<typename _Alloc, typename _UHead>
156 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
157 : _M_head_impl(std::forward<_UHead>(__uhead), *__a._M_a) { }
158
159 static constexpr _Head&
160 _M_head(_Head_base& __b) noexcept { return __b._M_head_impl; }
161
162 static constexpr const _Head&
163 _M_head(const _Head_base& __b) noexcept { return __b._M_head_impl; }
164
165 _Head _M_head_impl;
166 };
167
168 /**
169 * Contains the actual implementation of the @c tuple template, stored
170 * as a recursive inheritance hierarchy from the first element (most
171 * derived class) to the last (least derived class). The @c Idx
172 * parameter gives the 0-based index of the element stored at this
173 * point in the hierarchy; we use it to implement a constant-time
174 * get() operation.
175 */
176 template<std::size_t _Idx, typename... _Elements>
177 struct _Tuple_impl;
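To make the recursion concrete: a tuple<int, char, double> unfolds as
_Tuple_impl<0, int, char, double> : _Tuple_impl<1, char, double> :
_Tuple_impl<2, double>, each layer privately inheriting its own _Head_base.
A sketch of the consequence (editor's example; _Tuple_impl is an internal
name, so this compiles only against libstdc++):

    #include <tuple>
    #include <type_traits>

    // Every tail layer is a base subobject of the layers above it, so
    // get<1>(t) is a single upcast to that layer -- no runtime recursion.
    static_assert(std::is_base_of<std::_Tuple_impl<1, char, double>,
                                  std::tuple<int, char, double>>::value,
                  "tail layers are base subobjects");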
178
179 /**
180 * Recursive tuple implementation. Here we store the @c Head element
181 * and derive from a @c Tuple_impl containing the remaining elements
182 * (which contains the @c Tail).
183 */
184 template<std::size_t _Idx, typename _Head, typename... _Tail>
185 struct _Tuple_impl<_Idx, _Head, _Tail...>
186 : public _Tuple_impl<_Idx + 1, _Tail...>,
187 private _Head_base<_Idx, _Head>
188 {
189 template<std::size_t, typename...> friend class _Tuple_impl;
190
191 typedef _Tuple_impl<_Idx + 1, _Tail...> _Inherited;
192 typedef _Head_base<_Idx, _Head> _Base;
193
194 static constexpr _Head&
195 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
196
197 static constexpr const _Head&
198 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
Event 16: Calling '_Head_base::_M_head'
Event 17: Returning from '_Head_base::_M_head'
199
200 static constexpr _Inherited&
201 _M_tail(_Tuple_impl& __t) noexcept { return __t; }
202
203 static constexpr const _Inherited&
204 _M_tail(const _Tuple_impl& __t) noexcept { return __t; }
205
206 constexpr _Tuple_impl()
207 : _Inherited(), _Base() { }
208
209 explicit
210 constexpr _Tuple_impl(const _Head& __head, const _Tail&... __tail)
211 : _Inherited(__tail...), _Base(__head) { }
212
213 template<typename _UHead, typename... _UTail, typename = typename
214 enable_if<sizeof...(_Tail) == sizeof...(_UTail)>::type>
215 explicit
216 constexpr _Tuple_impl(_UHead&& __head, _UTail&&... __tail)
217 : _Inherited(std::forward<_UTail>(__tail)...),
218 _Base(std::forward<_UHead>(__head)) { }
219
220 constexpr _Tuple_impl(const _Tuple_impl&) = default;
221
222 constexpr
223 _Tuple_impl(_Tuple_impl&& __in)
224 noexcept(__and_<is_nothrow_move_constructible<_Head>,
225 is_nothrow_move_constructible<_Inherited>>::value)
226 : _Inherited(std::move(_M_tail(__in))),
227 _Base(std::forward<_Head>(_M_head(__in))) { }
228
229 template<typename... _UElements>
230 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UElements...>& __in)
231 : _Inherited(_Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
232 _Base(_Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
233
234 template<typename _UHead, typename... _UTails>
235 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
236 : _Inherited(std::move
237 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
238 _Base(std::forward<_UHead>
239 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
240
241 template<typename _Alloc>
242 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
243 : _Inherited(__tag, __a),
244 _Base(__tag, __use_alloc<_Head>(__a)) { }
245
246 template<typename _Alloc>
247 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
248 const _Head& __head, const _Tail&... __tail)
249 : _Inherited(__tag, __a, __tail...),
250 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
251
252 template<typename _Alloc, typename _UHead, typename... _UTail,
253 typename = typename enable_if<sizeof...(_Tail)
254 == sizeof...(_UTail)>::type>
255 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
256 _UHead&& __head, _UTail&&... __tail)
257 : _Inherited(__tag, __a, std::forward<_UTail>(__tail)...),
258 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
259 std::forward<_UHead>(__head)) { }
260
261 template<typename _Alloc>
262 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
263 const _Tuple_impl& __in)
264 : _Inherited(__tag, __a, _M_tail(__in)),
265 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
266
267 template<typename _Alloc>
268 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
269 _Tuple_impl&& __in)
270 : _Inherited(__tag, __a, std::move(_M_tail(__in))),
271 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
272 std::forward<_Head>(_M_head(__in))) { }
273
274 template<typename _Alloc, typename... _UElements>
275 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
276 const _Tuple_impl<_Idx, _UElements...>& __in)
277 : _Inherited(__tag, __a,
278 _Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
279 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
280 _Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
281
282 template<typename _Alloc, typename _UHead, typename... _UTails>
283 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
284 _Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
285 : _Inherited(__tag, __a, std::move
286 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
287 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
288 std::forward<_UHead>
289 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
290
291 _Tuple_impl&
292 operator=(const _Tuple_impl& __in)
293 {
294 _M_head(*this) = _M_head(__in);
295 _M_tail(*this) = _M_tail(__in);
296 return *this;
297 }
298
299 _Tuple_impl&
300 operator=(_Tuple_impl&& __in)
301 noexcept(__and_<is_nothrow_move_assignable<_Head>,
302 is_nothrow_move_assignable<_Inherited>>::value)
303 {
304 _M_head(*this) = std::forward<_Head>(_M_head(__in));
305 _M_tail(*this) = std::move(_M_tail(__in));
306 return *this;
307 }
308
309 template<typename... _UElements>
310 _Tuple_impl&
311 operator=(const _Tuple_impl<_Idx, _UElements...>& __in)
312 {
313 _M_head(*this) = _Tuple_impl<_Idx, _UElements...>::_M_head(__in);
314 _M_tail(*this) = _Tuple_impl<_Idx, _UElements...>::_M_tail(__in);
315 return *this;
316 }
317
318 template<typename _UHead, typename... _UTails>
319 _Tuple_impl&
320 operator=(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
321 {
322 _M_head(*this) = std::forward<_UHead>
323 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in));
324 _M_tail(*this) = std::move
325 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in));
326 return *this;
327 }
328
329 protected:
330 void
331 _M_swap(_Tuple_impl& __in)
332 noexcept(__is_nothrow_swappable<_Head>::value
333 && noexcept(_M_tail(__in)._M_swap(_M_tail(__in))))
334 {
335 using std::swap;
336 swap(_M_head(*this), _M_head(__in));
337 _Inherited::_M_swap(_M_tail(__in));
338 }
339 };
340
341 // Basis case of inheritance recursion.
342 template<std::size_t _Idx, typename _Head>
343 struct _Tuple_impl<_Idx, _Head>
344 : private _Head_base<_Idx, _Head>
345 {
346 template<std::size_t, typename...> friend class _Tuple_impl;
347
348 typedef _Head_base<_Idx, _Head> _Base;
349
350 static constexpr _Head&
351 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
352
353 static constexpr const _Head&
354 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
355
356 constexpr _Tuple_impl()
357 : _Base() { }
358
359 explicit
360 constexpr _Tuple_impl(const _Head& __head)
361 : _Base(__head) { }
362
363 template<typename _UHead>
364 explicit
365 constexpr _Tuple_impl(_UHead&& __head)
366 : _Base(std::forward<_UHead>(__head)) { }
367
368 constexpr _Tuple_impl(const _Tuple_impl&) = default;
369
370 constexpr
371 _Tuple_impl(_Tuple_impl&& __in)
372 noexcept(is_nothrow_move_constructible<_Head>::value)
373 : _Base(std::forward<_Head>(_M_head(__in))) { }
374
375 template<typename _UHead>
376 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UHead>& __in)
377 : _Base(_Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
378
379 template<typename _UHead>
380 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead>&& __in)
381 : _Base(std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
382 { }
383
384 template<typename _Alloc>
385 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
386 : _Base(__tag, __use_alloc<_Head>(__a)) { }
387
388 template<typename _Alloc>
389 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
390 const _Head& __head)
391 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
392
393 template<typename _Alloc, typename _UHead>
394 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
395 _UHead&& __head)
396 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
397 std::forward<_UHead>(__head)) { }
398
399 template<typename _Alloc>
400 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
401 const _Tuple_impl& __in)
402 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
403
404 template<typename _Alloc>
405 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
406 _Tuple_impl&& __in)
407 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
408 std::forward<_Head>(_M_head(__in))) { }
409
410 template<typename _Alloc, typename _UHead>
411 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
412 const _Tuple_impl<_Idx, _UHead>& __in)
413 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
414 _Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
415
416 template<typename _Alloc, typename _UHead>
417 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
418 _Tuple_impl<_Idx, _UHead>&& __in)
419 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
420 std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
421 { }
422
423 _Tuple_impl&
424 operator=(const _Tuple_impl& __in)
425 {
426 _M_head(*this) = _M_head(__in);
427 return *this;
428 }
429
430 _Tuple_impl&
431 operator=(_Tuple_impl&& __in)
432 noexcept(is_nothrow_move_assignable<_Head>::value)
433 {
434 _M_head(*this) = std::forward<_Head>(_M_head(__in));
435 return *this;
436 }
437
438 template<typename _UHead>
439 _Tuple_impl&
440 operator=(const _Tuple_impl<_Idx, _UHead>& __in)
441 {
442 _M_head(*this) = _Tuple_impl<_Idx, _UHead>::_M_head(__in);
443 return *this;
444 }
445
446 template<typename _UHead>
447 _Tuple_impl&
448 operator=(_Tuple_impl<_Idx, _UHead>&& __in)
449 {
450 _M_head(*this)
451 = std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in));
452 return *this;
453 }
454
455 protected:
456 void
457 _M_swap(_Tuple_impl& __in)
458 noexcept(__is_nothrow_swappable<_Head>::value)
459 {
460 using std::swap;
461 swap(_M_head(*this), _M_head(__in));
462 }
463 };
464
465 // Concept utility functions, reused in conditionally-explicit
466 // constructors.
467 template<bool, typename... _Elements>
468 struct _TC
469 {
470 template<typename... _UElements>
471 static constexpr bool _ConstructibleTuple()
472 {
473 return __and_<is_constructible<_Elements, const _UElements&>...>::value;
474 }
475
476 template<typename... _UElements>
477 static constexpr bool _ImplicitlyConvertibleTuple()
478 {
479 return __and_<is_convertible<const _UElements&, _Elements>...>::value;
480 }
481
482 template<typename... _UElements>
483 static constexpr bool _MoveConstructibleTuple()
484 {
485 return __and_<is_constructible<_Elements, _UElements&&>...>::value;
486 }
487
488 template<typename... _UElements>
489 static constexpr bool _ImplicitlyMoveConvertibleTuple()
490 {
491 return __and_<is_convertible<_UElements&&, _Elements>...>::value;
492 }
493
494 template<typename _SrcTuple>
495 static constexpr bool _NonNestedTuple()
496 {
497 return __and_<__not_<is_same<tuple<_Elements...>,
498 typename remove_cv<
499 typename remove_reference<_SrcTuple>::type
500 >::type>>,
501 __not_<is_convertible<_SrcTuple, _Elements...>>,
502 __not_<is_constructible<_Elements..., _SrcTuple>>
503 >::value;
504 }
505 template<typename... _UElements>
506 static constexpr bool _NotSameTuple()
507 {
508 return __not_<is_same<tuple<_Elements...>,
509 typename remove_const<
510 typename remove_reference<_UElements...>::type
511 >::type>>::value;
512 }
513 };
514
515 template<typename... _Elements>
516 struct _TC<false, _Elements...>
517 {
518 template<typename... _UElements>
519 static constexpr bool _ConstructibleTuple()
520 {
521 return false;
522 }
523
524 template<typename... _UElements>
525 static constexpr bool _ImplicitlyConvertibleTuple()
526 {
527 return false;
528 }
529
530 template<typename... _UElements>
531 static constexpr bool _MoveConstructibleTuple()
532 {
533 return false;
534 }
535
536 template<typename... _UElements>
537 static constexpr bool _ImplicitlyMoveConvertibleTuple()
538 {
539 return false;
540 }
541
542 template<typename... _UElements>
543 static constexpr bool _NonNestedTuple()
544 {
545 return true;
546 }
547 template<typename... _UElements>
548 static constexpr bool _NotSameTuple()
549 {
550 return true;
551 }
552 };
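The payoff of these predicates is that tuple's converting constructors are
explicit exactly when one of the element conversions would be (editor's
sketch, assuming this header's conditionally-explicit constructors from
N4387):

    #include <memory>
    #include <tuple>

    std::tuple<long, double> a = {1, 2.0};   // OK: both conversions implicit
    // std::tuple<std::unique_ptr<int>> b = {new int(1)};   // ill-formed:
    //   unique_ptr's constructor is explicit, so the tuple one is too
    std::tuple<std::unique_ptr<int>> b{std::unique_ptr<int>(new int(1))};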
553
554 /// Primary class template, tuple
555 template<typename... _Elements>
556 class tuple : public _Tuple_impl<0, _Elements...>
557 {
558 typedef _Tuple_impl<0, _Elements...> _Inherited;
559
560 // Used for constraining the default constructor so
561 // that it becomes dependent on the constraints.
562 template<typename _Dummy>
563 struct _TC2
564 {
565 static constexpr bool _DefaultConstructibleTuple()
566 {
567 return __and_<is_default_constructible<_Elements>...>::value;
568 }
569 static constexpr bool _ImplicitlyDefaultConstructibleTuple()
570 {
571 return __and_<__is_implicitly_default_constructible<_Elements>...>
572 ::value;
573 }
574 };
575
576 public:
577 template<typename _Dummy = void,
578 typename enable_if<_TC2<_Dummy>::
579 _ImplicitlyDefaultConstructibleTuple(),
580 bool>::type = true>
581 constexpr tuple()
582 : _Inherited() { }
583
584 template<typename _Dummy = void,
585 typename enable_if<_TC2<_Dummy>::
586 _DefaultConstructibleTuple()
587 &&
588 !_TC2<_Dummy>::
589 _ImplicitlyDefaultConstructibleTuple(),
590 bool>::type = false>
591 explicit constexpr tuple()
592 : _Inherited() { }
593
594 // Shortcut for the cases where constructors taking _Elements...
595 // need to be constrained.
596 template<typename _Dummy> using _TCC =
597 _TC<is_same<_Dummy, void>::value,
598 _Elements...>;
599
600 template<typename _Dummy = void,
601 typename enable_if<
602 _TCC<_Dummy>::template
603 _ConstructibleTuple<_Elements...>()
604 && _TCC<_Dummy>::template
605 _ImplicitlyConvertibleTuple<_Elements...>()
606 && (sizeof...(_Elements) >= 1),
607 bool>::type=true>
608 constexpr tuple(const _Elements&... __elements)
609 : _Inherited(__elements...) { }
610
611 template<typename _Dummy = void,
612 typename enable_if<
613 _TCC<_Dummy>::template
614 _ConstructibleTuple<_Elements...>()
615 && !_TCC<_Dummy>::template
616 _ImplicitlyConvertibleTuple<_Elements...>()
617 && (sizeof...(_Elements) >= 1),
618 bool>::type=false>
619 explicit constexpr tuple(const _Elements&... __elements)
620 : _Inherited(__elements...) { }
621
622 // Shortcut for the cases where constructors taking _UElements...
623 // need to be constrained.
624 template<typename... _UElements> using _TMC =
625 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
626 && (_TC<(sizeof...(_UElements)==1), _Elements...>::
627 template _NotSameTuple<_UElements...>()),
628 _Elements...>;
629
630 // Shortcut for the cases where constructors taking tuple<_UElements...>
631 // need to be constrained.
632 template<typename... _UElements> using _TMCT =
633 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
634 && !is_same<tuple<_Elements...>,
635 tuple<_UElements...>>::value,
636 _Elements...>;
637
638 template<typename... _UElements, typename
639 enable_if<
640 _TMC<_UElements...>::template
641 _MoveConstructibleTuple<_UElements...>()
642 && _TMC<_UElements...>::template
643 _ImplicitlyMoveConvertibleTuple<_UElements...>()
644 && (sizeof...(_Elements) >= 1),
645 bool>::type=true>
646 constexpr tuple(_UElements&&... __elements)
647 : _Inherited(std::forward<_UElements>(__elements)...) { }
648
649 template<typename... _UElements, typename
650 enable_if<
651 _TMC<_UElements...>::template
652 _MoveConstructibleTuple<_UElements...>()
653 && !_TMC<_UElements...>::template
654 _ImplicitlyMoveConvertibleTuple<_UElements...>()
655 && (sizeof...(_Elements) >= 1),
656 bool>::type=false>
657 explicit constexpr tuple(_UElements&&... __elements)
658 : _Inherited(std::forward<_UElements>(__elements)...) { }
659
660 constexpr tuple(const tuple&) = default;
661
662 constexpr tuple(tuple&&) = default;
663
664 // Shortcut for the cases where constructors taking tuples
665 // must avoid creating temporaries.
666 template<typename _Dummy> using _TNTC =
667 _TC<is_same<_Dummy, void>::value && sizeof...(_Elements) == 1,
668 _Elements...>;
669
670 template<typename... _UElements, typename _Dummy = void, typename
671 enable_if<_TMCT<_UElements...>::template
672 _ConstructibleTuple<_UElements...>()
673 && _TMCT<_UElements...>::template
674 _ImplicitlyConvertibleTuple<_UElements...>()
675 && _TNTC<_Dummy>::template
676 _NonNestedTuple<const tuple<_UElements...>&>(),
677 bool>::type=true>
678 constexpr tuple(const tuple<_UElements...>& __in)
679 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
680 { }
681
682 template<typename... _UElements, typename _Dummy = void, typename
683 enable_if<_TMCT<_UElements...>::template
684 _ConstructibleTuple<_UElements...>()
685 && !_TMCT<_UElements...>::template
686 _ImplicitlyConvertibleTuple<_UElements...>()
687 && _TNTC<_Dummy>::template
688 _NonNestedTuple<const tuple<_UElements...>&>(),
689 bool>::type=false>
690 explicit constexpr tuple(const tuple<_UElements...>& __in)
691 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
692 { }
693
694 template<typename... _UElements, typename _Dummy = void, typename
695 enable_if<_TMCT<_UElements...>::template
696 _MoveConstructibleTuple<_UElements...>()
697 && _TMCT<_UElements...>::template
698 _ImplicitlyMoveConvertibleTuple<_UElements...>()
699 && _TNTC<_Dummy>::template
700 _NonNestedTuple<tuple<_UElements...>&&>(),
701 bool>::type=true>
702 constexpr tuple(tuple<_UElements...>&& __in)
703 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
704
705 template<typename... _UElements, typename _Dummy = void, typename
706 enable_if<_TMCT<_UElements...>::template
707 _MoveConstructibleTuple<_UElements...>()
708 && !_TMCT<_UElements...>::template
709 _ImplicitlyMoveConvertibleTuple<_UElements...>()
710 && _TNTC<_Dummy>::template
711 _NonNestedTuple<tuple<_UElements...>&&>(),
712 bool>::type=false>
713 explicit constexpr tuple(tuple<_UElements...>&& __in)
714 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
715
716 // Allocator-extended constructors.
717
718 template<typename _Alloc>
719 tuple(allocator_arg_t __tag, const _Alloc& __a)
720 : _Inherited(__tag, __a) { }
721
722 template<typename _Alloc, typename _Dummy = void,
723 typename enable_if<
724 _TCC<_Dummy>::template
725 _ConstructibleTuple<_Elements...>()
726 && _TCC<_Dummy>::template
727 _ImplicitlyConvertibleTuple<_Elements...>(),
728 bool>::type=true>
729 tuple(allocator_arg_t __tag, const _Alloc& __a,
730 const _Elements&... __elements)
731 : _Inherited(__tag, __a, __elements...) { }
732
733 template<typename _Alloc, typename _Dummy = void,
734 typename enable_if<
735 _TCC<_Dummy>::template
736 _ConstructibleTuple<_Elements...>()
737 && !_TCC<_Dummy>::template
738 _ImplicitlyConvertibleTuple<_Elements...>(),
739 bool>::type=false>
740 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
741 const _Elements&... __elements)
742 : _Inherited(__tag, __a, __elements...) { }
743
744 template<typename _Alloc, typename... _UElements, typename
745 enable_if<_TMC<_UElements...>::template
746 _MoveConstructibleTuple<_UElements...>()
747 && _TMC<_UElements...>::template
748 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
749 bool>::type=true>
750 tuple(allocator_arg_t __tag, const _Alloc& __a,
751 _UElements&&... __elements)
752 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
753 { }
754
755 template<typename _Alloc, typename... _UElements, typename
756 enable_if<_TMC<_UElements...>::template
757 _MoveConstructibleTuple<_UElements...>()
758 && !_TMC<_UElements...>::template
759 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
760 bool>::type=false>
761 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
762 _UElements&&... __elements)
763 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
764 { }
765
766 template<typename _Alloc>
767 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
768 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
769
770 template<typename _Alloc>
771 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
772 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
773
774 template<typename _Alloc, typename _Dummy = void,
775 typename... _UElements, typename
776 enable_if<_TMCT<_UElements...>::template
777 _ConstructibleTuple<_UElements...>()
778 && _TMCT<_UElements...>::template
779 _ImplicitlyConvertibleTuple<_UElements...>()
780 && _TNTC<_Dummy>::template
781 _NonNestedTuple<tuple<_UElements...>&&>(),
782 bool>::type=true>
783 tuple(allocator_arg_t __tag, const _Alloc& __a,
784 const tuple<_UElements...>& __in)
785 : _Inherited(__tag, __a,
786 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
787 { }
788
789 template<typename _Alloc, typename _Dummy = void,
790 typename... _UElements, typename
791 enable_if<_TMCT<_UElements...>::template
792 _ConstructibleTuple<_UElements...>()
793 && !_TMCT<_UElements...>::template
794 _ImplicitlyConvertibleTuple<_UElements...>()
795 && _TNTC<_Dummy>::template
796 _NonNestedTuple<tuple<_UElements...>&&>(),
797 bool>::type=false>
798 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
799 const tuple<_UElements...>& __in)
800 : _Inherited(__tag, __a,
801 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
802 { }
803
804 template<typename _Alloc, typename _Dummy = void,
805 typename... _UElements, typename
806 enable_if<_TMCT<_UElements...>::template
807 _MoveConstructibleTuple<_UElements...>()
808 && _TMCT<_UElements...>::template
809 _ImplicitlyMoveConvertibleTuple<_UElements...>()
810 && _TNTC<_Dummy>::template
811 _NonNestedTuple<tuple<_UElements...>&&>(),
812 bool>::type=true>
813 tuple(allocator_arg_t __tag, const _Alloc& __a,
814 tuple<_UElements...>&& __in)
815 : _Inherited(__tag, __a,
816 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
817 { }
818
819 template<typename _Alloc, typename _Dummy = void,
820 typename... _UElements, typename
821 enable_if<_TMCT<_UElements...>::template
822 _MoveConstructibleTuple<_UElements...>()
823 && !_TMCT<_UElements...>::template
824 _ImplicitlyMoveConvertibleTuple<_UElements...>()
825 && _TNTC<_Dummy>::template
826 _NonNestedTuple<tuple<_UElements...>&&>(),
827 bool>::type=false>
828 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
829 tuple<_UElements...>&& __in)
830 : _Inherited(__tag, __a,
831 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
832 { }
833
834 tuple&
835 operator=(const tuple& __in)
836 {
837 static_cast<_Inherited&>(*this) = __in;
838 return *this;
839 }
840
841 tuple&
842 operator=(tuple&& __in)
843 noexcept(is_nothrow_move_assignable<_Inherited>::value)
844 {
845 static_cast<_Inherited&>(*this) = std::move(__in);
846 return *this;
847 }
848
849 template<typename... _UElements>
850 typename
851 enable_if<sizeof...(_UElements)
852 == sizeof...(_Elements), tuple&>::type
853 operator=(const tuple<_UElements...>& __in)
854 {
855 static_cast<_Inherited&>(*this) = __in;
856 return *this;
857 }
858
859 template<typename... _UElements>
860 typename
861 enable_if<sizeof...(_UElements)
862 == sizeof...(_Elements), tuple&>::type
863 operator=(tuple<_UElements...>&& __in)
864 {
865 static_cast<_Inherited&>(*this) = std::move(__in);
866 return *this;
867 }
868
869 void
870 swap(tuple& __in)
871 noexcept(noexcept(__in._M_swap(__in)))
872 { _Inherited::_M_swap(__in); }
873 };
874
875#if __cpp_deduction_guides >= 201606
876 template<typename... _UTypes>
877 tuple(_UTypes...) -> tuple<_UTypes...>;
878 template<typename _T1, typename _T2>
879 tuple(pair<_T1, _T2>) -> tuple<_T1, _T2>;
880 template<typename _Alloc, typename... _UTypes>
881 tuple(allocator_arg_t, _Alloc, _UTypes...) -> tuple<_UTypes...>;
882 template<typename _Alloc, typename _T1, typename _T2>
883 tuple(allocator_arg_t, _Alloc, pair<_T1, _T2>) -> tuple<_T1, _T2>;
884 template<typename _Alloc, typename... _UTypes>
885 tuple(allocator_arg_t, _Alloc, tuple<_UTypes...>) -> tuple<_UTypes...>;
886#endif
887
888 // Explicit specialization, zero-element tuple.
889 template<>
890 class tuple<>
891 {
892 public:
893 void swap(tuple&) noexcept { /* no-op */ }
894 // We need the default since we're going to define no-op
895 // allocator constructors.
896 tuple() = default;
897 // No-op allocator constructors.
898 template<typename _Alloc>
899 tuple(allocator_arg_t, const _Alloc&) { }
900 template<typename _Alloc>
901 tuple(allocator_arg_t, const _Alloc&, const tuple&) { }
902 };
903
904 /// Partial specialization, 2-element tuple.
905 /// Includes construction and assignment from a pair.
906 template<typename _T1, typename _T2>
907 class tuple<_T1, _T2> : public _Tuple_impl<0, _T1, _T2>
908 {
909 typedef _Tuple_impl<0, _T1, _T2> _Inherited;
910
911 public:
912 template <typename _U1 = _T1,
913 typename _U2 = _T2,
914 typename enable_if<__and_<
915 __is_implicitly_default_constructible<_U1>,
916 __is_implicitly_default_constructible<_U2>>
917 ::value, bool>::type = true>
918
919 constexpr tuple()
920 : _Inherited() { }
921
922 template <typename _U1 = _T1,
923 typename _U2 = _T2,
924 typename enable_if<
925 __and_<
926 is_default_constructible<_U1>,
927 is_default_constructible<_U2>,
928 __not_<
929 __and_<__is_implicitly_default_constructible<_U1>,
930 __is_implicitly_default_constructible<_U2>>>>
931 ::value, bool>::type = false>
932
933 explicit constexpr tuple()
934 : _Inherited() { }
935
936 // Shortcut for the cases where constructors taking _T1, _T2
937 // need to be constrained.
938 template<typename _Dummy> using _TCC =
939 _TC<is_same<_Dummy, void>::value, _T1, _T2>;
940
941 template<typename _Dummy = void, typename
942 enable_if<_TCC<_Dummy>::template
943 _ConstructibleTuple<_T1, _T2>()
944 && _TCC<_Dummy>::template
945 _ImplicitlyConvertibleTuple<_T1, _T2>(),
946 bool>::type = true>
947 constexpr tuple(const _T1& __a1, const _T2& __a2)
948 : _Inherited(__a1, __a2) { }
949
950 template<typename _Dummy = void, typename
951 enable_if<_TCC<_Dummy>::template
952 _ConstructibleTuple<_T1, _T2>()
953 && !_TCC<_Dummy>::template
954 _ImplicitlyConvertibleTuple<_T1, _T2>(),
955 bool>::type = false>
956 explicit constexpr tuple(const _T1& __a1, const _T2& __a2)
957 : _Inherited(__a1, __a2) { }
958
959 // Shortcut for the cases where constructors taking _U1, _U2
960 // need to be constrained.
961 using _TMC = _TC<true, _T1, _T2>;
962
963 template<typename _U1, typename _U2, typename
964 enable_if<_TMC::template
965 _MoveConstructibleTuple<_U1, _U2>()
966 && _TMC::template
967 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
968 && !is_same<typename decay<_U1>::type,
969 allocator_arg_t>::value,
970 bool>::type = true>
971 constexpr tuple(_U1&& __a1, _U2&& __a2)
972 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
973
974 template<typename _U1, typename _U2, typename
975 enable_if<_TMC::template
976 _MoveConstructibleTuple<_U1, _U2>()
977 && !_TMC::template
978 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
979 && !is_same<typename decay<_U1>::type,
980 allocator_arg_t>::value,
981 bool>::type = false>
982 explicit constexpr tuple(_U1&& __a1, _U2&& __a2)
983 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
984
985 constexpr tuple(const tuple&) = default;
986
987 constexpr tuple(tuple&&) = default;
988
989 template<typename _U1, typename _U2, typename
990 enable_if<_TMC::template
991 _ConstructibleTuple<_U1, _U2>()
992 && _TMC::template
993 _ImplicitlyConvertibleTuple<_U1, _U2>(),
994 bool>::type = true>
995 constexpr tuple(const tuple<_U1, _U2>& __in)
996 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
997
998 template<typename _U1, typename _U2, typename
999 enable_if<_TMC::template
1000 _ConstructibleTuple<_U1, _U2>()
1001 && !_TMC::template
1002 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1003 bool>::type = false>
1004 explicit constexpr tuple(const tuple<_U1, _U2>& __in)
1005 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
1006
1007 template<typename _U1, typename _U2, typename
1008 enable_if<_TMC::template
1009 _MoveConstructibleTuple<_U1, _U2>()
1010 && _TMC::template
1011 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1012 bool>::type = true>
1013 constexpr tuple(tuple<_U1, _U2>&& __in)
1014 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1015
1016 template<typename _U1, typename _U2, typename
1017 enable_if<_TMC::template
1018 _MoveConstructibleTuple<_U1, _U2>()
1019 && !_TMC::template
1020 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1021 bool>::type = false>
1022 explicit constexpr tuple(tuple<_U1, _U2>&& __in)
1023 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1024
1025 template<typename _U1, typename _U2, typename
1026 enable_if<_TMC::template
1027 _ConstructibleTuple<_U1, _U2>()
1028 && _TMC::template
1029 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1030 bool>::type = true>
1031 constexpr tuple(const pair<_U1, _U2>& __in)
1032 : _Inherited(__in.first, __in.second) { }
1033
1034 template<typename _U1, typename _U2, typename
1035 enable_if<_TMC::template
1036 _ConstructibleTuple<_U1, _U2>()
1037 && !_TMC::template
1038 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1039 bool>::type = false>
1040 explicit constexpr tuple(const pair<_U1, _U2>& __in)
1041 : _Inherited(__in.first, __in.second) { }
1042
1043 template<typename _U1, typename _U2, typename
1044 enable_if<_TMC::template
1045 _MoveConstructibleTuple<_U1, _U2>()
1046 && _TMC::template
1047 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1048 bool>::type = true>
1049 constexpr tuple(pair<_U1, _U2>&& __in)
1050 : _Inherited(std::forward<_U1>(__in.first),
1051 std::forward<_U2>(__in.second)) { }
1052
1053 template<typename _U1, typename _U2, typename
1054 enable_if<_TMC::template
1055 _MoveConstructibleTuple<_U1, _U2>()
1056 && !_TMC::template
1057 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1058 bool>::type = false>
1059 explicit constexpr tuple(pair<_U1, _U2>&& __in)
1060 : _Inherited(std::forward<_U1>(__in.first),
1061 std::forward<_U2>(__in.second)) { }
1062
1063 // Allocator-extended constructors.
1064
1065 template<typename _Alloc>
1066 tuple(allocator_arg_t __tag, const _Alloc& __a)
1067 : _Inherited(__tag, __a) { }
1068
1069 template<typename _Alloc, typename _Dummy = void,
1070 typename enable_if<
1071 _TCC<_Dummy>::template
1072 _ConstructibleTuple<_T1, _T2>()
1073 && _TCC<_Dummy>::template
1074 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1075 bool>::type=true>
1076
1077 tuple(allocator_arg_t __tag, const _Alloc& __a,
1078 const _T1& __a1, const _T2& __a2)
1079 : _Inherited(__tag, __a, __a1, __a2) { }
1080
1081 template<typename _Alloc, typename _Dummy = void,
1082 typename enable_if<
1083 _TCC<_Dummy>::template
1084 _ConstructibleTuple<_T1, _T2>()
1085 && !_TCC<_Dummy>::template
1086 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1087 bool>::type=false>
1088
1089 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1090 const _T1& __a1, const _T2& __a2)
1091 : _Inherited(__tag, __a, __a1, __a2) { }
1092
1093 template<typename _Alloc, typename _U1, typename _U2, typename
1094 enable_if<_TMC::template
1095 _MoveConstructibleTuple<_U1, _U2>()
1096 && _TMC::template
1097 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1098 bool>::type = true>
1099 tuple(allocator_arg_t __tag, const _Alloc& __a, _U1&& __a1, _U2&& __a2)
1100 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1101 std::forward<_U2>(__a2)) { }
1102
1103 template<typename _Alloc, typename _U1, typename _U2, typename
1104 enable_if<_TMC::template
1105 _MoveConstructibleTuple<_U1, _U2>()
1106 && !_TMC::template
1107 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1108 bool>::type = false>
1109 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1110 _U1&& __a1, _U2&& __a2)
1111 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1112 std::forward<_U2>(__a2)) { }
1113
1114 template<typename _Alloc>
1115 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
1116 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
1117
1118 template<typename _Alloc>
1119 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
1120 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
1121
1122 template<typename _Alloc, typename _U1, typename _U2, typename
1123 enable_if<_TMC::template
1124 _ConstructibleTuple<_U1, _U2>()
1125 && _TMC::template
1126 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1127 bool>::type = true>
1128 tuple(allocator_arg_t __tag, const _Alloc& __a,
1129 const tuple<_U1, _U2>& __in)
1130 : _Inherited(__tag, __a,
1131 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1132 { }
1133
1134 template<typename _Alloc, typename _U1, typename _U2, typename
1135 enable_if<_TMC::template
1136 _ConstructibleTuple<_U1, _U2>()
1137 && !_TMC::template
1138 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1139 bool>::type = false>
1140 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1141 const tuple<_U1, _U2>& __in)
1142 : _Inherited(__tag, __a,
1143 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1144 { }
1145
1146 template<typename _Alloc, typename _U1, typename _U2, typename
1147 enable_if<_TMC::template
1148 _MoveConstructibleTuple<_U1, _U2>()
1149 && _TMC::template
1150 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1151 bool>::type = true>
1152 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple<_U1, _U2>&& __in)
1153 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1154 { }
1155
1156 template<typename _Alloc, typename _U1, typename _U2, typename
1157 enable_if<_TMC::template
1158 _MoveConstructibleTuple<_U1, _U2>()
1159 && !_TMC::template
1160 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1161 bool>::type = false>
1162 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1163 tuple<_U1, _U2>&& __in)
1164 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1165 { }
1166
1167 template<typename _Alloc, typename _U1, typename _U2, typename
1168 enable_if<_TMC::template
1169 _ConstructibleTuple<_U1, _U2>()
1170 && _TMC::template
1171 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1172 bool>::type = true>
1173 tuple(allocator_arg_t __tag, const _Alloc& __a,
1174 const pair<_U1, _U2>& __in)
1175 : _Inherited(__tag, __a, __in.first, __in.second) { }
1176
1177 template<typename _Alloc, typename _U1, typename _U2, typename
1178 enable_if<_TMC::template
1179 _ConstructibleTuple<_U1, _U2>()
1180 && !_TMC::template
1181 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1182 bool>::type = false>
1183 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1184 const pair<_U1, _U2>& __in)
1185 : _Inherited(__tag, __a, __in.first, __in.second) { }
1186
1187 template<typename _Alloc, typename _U1, typename _U2, typename
1188 enable_if<_TMC::template
1189 _MoveConstructibleTuple<_U1, _U2>()
1190 && _TMC::template
1191 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1192 bool>::type = true>
1193 tuple(allocator_arg_t __tag, const _Alloc& __a, pair<_U1, _U2>&& __in)
1194 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1195 std::forward<_U2>(__in.second)) { }
1196
1197 template<typename _Alloc, typename _U1, typename _U2, typename
1198 enable_if<_TMC::template
1199 _MoveConstructibleTuple<_U1, _U2>()
1200 && !_TMC::template
1201 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1202 bool>::type = false>
1203 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1204 pair<_U1, _U2>&& __in)
1205 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1206 std::forward<_U2>(__in.second)) { }
1207
1208 tuple&
1209 operator=(const tuple& __in)
1210 {
1211 static_cast<_Inherited&>(*this) = __in;
1212 return *this;
1213 }
1214
1215 tuple&
1216 operator=(tuple&& __in)
1217 noexcept(is_nothrow_move_assignable<_Inherited>::value)
1218 {
1219 static_cast<_Inherited&>(*this) = std::move(__in);
1220 return *this;
1221 }
1222
1223 template<typename _U1, typename _U2>
1224 tuple&
1225 operator=(const tuple<_U1, _U2>& __in)
1226 {
1227 static_cast<_Inherited&>(*this) = __in;
1228 return *this;
1229 }
1230
1231 template<typename _U1, typename _U2>
1232 tuple&
1233 operator=(tuple<_U1, _U2>&& __in)
1234 {
1235 static_cast<_Inherited&>(*this) = std::move(__in);
1236 return *this;
1237 }
1238
1239 template<typename _U1, typename _U2>
1240 tuple&
1241 operator=(const pair<_U1, _U2>& __in)
1242 {
1243 this->_M_head(*this) = __in.first;
1244 this->_M_tail(*this)._M_head(*this) = __in.second;
1245 return *this;
1246 }
1247
1248 template<typename _U1, typename _U2>
1249 tuple&
1250 operator=(pair<_U1, _U2>&& __in)
1251 {
1252 this->_M_head(*this) = std::forward<_U1>(__in.first);
1253 this->_M_tail(*this)._M_head(*this) = std::forward<_U2>(__in.second);
1254 return *this;
1255 }
1256
1257 void
1258 swap(tuple& __in)
1259 noexcept(noexcept(__in._M_swap(__in)))
1260 { _Inherited::_M_swap(__in); }
1261 };
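A short illustration of the pair interoperability this specialization adds
(editor's sketch):

    #include <tuple>
    #include <utility>

    void pair_interop_demo()
    {
      std::pair<int, double> p(1, 2.5);
      std::tuple<int, double> t(p);      // construct from a pair
      t = std::make_pair(3, 4.5);        // assign from a pair
      std::tuple<long, double> u = p;    // converting construction
      (void)t; (void)u;
    }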
1262
1263
1264 /// class tuple_size
1265 template<typename... _Elements>
1266 struct tuple_size<tuple<_Elements...>>
1267 : public integral_constant<std::size_t, sizeof...(_Elements)> { };
1268
1269#if __cplusplus > 201402L
1270 template <typename _Tp>
1271 inline constexpr size_t tuple_size_v = tuple_size<_Tp>::value;
1272#endif
1273
1274 /**
1275 * Recursive case for tuple_element: strip off the first element in
1276 * the tuple and retrieve the (i-1)th element of the remaining tuple.
1277 */
1278 template<std::size_t __i, typename _Head, typename... _Tail>
1279 struct tuple_element<__i, tuple<_Head, _Tail...> >
1280 : tuple_element<__i - 1, tuple<_Tail...> > { };
1281
1282 /**
1283 * Basis case for tuple_element: The first element is the one we're seeking.
1284 */
1285 template<typename _Head, typename... _Tail>
1286 struct tuple_element<0, tuple<_Head, _Tail...> >
1287 {
1288 typedef _Head type;
1289 };
1290
1291 /**
1292 * Error case for tuple_element: invalid index.
1293 */
1294 template<size_t __i>
1295 struct tuple_element<__i, tuple<>>
1296 {
1297 static_assert(__i < tuple_size<tuple<>>::value,
1298 "tuple index is in range");
1299 };
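For instance, the recursion strips one element per step until the sought
index reaches zero (editor's sketch):

    #include <tuple>
    #include <type_traits>

    // tuple_element<1, ...> peels off int, then the basis case yields char:
    static_assert(std::is_same<
                    std::tuple_element<1, std::tuple<int, char, double>>::type,
                    char>::value,
                  "recursion terminates at the basis case");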
1300
1301 template<std::size_t __i, typename _Head, typename... _Tail>
1302 constexpr _Head&
1303 __get_helper(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1304 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1305
1306 template<std::size_t __i, typename _Head, typename... _Tail>
1307 constexpr const _Head&
1308 __get_helper(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1309 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
Event 15: Calling '_Tuple_impl::_M_head'
Event 18: Returning from '_Tuple_impl::_M_head'
1310
1311 /// Return a reference to the ith element of a tuple.
1312 template<std::size_t __i, typename... _Elements>
1313 constexpr __tuple_element_t<__i, tuple<_Elements...>>&
1314 get(tuple<_Elements...>& __t) noexcept
1315 { return std::__get_helper<__i>(__t); }
1316
1317 /// Return a const reference to the ith element of a const tuple.
1318 template<std::size_t __i, typename... _Elements>
1319 constexpr const __tuple_element_t<__i, tuple<_Elements...>>&
1320 get(const tuple<_Elements...>& __t) noexcept
1321 { return std::__get_helper<__i>(__t); }
Event 14: Calling '__get_helper<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
Event 19: Returning from '__get_helper<0, llvm::DomTreeNodeBase<llvm::MachineBasicBlock> *, std::default_delete<llvm::DomTreeNodeBase<llvm::MachineBasicBlock> >>'
1322
1323 /// Return an rvalue reference to the ith element of a tuple rvalue.
1324 template<std::size_t __i, typename... _Elements>
1325 constexpr __tuple_element_t<__i, tuple<_Elements...>>&&
1326 get(tuple<_Elements...>&& __t) noexcept
1327 {
1328 typedef __tuple_element_t<__i, tuple<_Elements...>> __element_type;
1329 return std::forward<__element_type&&>(std::get<__i>(__t));
1330 }
1331
1332 /// Return a const rvalue reference to the ith element of a const tuple rvalue.
1333 template<std::size_t __i, typename... _Elements>
1334 constexpr const __tuple_element_t<__i, tuple<_Elements...>>&&
1335 get(const tuple<_Elements...>&& __t) noexcept
1336 {
1337 typedef __tuple_element_t<__i, tuple<_Elements...>> __element_type;
1338 return std::forward<const __element_type&&>(std::get<__i>(__t));
1339 }
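These overloads back the familiar std::get interface, which is also where
the analyzer's path above enters this header (editor's sketch):

    #include <tuple>

    int get_demo()
    {
      std::tuple<int, char> t(42, 'x');
      std::get<0>(t) = 7;      // lvalue overload returns an assignable ref
      return std::get<0>(t);   // 7
    }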
1340
1341#if __cplusplus > 201103L
1342
1343#define __cpp_lib_tuples_by_type 201304
1344
1345 template<typename _Head, size_t __i, typename... _Tail>
1346 constexpr _Head&
1347 __get_helper2(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1348 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1349
1350 template<typename _Head, size_t __i, typename... _Tail>
1351 constexpr const _Head&
1352 __get_helper2(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1353 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1354
1355 /// Return a reference to the unique element of type _Tp of a tuple.
1356 template <typename _Tp, typename... _Types>
1357 constexpr _Tp&
1358 get(tuple<_Types...>& __t) noexcept
1359 { return std::__get_helper2<_Tp>(__t); }
1360
1361 /// Return a reference to the unique element of type _Tp of a tuple rvalue.
1362 template <typename _Tp, typename... _Types>
1363 constexpr _Tp&&
1364 get(tuple<_Types...>&& __t) noexcept
1365 { return std::forward<_Tp&&>(std::__get_helper2<_Tp>(__t)); }
1366
1367 /// Return a const reference to the unique element of type _Tp of a tuple.
1368 template <typename _Tp, typename... _Types>
1369 constexpr const _Tp&
1370 get(const tuple<_Types...>& __t) noexcept
1371 { return std::__get_helper2<_Tp>(__t); }
1372
1373 /// Return a const reference to the unique element of type _Tp of
1374 /// a const tuple rvalue.
1375 template <typename _Tp, typename... _Types>
1376 constexpr const _Tp&&
1377 get(const tuple<_Types...>&& __t) noexcept
1378 { return std::forward<const _Tp&&>(std::__get_helper2<_Tp>(__t)); }
1379#endif
1380
1381 // This class performs the comparison operations on tuples
1382 template<typename _Tp, typename _Up, size_t __i, size_t __size>
1383 struct __tuple_compare
1384 {
1385 static constexpr bool
1386 __eq(const _Tp& __t, const _Up& __u)
1387 {
1388 return bool(std::get<__i>(__t) == std::get<__i>(__u))
1389 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__eq(__t, __u);
1390 }
1391
1392 static constexpr bool
1393 __less(const _Tp& __t, const _Up& __u)
1394 {
1395 return bool(std::get<__i>(__t) < std::get<__i>(__u))
1396 || (!bool(std::get<__i>(__u) < std::get<__i>(__t))
1397 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__less(__t, __u));
1398 }
1399 };
1400
1401 template<typename _Tp, typename _Up, size_t __size>
1402 struct __tuple_compare<_Tp, _Up, __size, __size>
1403 {
1404 static constexpr bool
1405 __eq(const _Tp&, const _Up&) { return true; }
1406
1407 static constexpr bool
1408 __less(const _Tp&, const _Up&) { return false; }
1409 };
1410
1411 template<typename... _TElements, typename... _UElements>
1412 constexpr bool
1413 operator==(const tuple<_TElements...>& __t,
1414 const tuple<_UElements...>& __u)
1415 {
1416 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1417 "tuple objects can only be compared if they have equal sizes.");
1418 using __compare = __tuple_compare<tuple<_TElements...>,
1419 tuple<_UElements...>,
1420 0, sizeof...(_TElements)>;
1421 return __compare::__eq(__t, __u);
1422 }
1423
1424 template<typename... _TElements, typename... _UElements>
1425 constexpr bool
1426 operator<(const tuple<_TElements...>& __t,
1427 const tuple<_UElements...>& __u)
1428 {
1429 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1430 "tuple objects can only be compared if they have equal sizes.");
1431 using __compare = __tuple_compare<tuple<_TElements...>,
1432 tuple<_UElements...>,
1433 0, sizeof...(_TElements)>;
1434 return __compare::__less(__t, __u);
1435 }
1436
1437 template<typename... _TElements, typename... _UElements>
1438 constexpr bool
1439 operator!=(const tuple<_TElements...>& __t,
1440 const tuple<_UElements...>& __u)
1441 { return !(__t == __u); }
1442
1443 template<typename... _TElements, typename... _UElements>
1444 constexpr bool
1445 operator>(const tuple<_TElements...>& __t,
1446 const tuple<_UElements...>& __u)
1447 { return __u < __t; }
1448
1449 template<typename... _TElements, typename... _UElements>
1450 constexpr bool
1451 operator<=(const tuple<_TElements...>& __t,
1452 const tuple<_UElements...>& __u)
1453 { return !(__u < __t); }
1454
1455 template<typename... _TElements, typename... _UElements>
1456 constexpr bool
1457 operator>=(const tuple<_TElements...>& __t,
1458 const tuple<_UElements...>& __u)
1459 { return !(__t < __u); }
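The net effect is the usual lexicographic ordering, short-circuiting from
left to right (editor's sketch; the static_assert relies on the constexpr
markings above):

    #include <tuple>

    // Element 0 ties, so element 1 decides the comparison:
    static_assert(std::make_tuple(1, 2) < std::make_tuple(1, 3),
                  "lexicographic: first unequal element wins");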
1460
1461 // NB: DR 705.
1462 template<typename... _Elements>
1463 constexpr tuple<typename __decay_and_strip<_Elements>::__type...>
1464 make_tuple(_Elements&&... __args)
1465 {
1466 typedef tuple<typename __decay_and_strip<_Elements>::__type...>
1467 __result_type;
1468 return __result_type(std::forward<_Elements>(__args)...);
1469 }
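Because __decay_and_strip both decays each argument and unwraps
reference_wrapper, make_tuple copies by default but can capture references
on request (editor's sketch):

    #include <functional>
    #include <tuple>
    #include <type_traits>

    void make_tuple_demo()
    {
      int n = 0;
      auto t = std::make_tuple(n, std::ref(n), "hi");
      static_assert(std::is_same<decltype(t),
                                 std::tuple<int, int&, const char*>>::value,
                    "n copied, ref(n) stripped to int&, array decayed");
      std::get<1>(t) = 5;   // writes through to n
    }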
1470
1471 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1472 // 2275. Why is forward_as_tuple not constexpr?
1473 template<typename... _Elements>
1474 constexpr tuple<_Elements&&...>
1475 forward_as_tuple(_Elements&&... __args) noexcept
1476 { return tuple<_Elements&&...>(std::forward<_Elements>(__args)...); }
1477
1478 template<size_t, typename, typename, size_t>
1479 struct __make_tuple_impl;
1480
1481 template<size_t _Idx, typename _Tuple, typename... _Tp, size_t _Nm>
1482 struct __make_tuple_impl<_Idx, tuple<_Tp...>, _Tuple, _Nm>
1483 : __make_tuple_impl<_Idx + 1,
1484 tuple<_Tp..., __tuple_element_t<_Idx, _Tuple>>,
1485 _Tuple, _Nm>
1486 { };
1487
1488 template<std::size_t _Nm, typename _Tuple, typename... _Tp>
1489 struct __make_tuple_impl<_Nm, tuple<_Tp...>, _Tuple, _Nm>
1490 {
1491 typedef tuple<_Tp...> __type;
1492 };
1493
1494 template<typename _Tuple>
1495 struct __do_make_tuple
1496 : __make_tuple_impl<0, tuple<>, _Tuple, std::tuple_size<_Tuple>::value>
1497 { };
1498
1499 // Returns the std::tuple equivalent of a tuple-like type.
1500 template<typename _Tuple>
1501 struct __make_tuple
1502 : public __do_make_tuple<typename std::remove_cv
1503 <typename std::remove_reference<_Tuple>::type>::type>
1504 { };
1505
1506 // Combines several std::tuple's into a single one.
1507 template<typename...>
1508 struct __combine_tuples;
1509
1510 template<>
1511 struct __combine_tuples<>
1512 {
1513 typedef tuple<> __type;
1514 };
1515
1516 template<typename... _Ts>
1517 struct __combine_tuples<tuple<_Ts...>>
1518 {
1519 typedef tuple<_Ts...> __type;
1520 };
1521
1522 template<typename... _T1s, typename... _T2s, typename... _Rem>
1523 struct __combine_tuples<tuple<_T1s...>, tuple<_T2s...>, _Rem...>
1524 {
1525 typedef typename __combine_tuples<tuple<_T1s..., _T2s...>,
1526 _Rem...>::__type __type;
1527 };
1528
1529 // Computes the result type of tuple_cat given a set of tuple-like types.
1530 template<typename... _Tpls>
1531 struct __tuple_cat_result
1532 {
1533 typedef typename __combine_tuples
1534 <typename __make_tuple<_Tpls>::__type...>::__type __type;
1535 };
1536
1537 // Helper to determine the index set for the first tuple-like
1538 // type of a given set.
1539 template<typename...>
1540 struct __make_1st_indices;
1541
1542 template<>
1543 struct __make_1st_indices<>
1544 {
1545 typedef std::_Index_tuple<> __type;
1546 };
1547
1548 template<typename _Tp, typename... _Tpls>
1549 struct __make_1st_indices<_Tp, _Tpls...>
1550 {
1551 typedef typename std::_Build_index_tuple<std::tuple_size<
1552 typename std::remove_reference<_Tp>::type>::value>::__type __type;
1553 };
1554
1555 // Performs the actual concatenation by step-wise expanding tuple-like
1556 // objects into the elements, which are finally forwarded into the
1557 // result tuple.
1558 template<typename _Ret, typename _Indices, typename... _Tpls>
1559 struct __tuple_concater;
1560
1561 template<typename _Ret, std::size_t... _Is, typename _Tp, typename... _Tpls>
1562 struct __tuple_concater<_Ret, std::_Index_tuple<_Is...>, _Tp, _Tpls...>
1563 {
1564 template<typename... _Us>
1565 static constexpr _Ret
1566 _S_do(_Tp&& __tp, _Tpls&&... __tps, _Us&&... __us)
1567 {
1568 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1569 typedef __tuple_concater<_Ret, __idx, _Tpls...> __next;
1570 return __next::_S_do(std::forward<_Tpls>(__tps)...,
1571 std::forward<_Us>(__us)...,
1572 std::get<_Is>(std::forward<_Tp>(__tp))...);
1573 }
1574 };
1575
1576 template<typename _Ret>
1577 struct __tuple_concater<_Ret, std::_Index_tuple<>>
1578 {
1579 template<typename... _Us>
1580 static constexpr _Ret
1581 _S_do(_Us&&... __us)
1582 {
1583 return _Ret(std::forward<_Us>(__us)...);
1584 }
1585 };
1586
1587 /// tuple_cat
1588 template<typename... _Tpls, typename = typename
1589 enable_if<__and_<__is_tuple_like<_Tpls>...>::value>::type>
1590 constexpr auto
1591 tuple_cat(_Tpls&&... __tpls)
1592 -> typename __tuple_cat_result<_Tpls...>::__type
1593 {
1594 typedef typename __tuple_cat_result<_Tpls...>::__type __ret;
1595 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1596 typedef __tuple_concater<__ret, __idx, _Tpls...> __concater;
1597 return __concater::_S_do(std::forward<_Tpls>(__tpls)...);
1598 }
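Concatenation accepts any mix of tuple-like arguments and flattens them
into a single tuple (editor's sketch):

    #include <tuple>
    #include <type_traits>
    #include <utility>

    void tuple_cat_demo()
    {
      auto t = std::tuple_cat(std::make_tuple(1, 'a'),
                              std::make_pair(2.0, 3),
                              std::tuple<>());
      static_assert(std::is_same<decltype(t),
                                 std::tuple<int, char, double, int>>::value,
                    "pairs are tuple-like; empty tuples vanish");
      (void)t;
    }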
1599
1600 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1601 // 2301. Why is tie not constexpr?
1602 /// tie
1603 template<typename... _Elements>
1604 constexpr tuple<_Elements&...>
1605 tie(_Elements&... __args) noexcept
1606 { return tuple<_Elements&...>(__args...); }
1607
1608 /// swap
1609 template<typename... _Elements>
1610 inline
1611#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
1612 // Constrained free swap overload, see p0185r1
1613 typename enable_if<__and_<__is_swappable<_Elements>...>::value
1614 >::type
1615#else
1616 void
1617#endif
1618 swap(tuple<_Elements...>& __x, tuple<_Elements...>& __y)
1619 noexcept(noexcept(__x.swap(__y)))
1620 { __x.swap(__y); }
1621
1622#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
1623 template<typename... _Elements>
1624 typename enable_if<!__and_<__is_swappable<_Elements>...>::value>::type
1625 swap(tuple<_Elements...>&, tuple<_Elements...>&) = delete;
1626#endif
1627
1628 // A class (and instance) which can be used in 'tie' when an element
1629 // of a tuple is not required.
1630 // _GLIBCXX14_CONSTEXPR
1631 // 2933. PR for LWG 2773 could be clearer
1632 struct _Swallow_assign
1633 {
1634 template<class _Tp>
1635 _GLIBCXX14_CONSTEXPR const _Swallow_assign&
1636 operator=(const _Tp&) const
1637 { return *this; }
1638 };
1639
1640 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1641 // 2773. Making std::ignore constexpr
1642 _GLIBCXX17_INLINE constexpr _Swallow_assign ignore{};
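Together, tie and ignore unpack a tuple into existing variables while
discarding unwanted positions (editor's sketch):

    #include <tuple>

    void tie_demo()
    {
      int key;
      double val;
      // tie yields a tuple of references; assigning writes through them.
      std::tie(key, std::ignore, val) = std::make_tuple(1, "skipped", 2.5);
      // key == 1, val == 2.5; the middle element was swallowed by ignore.
    }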
1643
1644 /// Partial specialization for tuples
1645 template<typename... _Types, typename _Alloc>
1646 struct uses_allocator<tuple<_Types...>, _Alloc> : true_type { };
1647
1648 // See stl_pair.h...
1649 template<class _T1, class _T2>
1650 template<typename... _Args1, typename... _Args2>
1651 inline
1652 pair<_T1, _T2>::
1653 pair(piecewise_construct_t,
1654 tuple<_Args1...> __first, tuple<_Args2...> __second)
1655 : pair(__first, __second,
1656 typename _Build_index_tuple<sizeof...(_Args1)>::__type(),
1657 typename _Build_index_tuple<sizeof...(_Args2)>::__type())
1658 { }
1659
1660 template<class _T1, class _T2>
1661 template<typename... _Args1, std::size_t... _Indexes1,
1662 typename... _Args2, std::size_t... _Indexes2>
1663 inline
1664 pair<_T1, _T2>::
1665 pair(tuple<_Args1...>& __tuple1, tuple<_Args2...>& __tuple2,
1666 _Index_tuple<_Indexes1...>, _Index_tuple<_Indexes2...>)
1667 : first(std::forward<_Args1>(std::get<_Indexes1>(__tuple1))...),
1668 second(std::forward<_Args2>(std::get<_Indexes2>(__tuple2))...)
1669 { }
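This is the machinery behind std::pair's piecewise constructor: each tuple
carries the argument list for one member, forwarded element by element
(editor's sketch):

    #include <string>
    #include <tuple>
    #include <utility>
    #include <vector>

    void piecewise_demo()
    {
      // first is built from ("ab"), second from (3, 7) -- three 7s --
      // in place, with no temporaries of the member types.
      std::pair<std::string, std::vector<int>> p(
          std::piecewise_construct,
          std::forward_as_tuple("ab"),
          std::forward_as_tuple(3, 7));
      (void)p;
    }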
1670
1671#if __cplusplus > 201402L
1672# define __cpp_lib_apply 201603
1673
1674 template <typename _Fn, typename _Tuple, size_t... _Idx>
1675 constexpr decltype(auto)
1676 __apply_impl(_Fn&& __f, _Tuple&& __t, index_sequence<_Idx...>)
1677 {
1678 return std::__invoke(std::forward<_Fn>(__f),
1679 std::get<_Idx>(std::forward<_Tuple>(__t))...);
1680 }
1681
1682 template <typename _Fn, typename _Tuple>
1683 constexpr decltype(auto)
1684 apply(_Fn&& __f, _Tuple&& __t)
1685 {
1686 using _Indices = make_index_sequence<tuple_size_v<decay_t<_Tuple>>>;
1687 return std::__apply_impl(std::forward<_Fn>(__f),
1688 std::forward<_Tuple>(__t),
1689 _Indices{});
1690 }
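Under this guard (C++17 and later), apply calls any callable with a tuple's
elements as its argument list (editor's sketch, standalone and C++17-only):

    #include <tuple>

    int apply_demo()
    {
      return std::apply([](int a, int b) { return a + b; },
                        std::make_tuple(2, 3));   // 5
    }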
1691
1692#define __cpp_lib_make_from_tuple 201606
1693
1694 template <typename _Tp, typename _Tuple, size_t... _Idx>
1695 constexpr _Tp
1696 __make_from_tuple_impl(_Tuple&& __t, index_sequence<_Idx...>)
1697 { return _Tp(std::get<_Idx>(std::forward<_Tuple>(__t))...); }
1698
1699 template <typename _Tp, typename _Tuple>
1700 constexpr _Tp
1701 make_from_tuple(_Tuple&& __t)
1702 {
1703 return __make_from_tuple_impl<_Tp>(
1704 std::forward<_Tuple>(__t),
1705 make_index_sequence<tuple_size_v<decay_t<_Tuple>>>{});
1706 }
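make_from_tuple is the constructor-shaped counterpart of apply (editor's
sketch, likewise C++17-only):

    #include <tuple>
    #include <utility>

    std::pair<int, double> make_from_tuple_demo()
    {
      // Expands the tuple's elements into pair's constructor arguments.
      return std::make_from_tuple<std::pair<int, double>>(
          std::make_tuple(1, 2.5));
    }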
1707#endif // C++17
1708
1709 /// @}
1710
1711_GLIBCXX_END_NAMESPACE_VERSION
1712} // namespace std
1713
1714#endif // C++11
1715
1716#endif // _GLIBCXX_TUPLE