Bug Summary

File: include/llvm/CodeGen/RegAllocPBQP.h
Warning: line 72, column 7
Use of zero-allocated memory

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name RegAllocPBQP.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/CodeGen -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/RegAllocPBQP.cpp -faddrsig

/build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/RegAllocPBQP.cpp

1//===- RegAllocPBQP.cpp ---- PBQP Register Allocator ----------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains a Partitioned Boolean Quadratic Programming (PBQP) based
11// register allocator for LLVM. This allocator works by constructing a PBQP
12// problem representing the register allocation problem under consideration,
13// solving this using a PBQP solver, and mapping the solution back to a
14// register assignment. If any variables are selected for spilling then spill
15// code is inserted and the process repeated.
16//
17// The PBQP solver (pbqp.c) provided for this allocator uses a heuristic tuned
18// for register allocation. For more information on PBQP for register
19// allocation, see the following papers:
20//
21// (1) Hames, L. and Scholz, B. 2006. Nearly optimal register allocation with
22// PBQP. In Proceedings of the 7th Joint Modular Languages Conference
23// (JMLC'06). LNCS, vol. 4228. Springer, New York, NY, USA. 346-361.
24//
25// (2) Scholz, B., Eckstein, E. 2002. Register allocation for irregular
26// architectures. In Proceedings of the Joint Conference on Languages,
27// Compilers and Tools for Embedded Systems (LCTES'02), ACM Press, New York,
28// NY, USA, 139-148.
29//
30//===----------------------------------------------------------------------===//
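For orientation, the loop described above reduces to the following minimal, self-contained C++ sketch; buildProblem, solve, and applySolutionAndSpill are illustrative stand-ins, not LLVM API:

#include <cstdio>

struct Problem {};
struct Solution {};

static Problem buildProblem() { return {}; }
static Solution solve(const Problem &) { return {}; }
// Stand-in: pretend the first two rounds insert spill code.
static bool applySolutionAndSpill(const Solution &, int Round) {
  return Round < 2;
}

int main() {
  bool SpillsAdded = false;
  int Round = 0;
  do {
    Problem P = buildProblem();                    // map regalloc -> PBQP
    Solution S = solve(P);                         // run the PBQP solver
    SpillsAdded = applySolutionAndSpill(S, Round); // map solution back
    std::printf("round %d%s\n", Round, SpillsAdded ? ": spilled" : ": done");
    ++Round;
  } while (SpillsAdded);                           // repeat until no spills
}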
31
32#include "llvm/CodeGen/RegAllocPBQP.h"
33#include "RegisterCoalescer.h"
34#include "Spiller.h"
35#include "llvm/ADT/ArrayRef.h"
36#include "llvm/ADT/BitVector.h"
37#include "llvm/ADT/DenseMap.h"
38#include "llvm/ADT/DenseSet.h"
39#include "llvm/ADT/STLExtras.h"
40#include "llvm/ADT/SmallPtrSet.h"
41#include "llvm/ADT/SmallVector.h"
42#include "llvm/ADT/StringRef.h"
43#include "llvm/Analysis/AliasAnalysis.h"
44#include "llvm/CodeGen/CalcSpillWeights.h"
45#include "llvm/CodeGen/LiveInterval.h"
46#include "llvm/CodeGen/LiveIntervals.h"
47#include "llvm/CodeGen/LiveRangeEdit.h"
48#include "llvm/CodeGen/LiveStacks.h"
49#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
50#include "llvm/CodeGen/MachineDominators.h"
51#include "llvm/CodeGen/MachineFunction.h"
52#include "llvm/CodeGen/MachineFunctionPass.h"
53#include "llvm/CodeGen/MachineInstr.h"
54#include "llvm/CodeGen/MachineLoopInfo.h"
55#include "llvm/CodeGen/MachineRegisterInfo.h"
56#include "llvm/CodeGen/PBQP/Graph.h"
57#include "llvm/CodeGen/PBQP/Math.h"
58#include "llvm/CodeGen/PBQP/Solution.h"
59#include "llvm/CodeGen/PBQPRAConstraint.h"
60#include "llvm/CodeGen/RegAllocRegistry.h"
61#include "llvm/CodeGen/SlotIndexes.h"
62#include "llvm/CodeGen/TargetRegisterInfo.h"
63#include "llvm/CodeGen/TargetSubtargetInfo.h"
64#include "llvm/CodeGen/VirtRegMap.h"
65#include "llvm/Config/llvm-config.h"
66#include "llvm/IR/Function.h"
67#include "llvm/IR/Module.h"
68#include "llvm/MC/MCRegisterInfo.h"
69#include "llvm/Pass.h"
70#include "llvm/Support/CommandLine.h"
71#include "llvm/Support/Compiler.h"
72#include "llvm/Support/Debug.h"
73#include "llvm/Support/FileSystem.h"
74#include "llvm/Support/Printable.h"
75#include "llvm/Support/raw_ostream.h"
76#include <algorithm>
77#include <cassert>
78#include <cstddef>
79#include <limits>
80#include <map>
81#include <memory>
82#include <queue>
83#include <set>
84#include <sstream>
85#include <string>
86#include <system_error>
87#include <tuple>
88#include <utility>
89#include <vector>
90
91using namespace llvm;
92
93#define DEBUG_TYPE "regalloc"
94
95static RegisterRegAlloc
96RegisterPBQPRepAlloc("pbqp", "PBQP register allocator",
97 createDefaultPBQPRegisterAllocator);
98
99static cl::opt<bool>
100PBQPCoalescing("pbqp-coalescing",
101 cl::desc("Attempt coalescing during PBQP register allocation."),
102 cl::init(false), cl::Hidden);
103
104#ifndef NDEBUG
105static cl::opt<bool>
106PBQPDumpGraphs("pbqp-dump-graphs",
107 cl::desc("Dump graphs for each function/round in the compilation unit."),
108 cl::init(false), cl::Hidden);
109#endif
110
111namespace {
112
113///
114/// PBQP based allocators solve the register allocation problem by mapping
115/// register allocation problems to Partitioned Boolean Quadratic
116/// Programming problems.
117class RegAllocPBQP : public MachineFunctionPass {
118public:
119 static char ID;
120
121 /// Construct a PBQP register allocator.
122 RegAllocPBQP(char *cPassID = nullptr)
123 : MachineFunctionPass(ID), customPassID(cPassID) {
124 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
125 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
126 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
127 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
128 }
129
130 /// Return the pass name.
131 StringRef getPassName() const override { return "PBQP Register Allocator"; }
132
133 /// PBQP analysis usage.
134 void getAnalysisUsage(AnalysisUsage &au) const override;
135
136 /// Perform register allocation
137 bool runOnMachineFunction(MachineFunction &MF) override;
138
139 MachineFunctionProperties getRequiredProperties() const override {
140 return MachineFunctionProperties().set(
141 MachineFunctionProperties::Property::NoPHIs);
142 }
143
144private:
145 using LI2NodeMap = std::map<const LiveInterval *, unsigned>;
146 using Node2LIMap = std::vector<const LiveInterval *>;
147 using AllowedSet = std::vector<unsigned>;
148 using AllowedSetMap = std::vector<AllowedSet>;
149 using RegPair = std::pair<unsigned, unsigned>;
150 using CoalesceMap = std::map<RegPair, PBQP::PBQPNum>;
151 using RegSet = std::set<unsigned>;
152
153 char *customPassID;
154
155 RegSet VRegsToAlloc, EmptyIntervalVRegs;
156
157 /// An instruction that defines an original reg and whose defs are already
158 /// all dead after remat is saved in DeadRemats. The deletion of such
159 /// instructions is postponed until all allocations are done, so their remat
160 /// expressions stay available for the remat of all siblings of the original reg.
161 SmallPtrSet<MachineInstr *, 32> DeadRemats;
162
163 /// Finds the initial set of vreg intervals to allocate.
164 void findVRegIntervalsToAlloc(const MachineFunction &MF, LiveIntervals &LIS);
165
166 /// Constructs an initial graph.
167 void initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM, Spiller &VRegSpiller);
168
169 /// Spill the given VReg.
170 void spillVReg(unsigned VReg, SmallVectorImpl<unsigned> &NewIntervals,
171 MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM,
172 Spiller &VRegSpiller);
173
174 /// Given a solved PBQP problem, maps the solution back to a register
175 /// assignment.
176 bool mapPBQPToRegAlloc(const PBQPRAGraph &G,
177 const PBQP::Solution &Solution,
178 VirtRegMap &VRM,
179 Spiller &VRegSpiller);
180
181 /// Postprocessing before final spilling. Sets basic block "live in"
182 /// variables.
183 void finalizeAlloc(MachineFunction &MF, LiveIntervals &LIS,
184 VirtRegMap &VRM) const;
185
186 void postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS);
187};
188
189char RegAllocPBQP::ID = 0;
190
191/// Set spill costs for each node in the PBQP reg-alloc graph.
192class SpillCosts : public PBQPRAConstraint {
193public:
194 void apply(PBQPRAGraph &G) override {
195 LiveIntervals &LIS = G.getMetadata().LIS;
196
197 // A minimum spill cost, so that register constraints can be set
198 // without normalization in the [0.0, MinSpillCost) interval.
199 const PBQP::PBQPNum MinSpillCost = 10.0;
200
201 for (auto NId : G.nodeIds()) {
202 PBQP::PBQPNum SpillCost =
203 LIS.getInterval(G.getNodeMetadata(NId).getVReg()).weight;
204 if (SpillCost == 0.0)
205 SpillCost = std::numeric_limits<PBQP::PBQPNum>::min();
206 else
207 SpillCost += MinSpillCost;
208 PBQPRAGraph::RawVector NodeCosts(G.getNodeCosts(NId));
209 NodeCosts[PBQP::RegAlloc::getSpillOptionIdx()] = SpillCost;
210 G.setNodeCosts(NId, std::move(NodeCosts));
211 }
212 }
213};
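The spill-cost normalization performed in the loop above, isolated as a small sketch (plain double stands in for PBQP::PBQPNum; the function name is illustrative):

#include <limits>

// Mirror of the SpillCosts logic: a zero-weight interval gets the smallest
// positive cost so spilling is never free; any other weight is lifted above
// the MinSpillCost floor.
double normalizedSpillCost(double Weight, double MinSpillCost = 10.0) {
  if (Weight == 0.0)
    return std::numeric_limits<double>::min();
  return Weight + MinSpillCost;
}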
214
215/// Add interference edges between overlapping vregs.
216class Interference : public PBQPRAConstraint {
217private:
218 using AllowedRegVecPtr = const PBQP::RegAlloc::AllowedRegVector *;
219 using IKey = std::pair<AllowedRegVecPtr, AllowedRegVecPtr>;
220 using IMatrixCache = DenseMap<IKey, PBQPRAGraph::MatrixPtr>;
221 using DisjointAllowedRegsCache = DenseSet<IKey>;
222 using IEdgeKey = std::pair<PBQP::GraphBase::NodeId, PBQP::GraphBase::NodeId>;
223 using IEdgeCache = DenseSet<IEdgeKey>;
224
225 bool haveDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
226 PBQPRAGraph::NodeId MId,
227 const DisjointAllowedRegsCache &D) const {
228 const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
229 const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
230
231 if (NRegs == MRegs)
232 return false;
233
234 if (NRegs < MRegs)
235 return D.count(IKey(NRegs, MRegs)) > 0;
236
237 return D.count(IKey(MRegs, NRegs)) > 0;
238 }
239
240 void setDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
241 PBQPRAGraph::NodeId MId,
242 DisjointAllowedRegsCache &D) {
243 const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
244 const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
245
246 assert(NRegs != MRegs && "AllowedRegs can not be disjoint with itself");
247
248 if (NRegs < MRegs)
249 D.insert(IKey(NRegs, MRegs));
250 else
251 D.insert(IKey(MRegs, NRegs));
252 }
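Both helpers order the pointer pair before touching the cache, so a symmetric relation is stored under a single key. The idiom in isolation (illustrative helper, not LLVM code):

#include <utility>

// Canonical key for a symmetric relation: always (smaller, larger), so
// lookup(A, B) and lookup(B, A) hit the same cache entry.
template <typename T>
std::pair<T, T> symmetricKey(const T &A, const T &B) {
  return A < B ? std::make_pair(A, B) : std::make_pair(B, A);
}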
253
254 // Holds (Interval, CurrentSegmentID, and NodeId). The first two are required
255 // for the fast interference graph construction algorithm. The last is there
256 // to save us from looking up node ids via the VRegToNode map in the graph
257 // metadata.
258 using IntervalInfo =
259 std::tuple<LiveInterval*, size_t, PBQP::GraphBase::NodeId>;
260
261 static SlotIndex getStartPoint(const IntervalInfo &I) {
262 return std::get<0>(I)->segments[std::get<1>(I)].start;
263 }
264
265 static SlotIndex getEndPoint(const IntervalInfo &I) {
266 return std::get<0>(I)->segments[std::get<1>(I)].end;
267 }
268
269 static PBQP::GraphBase::NodeId getNodeId(const IntervalInfo &I) {
270 return std::get<2>(I);
271 }
272
273 static bool lowestStartPoint(const IntervalInfo &I1,
274 const IntervalInfo &I2) {
275 // Condition reversed because priority queue has the *highest* element at
276 // the front, rather than the lowest.
277 return getStartPoint(I1) > getStartPoint(I2);
278 }
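The reversed comparison is the standard trick for getting min-first behavior out of std::priority_queue, which otherwise surfaces its largest element. A self-contained demonstration:

#include <iostream>
#include <queue>
#include <vector>

int main() {
  // With operator> as the comparator, top() is the *smallest* element,
  // exactly the inversion lowestStartPoint relies on.
  auto Greater = [](int A, int B) { return A > B; };
  std::priority_queue<int, std::vector<int>, decltype(Greater)> MinQ(Greater);
  for (int V : {5, 1, 4, 2})
    MinQ.push(V);
  while (!MinQ.empty()) {
    std::cout << MinQ.top() << ' '; // prints: 1 2 4 5
    MinQ.pop();
  }
  std::cout << '\n';
}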
279
280 static bool lowestEndPoint(const IntervalInfo &I1,
281 const IntervalInfo &I2) {
282 SlotIndex E1 = getEndPoint(I1);
283 SlotIndex E2 = getEndPoint(I2);
284
285 if (E1 < E2)
286 return true;
287
288 if (E1 > E2)
289 return false;
290
291 // If two intervals end at the same point, we need a way to break the tie or
292 // the set will assume they're actually equal and refuse to insert a
293 // "duplicate". Just compare the vregs - fast and guaranteed unique.
294 return std::get<0>(I1)->reg < std::get<0>(I2)->reg;
295 }
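Without the tie-break, a std::set ordered only by end point would treat two intervals ending at the same slot as equivalent and silently drop one of them. A self-contained illustration (Seg is a toy stand-in for IntervalInfo):

#include <iostream>
#include <set>

struct Seg { int End; int Reg; };

int main() {
  // Keyed on End alone, {3,1} and {3,2} compare equivalent: size stays 1.
  auto ByEnd = [](const Seg &A, const Seg &B) { return A.End < B.End; };
  std::set<Seg, decltype(ByEnd)> Coarse(ByEnd);
  Coarse.insert({3, 1});
  Coarse.insert({3, 2});
  std::cout << Coarse.size() << '\n'; // 1

  // Breaking ties on Reg (as lowestEndPoint does on vregs) keeps both.
  auto ByEndThenReg = [](const Seg &A, const Seg &B) {
    return A.End != B.End ? A.End < B.End : A.Reg < B.Reg;
  };
  std::set<Seg, decltype(ByEndThenReg)> Fine(ByEndThenReg);
  Fine.insert({3, 1});
  Fine.insert({3, 2});
  std::cout << Fine.size() << '\n'; // 2
}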
296
297 static bool isAtLastSegment(const IntervalInfo &I) {
298 return std::get<1>(I) == std::get<0>(I)->size() - 1;
299 }
300
301 static IntervalInfo nextSegment(const IntervalInfo &I) {
302 return std::make_tuple(std::get<0>(I), std::get<1>(I) + 1, std::get<2>(I));
303 }
304
305public:
306 void apply(PBQPRAGraph &G) override {
307 // The following is loosely based on the linear scan algorithm introduced in
308 // "Linear Scan Register Allocation" by Poletto and Sarkar. This version
309 // isn't linear, because the size of the active set isn't bound by the
310 // number of registers, but rather the size of the largest clique in the
311 // graph. Still, we expect this to be better than N^2.
312 LiveIntervals &LIS = G.getMetadata().LIS;
313
314 // Interference matrices are incredibly regular - they're only a function of
315 // the allowed sets, so we cache them to avoid the overhead of constructing
316 // and uniquing them.
317 IMatrixCache C;
318
319 // Finding an edge is expensive in the worst case (O(max_clique(G))), so
320 // we locally cache edges we have already seen.
321 IEdgeCache EC;
322
323 // Cache known-disjoint allowed-register pairs.
324 DisjointAllowedRegsCache D;
325
326 using IntervalSet = std::set<IntervalInfo, decltype(&lowestEndPoint)>;
327 using IntervalQueue =
328 std::priority_queue<IntervalInfo, std::vector<IntervalInfo>,
329 decltype(&lowestStartPoint)>;
330 IntervalSet Active(lowestEndPoint);
331 IntervalQueue Inactive(lowestStartPoint);
332
333 // Start by building the inactive set.
334 for (auto NId : G.nodeIds()) {
335 unsigned VReg = G.getNodeMetadata(NId).getVReg();
336 LiveInterval &LI = LIS.getInterval(VReg);
337 assert(!LI.empty() && "PBQP graph contains node for empty interval");
338 Inactive.push(std::make_tuple(&LI, 0, NId));
339 }
340
341 while (!Inactive.empty()) {
342 // Tentatively grab the "next" interval - this choice may be overridden
343 // below.
344 IntervalInfo Cur = Inactive.top();
345
346 // Retire any active intervals that end before Cur starts.
347 IntervalSet::iterator RetireItr = Active.begin();
348 while (RetireItr != Active.end() &&
349 (getEndPoint(*RetireItr) <= getStartPoint(Cur))) {
350 // If this interval has subsequent segments, add the next one to the
351 // inactive list.
352 if (!isAtLastSegment(*RetireItr))
353 Inactive.push(nextSegment(*RetireItr));
354
355 ++RetireItr;
356 }
357 Active.erase(Active.begin(), RetireItr);
358
359 // One of the newly retired segments may actually start before the
360 // Cur segment, so re-grab the front of the inactive list.
361 Cur = Inactive.top();
362 Inactive.pop();
363
364 // At this point we know that Cur overlaps all active intervals. Add the
365 // interference edges.
366 PBQP::GraphBase::NodeId NId = getNodeId(Cur);
367 for (const auto &A : Active) {
368 PBQP::GraphBase::NodeId MId = getNodeId(A);
369
370 // Do not add an edge when the nodes' allowed registers do not
371 // intersect: there is obviously no interference.
372 if (haveDisjointAllowedRegs(G, NId, MId, D))
373 continue;
374
375 // Check that we haven't already added this edge
376 IEdgeKey EK(std::min(NId, MId), std::max(NId, MId));
377 if (EC.count(EK))
378 continue;
379
380 // This is a new edge - add it to the graph.
381 if (!createInterferenceEdge(G, NId, MId, C))
382 setDisjointAllowedRegs(G, NId, MId, D);
383 else
384 EC.insert(EK);
385 }
386
387 // Finally, add Cur to the Active set.
388 Active.insert(Cur);
389 }
390 }
391
392private:
393 // Create an Interference edge and add it to the graph, unless it is
394 // a null matrix, meaning the nodes' allowed registers do not have any
395 // interference. This case occurs frequently between integer and floating
396 // point registers for example.
397 // Returns true iff the nodes interfere.
398 bool createInterferenceEdge(PBQPRAGraph &G,
399 PBQPRAGraph::NodeId NId, PBQPRAGraph::NodeId MId,
400 IMatrixCache &C) {
401 const TargetRegisterInfo &TRI =
402 *G.getMetadata().MF.getSubtarget().getRegisterInfo();
403 const auto &NRegs = G.getNodeMetadata(NId).getAllowedRegs();
404 const auto &MRegs = G.getNodeMetadata(MId).getAllowedRegs();
405
406 // Try looking the edge costs up in the IMatrixCache first.
407 IKey K(&NRegs, &MRegs);
408 IMatrixCache::iterator I = C.find(K);
409 if (I != C.end()) {
410 G.addEdgeBypassingCostAllocator(NId, MId, I->second);
411 return true;
412 }
413
414 PBQPRAGraph::RawMatrix M(NRegs.size() + 1, MRegs.size() + 1, 0);
415 bool NodesInterfere = false;
416 for (unsigned I = 0; I != NRegs.size(); ++I) {
417 unsigned PRegN = NRegs[I];
418 for (unsigned J = 0; J != MRegs.size(); ++J) {
419 unsigned PRegM = MRegs[J];
420 if (TRI.regsOverlap(PRegN, PRegM)) {
421 M[I + 1][J + 1] = std::numeric_limits<PBQP::PBQPNum>::infinity();
422 NodesInterfere = true;
423 }
424 }
425 }
426
427 if (!NodesInterfere)
428 return false;
429
430 PBQPRAGraph::EdgeId EId = G.addEdge(NId, MId, std::move(M));
431 C[K] = G.getEdgeCostsPtr(EId);
432
433 return true;
434 }
435};
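A stripped-down model of createInterferenceEdge's matrix construction: row and column 0 are the spill option, and cell (I+1, J+1) becomes infinite when the registers alias. Plain equality stands in for TRI.regsOverlap; all names are illustrative:

#include <cstdio>
#include <limits>
#include <vector>

int main() {
  const double Inf = std::numeric_limits<double>::infinity();
  std::vector<unsigned> NRegs = {1, 2, 3}; // allowed regs of node N
  std::vector<unsigned> MRegs = {2, 4};    // allowed regs of node M

  // (NRegs.size()+1) x (MRegs.size()+1), zero-initialized; index 0 = spill.
  std::vector<std::vector<double>> M(NRegs.size() + 1,
                                     std::vector<double>(MRegs.size() + 1, 0));
  bool NodesInterfere = false;
  for (unsigned I = 0; I != NRegs.size(); ++I)
    for (unsigned J = 0; J != MRegs.size(); ++J)
      if (NRegs[I] == MRegs[J]) { // stand-in for TRI.regsOverlap
        M[I + 1][J + 1] = Inf;
        NodesInterfere = true;
      }
  // A matrix with no infinite cell is "null": no edge would be added.
  std::printf("interfere: %s\n", NodesInterfere ? "yes" : "no");
}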
436
437class Coalescing : public PBQPRAConstraint {
438public:
439 void apply(PBQPRAGraph &G) override {
440 MachineFunction &MF = G.getMetadata().MF;
441 MachineBlockFrequencyInfo &MBFI = G.getMetadata().MBFI;
442 CoalescerPair CP(*MF.getSubtarget().getRegisterInfo());
443
444 // Scan the machine function and add a coalescing cost whenever CoalescerPair
445 // deems the copy coalescable.
446 for (const auto &MBB : MF) {
447 for (const auto &MI : MBB) {
448 // Skip not-coalescable or already coalesced copies.
449 if (!CP.setRegisters(&MI) || CP.getSrcReg() == CP.getDstReg())
1. Assuming the condition is false
2. Assuming the condition is false
3. Taking false branch
450 continue;
451
452 unsigned DstReg = CP.getDstReg();
453 unsigned SrcReg = CP.getSrcReg();
454
455 const float Scale = 1.0f / MBFI.getEntryFreq();
456 PBQP::PBQPNum CBenefit = MBFI.getBlockFreq(&MBB).getFrequency() * Scale;
457
458 if (CP.isPhys()) {
4. Taking false branch
459 if (!MF.getRegInfo().isAllocatable(DstReg))
460 continue;
461
462 PBQPRAGraph::NodeId NId = G.getMetadata().getNodeIdForVReg(SrcReg);
463
464 const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed =
465 G.getNodeMetadata(NId).getAllowedRegs();
466
467 unsigned PRegOpt = 0;
468 while (PRegOpt < Allowed.size() && Allowed[PRegOpt] != DstReg)
469 ++PRegOpt;
470
471 if (PRegOpt < Allowed.size()) {
472 PBQPRAGraph::RawVector NewCosts(G.getNodeCosts(NId));
473 NewCosts[PRegOpt + 1] -= CBenefit;
474 G.setNodeCosts(NId, std::move(NewCosts));
475 }
476 } else {
477 PBQPRAGraph::NodeId N1Id = G.getMetadata().getNodeIdForVReg(DstReg);
478 PBQPRAGraph::NodeId N2Id = G.getMetadata().getNodeIdForVReg(SrcReg);
479 const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed1 =
480 &G.getNodeMetadata(N1Id).getAllowedRegs();
481 const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed2 =
482 &G.getNodeMetadata(N2Id).getAllowedRegs();
483
484 PBQPRAGraph::EdgeId EId = G.findEdge(N1Id, N2Id);
485 if (EId == G.invalidEdgeId()) {
5. Assuming the condition is true
6. Taking true branch
486 PBQPRAGraph::RawMatrix Costs(Allowed1->size() + 1,
487 Allowed2->size() + 1, 0);
488 addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
489 G.addEdge(N1Id, N2Id, std::move(Costs));
7. Calling 'Graph::addEdge'
490 } else {
491 if (G.getEdgeNode1Id(EId) == N2Id) {
492 std::swap(N1Id, N2Id);
493 std::swap(Allowed1, Allowed2);
494 }
495 PBQPRAGraph::RawMatrix Costs(G.getEdgeCosts(EId));
496 addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
497 G.updateEdgeCosts(EId, std::move(Costs));
498 }
499 }
500 }
501 }
502 }
503
504private:
505 void addVirtRegCoalesce(
506 PBQPRAGraph::RawMatrix &CostMat,
507 const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed1,
508 const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed2,
509 PBQP::PBQPNum Benefit) {
510 assert(CostMat.getRows() == Allowed1.size() + 1 && "Size mismatch.");
511 assert(CostMat.getCols() == Allowed2.size() + 1 && "Size mismatch.");
512 for (unsigned I = 0; I != Allowed1.size(); ++I) {
513 unsigned PReg1 = Allowed1[I];
514 for (unsigned J = 0; J != Allowed2.size(); ++J) {
515 unsigned PReg2 = Allowed2[J];
516 if (PReg1 == PReg2)
517 CostMat[I + 1][J + 1] -= Benefit;
518 }
519 }
520 }
521};
522
523} // end anonymous namespace
524
525// Out-of-line destructor/anchor for PBQPRAConstraint.
526PBQPRAConstraint::~PBQPRAConstraint() = default;
527
528void PBQPRAConstraint::anchor() {}
529
530void PBQPRAConstraintList::anchor() {}
531
532void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
533 au.setPreservesCFG();
534 au.addRequired<AAResultsWrapperPass>();
535 au.addPreserved<AAResultsWrapperPass>();
536 au.addRequired<SlotIndexes>();
537 au.addPreserved<SlotIndexes>();
538 au.addRequired<LiveIntervals>();
539 au.addPreserved<LiveIntervals>();
540 //au.addRequiredID(SplitCriticalEdgesID);
541 if (customPassID)
542 au.addRequiredID(*customPassID);
543 au.addRequired<LiveStacks>();
544 au.addPreserved<LiveStacks>();
545 au.addRequired<MachineBlockFrequencyInfo>();
546 au.addPreserved<MachineBlockFrequencyInfo>();
547 au.addRequired<MachineLoopInfo>();
548 au.addPreserved<MachineLoopInfo>();
549 au.addRequired<MachineDominatorTree>();
550 au.addPreserved<MachineDominatorTree>();
551 au.addRequired<VirtRegMap>();
552 au.addPreserved<VirtRegMap>();
553 MachineFunctionPass::getAnalysisUsage(au);
554}
555
556void RegAllocPBQP::findVRegIntervalsToAlloc(const MachineFunction &MF,
557 LiveIntervals &LIS) {
558 const MachineRegisterInfo &MRI = MF.getRegInfo();
559
560 // Iterate over all live ranges.
561 for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
562 unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
563 if (MRI.reg_nodbg_empty(Reg))
564 continue;
565 VRegsToAlloc.insert(Reg);
566 }
567}
568
569static bool isACalleeSavedRegister(unsigned reg, const TargetRegisterInfo &TRI,
570 const MachineFunction &MF) {
571 const MCPhysReg *CSR = MF.getRegInfo().getCalleeSavedRegs();
572 for (unsigned i = 0; CSR[i] != 0; ++i)
573 if (TRI.regsOverlap(reg, CSR[i]))
574 return true;
575 return false;
576}
577
578void RegAllocPBQP::initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM,
579 Spiller &VRegSpiller) {
580 MachineFunction &MF = G.getMetadata().MF;
581
582 LiveIntervals &LIS = G.getMetadata().LIS;
583 const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
584 const TargetRegisterInfo &TRI =
585 *G.getMetadata().MF.getSubtarget().getRegisterInfo();
586
587 std::vector<unsigned> Worklist(VRegsToAlloc.begin(), VRegsToAlloc.end());
588
589 std::map<unsigned, std::vector<unsigned>> VRegAllowedMap;
590
591 while (!Worklist.empty()) {
592 unsigned VReg = Worklist.back();
593 Worklist.pop_back();
594
595 LiveInterval &VRegLI = LIS.getInterval(VReg);
596
597 // If this is an empty interval, move it to the EmptyIntervalVRegs set, then
598 // continue.
599 if (VRegLI.empty()) {
600 EmptyIntervalVRegs.insert(VRegLI.reg);
601 VRegsToAlloc.erase(VRegLI.reg);
602 continue;
603 }
604
605 const TargetRegisterClass *TRC = MRI.getRegClass(VReg);
606
607 // Record any overlaps with regmask operands.
608 BitVector RegMaskOverlaps;
609 LIS.checkRegMaskInterference(VRegLI, RegMaskOverlaps);
610
611 // Compute an initial allowed set for the current vreg.
612 std::vector<unsigned> VRegAllowed;
613 ArrayRef<MCPhysReg> RawPRegOrder = TRC->getRawAllocationOrder(MF);
614 for (unsigned I = 0; I != RawPRegOrder.size(); ++I) {
615 unsigned PReg = RawPRegOrder[I];
616 if (MRI.isReserved(PReg))
617 continue;
618
619 // vregLI crosses a regmask operand that clobbers preg.
620 if (!RegMaskOverlaps.empty() && !RegMaskOverlaps.test(PReg))
621 continue;
622
623 // vregLI overlaps fixed regunit interference.
624 bool Interference = false;
625 for (MCRegUnitIterator Units(PReg, &TRI); Units.isValid(); ++Units) {
626 if (VRegLI.overlaps(LIS.getRegUnit(*Units))) {
627 Interference = true;
628 break;
629 }
630 }
631 if (Interference)
632 continue;
633
634 // preg is usable for this virtual register.
635 VRegAllowed.push_back(PReg);
636 }
637
638 // Check for vregs that have no allowed registers. These should be
639 // pre-spilled and the new vregs added to the worklist.
640 if (VRegAllowed.empty()) {
641 SmallVector<unsigned, 8> NewVRegs;
642 spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
643 Worklist.insert(Worklist.end(), NewVRegs.begin(), NewVRegs.end());
644 continue;
645 } else
646 VRegAllowedMap[VReg] = std::move(VRegAllowed);
647 }
648
649 for (auto &KV : VRegAllowedMap) {
650 auto VReg = KV.first;
651
652 // Move empty intervals to the EmptyIntervalVReg set.
653 if (LIS.getInterval(VReg).empty()) {
654 EmptyIntervalVRegs.insert(VReg);
655 VRegsToAlloc.erase(VReg);
656 continue;
657 }
658
659 auto &VRegAllowed = KV.second;
660
661 PBQPRAGraph::RawVector NodeCosts(VRegAllowed.size() + 1, 0);
662
663 // Tweak the cost of callee-saved registers, as using them forces spilling
664 // and restoring them; this only happens in the prologue / epilogue though.
665 for (unsigned i = 0; i != VRegAllowed.size(); ++i)
666 if (isACalleeSavedRegister(VRegAllowed[i], TRI, MF))
667 NodeCosts[1 + i] += 1.0;
668
669 PBQPRAGraph::NodeId NId = G.addNode(std::move(NodeCosts));
670 G.getNodeMetadata(NId).setVReg(VReg);
671 G.getNodeMetadata(NId).setAllowedRegs(
672 G.getMetadata().getAllowedRegs(std::move(VRegAllowed)));
673 G.getMetadata().setNodeIdForVReg(VReg, NId);
674 }
675}
676
677void RegAllocPBQP::spillVReg(unsigned VReg,
678 SmallVectorImpl<unsigned> &NewIntervals,
679 MachineFunction &MF, LiveIntervals &LIS,
680 VirtRegMap &VRM, Spiller &VRegSpiller) {
681 VRegsToAlloc.erase(VReg);
682 LiveRangeEdit LRE(&LIS.getInterval(VReg), NewIntervals, MF, LIS, &VRM,
683 nullptr, &DeadRemats);
684 VRegSpiller.spill(LRE);
685
686 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
687 (void)TRI;
688 LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> SPILLED (Cost: "
689 << LRE.getParent().weight << ", New vregs: ");
690
691 // Copy any newly inserted live intervals into the list of regs to
692 // allocate.
693 for (LiveRangeEdit::iterator I = LRE.begin(), E = LRE.end();
694 I != E; ++I) {
695 const LiveInterval &LI = LIS.getInterval(*I);
696 assert(!LI.empty() && "Empty spill range.");
697 LLVM_DEBUG(dbgs() << printReg(LI.reg, &TRI) << " ");
698 VRegsToAlloc.insert(LI.reg);
699 }
700
701 LLVM_DEBUG(dbgs() << ")\n");
702}
703
704bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAGraph &G,
705 const PBQP::Solution &Solution,
706 VirtRegMap &VRM,
707 Spiller &VRegSpiller) {
708 MachineFunction &MF = G.getMetadata().MF;
709 LiveIntervals &LIS = G.getMetadata().LIS;
710 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
711 (void)TRI;
712
713 // Set to true if we have any spills
714 bool AnotherRoundNeeded = false;
715
716 // Clear the existing allocation.
717 VRM.clearAllVirt();
718
719 // Iterate over the nodes mapping the PBQP solution to a register
720 // assignment.
721 for (auto NId : G.nodeIds()) {
722 unsigned VReg = G.getNodeMetadata(NId).getVReg();
723 unsigned AllocOption = Solution.getSelection(NId);
724
725 if (AllocOption != PBQP::RegAlloc::getSpillOptionIdx()) {
726 unsigned PReg = G.getNodeMetadata(NId).getAllowedRegs()[AllocOption - 1];
727 LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> "
728 << TRI.getName(PReg) << "\n");
729 assert(PReg != 0 && "Invalid preg selected.");
730 VRM.assignVirt2Phys(VReg, PReg);
731 } else {
732 // Spill VReg. If this introduces new intervals we'll need another round
733 // of allocation.
734 SmallVector<unsigned, 8> NewVRegs;
735 spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
736 AnotherRoundNeeded |= !NewVRegs.empty();
737 }
738 }
739
740 return !AnotherRoundNeeded;
741}
742
743void RegAllocPBQP::finalizeAlloc(MachineFunction &MF,
744 LiveIntervals &LIS,
745 VirtRegMap &VRM) const {
746 MachineRegisterInfo &MRI = MF.getRegInfo();
747
748 // First allocate registers for the empty intervals.
749 for (RegSet::const_iterator
750 I = EmptyIntervalVRegs.begin(), E = EmptyIntervalVRegs.end();
751 I != E; ++I) {
752 LiveInterval &LI = LIS.getInterval(*I);
753
754 unsigned PReg = MRI.getSimpleHint(LI.reg);
755
756 if (PReg == 0) {
757 const TargetRegisterClass &RC = *MRI.getRegClass(LI.reg);
758 const ArrayRef<MCPhysReg> RawPRegOrder = RC.getRawAllocationOrder(MF);
759 for (unsigned CandidateReg : RawPRegOrder) {
760 if (!VRM.getRegInfo().isReserved(CandidateReg)) {
761 PReg = CandidateReg;
762 break;
763 }
764 }
765 assert(PReg &&
766 "No un-reserved physical registers in this register class");
767 }
768
769 VRM.assignVirt2Phys(LI.reg, PReg);
770 }
771}
772
773void RegAllocPBQP::postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS) {
774 VRegSpiller.postOptimization();
775 /// Remove dead defs because of rematerialization.
776 for (auto DeadInst : DeadRemats) {
777 LIS.RemoveMachineInstrFromMaps(*DeadInst);
778 DeadInst->eraseFromParent();
779 }
780 DeadRemats.clear();
781}
782
783static inline float normalizePBQPSpillWeight(float UseDefFreq, unsigned Size,
784 unsigned NumInstr) {
785 // All intervals have a spill weight that is mostly proportional to the number
786 // of uses, with uses in loops having a bigger weight.
787 return NumInstr * normalizeSpillWeight(UseDefFreq, Size, 1);
788}
789
790bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
791 LiveIntervals &LIS = getAnalysis<LiveIntervals>();
792 MachineBlockFrequencyInfo &MBFI =
793 getAnalysis<MachineBlockFrequencyInfo>();
794
795 VirtRegMap &VRM = getAnalysis<VirtRegMap>();
796
797 calculateSpillWeightsAndHints(LIS, MF, &VRM, getAnalysis<MachineLoopInfo>(),
798 MBFI, normalizePBQPSpillWeight);
799
800 std::unique_ptr<Spiller> VRegSpiller(createInlineSpiller(*this, MF, VRM));
801
802 MF.getRegInfo().freezeReservedRegs(MF);
803
804 LLVM_DEBUG(dbgs() << "PBQP Register Allocating for " << MF.getName() << "\n");
805
806 // Allocator main loop:
807 //
808 // * Map current regalloc problem to a PBQP problem
809 // * Solve the PBQP problem
810 // * Map the solution back to a register allocation
811 // * Spill if necessary
812 //
813 // This process is continued till no more spills are generated.
814
815 // Find the vreg intervals in need of allocation.
816 findVRegIntervalsToAlloc(MF, LIS);
817
818#ifndef NDEBUG
819 const Function &F = MF.getFunction();
820 std::string FullyQualifiedName =
821 F.getParent()->getModuleIdentifier() + "." + F.getName().str();
822#endif
823
824 // If there are non-empty intervals, allocate them using PBQP.
825 if (!VRegsToAlloc.empty()) {
826 const TargetSubtargetInfo &Subtarget = MF.getSubtarget();
827 std::unique_ptr<PBQPRAConstraintList> ConstraintsRoot =
828 llvm::make_unique<PBQPRAConstraintList>();
829 ConstraintsRoot->addConstraint(llvm::make_unique<SpillCosts>());
830 ConstraintsRoot->addConstraint(llvm::make_unique<Interference>());
831 if (PBQPCoalescing)
832 ConstraintsRoot->addConstraint(llvm::make_unique<Coalescing>());
833 ConstraintsRoot->addConstraint(Subtarget.getCustomPBQPConstraints());
834
835 bool PBQPAllocComplete = false;
836 unsigned Round = 0;
837
838 while (!PBQPAllocComplete) {
839 LLVM_DEBUG(dbgs() << " PBQP Regalloc round " << Round << ":\n");
840
841 PBQPRAGraph G(PBQPRAGraph::GraphMetadata(MF, LIS, MBFI));
842 initializeGraph(G, VRM, *VRegSpiller);
843 ConstraintsRoot->apply(G);
844
845#ifndef NDEBUG
846 if (PBQPDumpGraphs) {
847 std::ostringstream RS;
848 RS << Round;
849 std::string GraphFileName = FullyQualifiedName + "." + RS.str() +
850 ".pbqpgraph";
851 std::error_code EC;
852 raw_fd_ostream OS(GraphFileName, EC, sys::fs::F_Text);
853 LLVM_DEBUG(dbgs() << "Dumping graph for round " << Round << " to \""
854 << GraphFileName << "\"\n");
855 G.dump(OS);
856 }
857#endif
858
859 PBQP::Solution Solution = PBQP::RegAlloc::solve(G);
860 PBQPAllocComplete = mapPBQPToRegAlloc(G, Solution, VRM, *VRegSpiller);
861 ++Round;
862 }
863 }
864
865 // Finalise allocation, allocate empty ranges.
866 finalizeAlloc(MF, LIS, VRM);
867 postOptimization(*VRegSpiller, LIS);
868 VRegsToAlloc.clear();
869 EmptyIntervalVRegs.clear();
870
871 LLVM_DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << VRM << "\n");
872
873 return true;
874}
875
876/// Create Printable object for node and register info.
877static Printable PrintNodeInfo(PBQP::RegAlloc::PBQPRAGraph::NodeId NId,
878 const PBQP::RegAlloc::PBQPRAGraph &G) {
879 return Printable([NId, &G](raw_ostream &OS) {
880 const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
881 const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
882 unsigned VReg = G.getNodeMetadata(NId).getVReg();
883 const char *RegClassName = TRI->getRegClassName(MRI.getRegClass(VReg));
884 OS << NId << " (" << RegClassName << ':' << printReg(VReg, TRI) << ')';
885 });
886}
887
888#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
889LLVM_DUMP_METHOD void PBQP::RegAlloc::PBQPRAGraph::dump(raw_ostream &OS) const {
890 for (auto NId : nodeIds()) {
891 const Vector &Costs = getNodeCosts(NId);
892 assert(Costs.getLength() != 0 && "Empty vector in graph.");
893 OS << PrintNodeInfo(NId, *this) << ": " << Costs << '\n';
894 }
895 OS << '\n';
896
897 for (auto EId : edgeIds()) {
898 NodeId N1Id = getEdgeNode1Id(EId);
899 NodeId N2Id = getEdgeNode2Id(EId);
900 assert(N1Id != N2Id && "PBQP graphs should not have self-edges.");
901 const Matrix &M = getEdgeCosts(EId);
902 assert(M.getRows() != 0 && "No rows in matrix.");
903 assert(M.getCols() != 0 && "No cols in matrix.");
904 OS << PrintNodeInfo(N1Id, *this) << ' ' << M.getRows() << " rows / ";
905 OS << PrintNodeInfo(N2Id, *this) << ' ' << M.getCols() << " cols:\n";
906 OS << M << '\n';
907 }
908}
909
910LLVM_DUMP_METHOD void PBQP::RegAlloc::PBQPRAGraph::dump() const {
911 dump(dbgs());
912}
913#endif
914
915void PBQP::RegAlloc::PBQPRAGraph::printDot(raw_ostream &OS) const {
916 OS << "graph {\n";
917 for (auto NId : nodeIds()) {
918 OS << " node" << NId << " [ label=\""
919 << PrintNodeInfo(NId, *this) << "\\n"
920 << getNodeCosts(NId) << "\" ]\n";
921 }
922
923 OS << " edge [ len=" << nodeIds().size() << " ]\n";
924 for (auto EId : edgeIds()) {
925 OS << " node" << getEdgeNode1Id(EId)
926 << " -- node" << getEdgeNode2Id(EId)
927 << " [ label=\"";
928 const Matrix &EdgeCosts = getEdgeCosts(EId);
929 for (unsigned i = 0; i < EdgeCosts.getRows(); ++i) {
930 OS << EdgeCosts.getRowAsVector(i) << "\\n";
931 }
932 OS << "\" ]\n";
933 }
934 OS << "}\n";
935}
936
937FunctionPass *llvm::createPBQPRegisterAllocator(char *customPassID) {
938 return new RegAllocPBQP(customPassID);
939}
940
941FunctionPass* llvm::createDefaultPBQPRegisterAllocator() {
942 return createPBQPRegisterAllocator();
943}

/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/CodeGen/PBQP/Graph.h

1//===- Graph.h - PBQP Graph -------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// PBQP Graph class.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
15#define LLVM_CODEGEN_PBQP_GRAPH_H
16
17#include "llvm/ADT/STLExtras.h"
18#include <algorithm>
19#include <cassert>
20#include <iterator>
21#include <limits>
22#include <vector>
23
24namespace llvm {
25namespace PBQP {
26
27 class GraphBase {
28 public:
29 using NodeId = unsigned;
30 using EdgeId = unsigned;
31
32 /// Returns a value representing an invalid (non-existent) node.
33 static NodeId invalidNodeId() {
34 return std::numeric_limits<NodeId>::max();
35 }
36
37 /// Returns a value representing an invalid (non-existent) edge.
38 static EdgeId invalidEdgeId() {
39 return std::numeric_limits<EdgeId>::max();
40 }
41 };
42
43 /// PBQP Graph class.
44 /// Instances of this class describe PBQP problems.
45 ///
46 template <typename SolverT>
47 class Graph : public GraphBase {
48 private:
49 using CostAllocator = typename SolverT::CostAllocator;
50
51 public:
52 using RawVector = typename SolverT::RawVector;
53 using RawMatrix = typename SolverT::RawMatrix;
54 using Vector = typename SolverT::Vector;
55 using Matrix = typename SolverT::Matrix;
56 using VectorPtr = typename CostAllocator::VectorPtr;
57 using MatrixPtr = typename CostAllocator::MatrixPtr;
58 using NodeMetadata = typename SolverT::NodeMetadata;
59 using EdgeMetadata = typename SolverT::EdgeMetadata;
60 using GraphMetadata = typename SolverT::GraphMetadata;
61
62 private:
63 class NodeEntry {
64 public:
65 using AdjEdgeList = std::vector<EdgeId>;
66 using AdjEdgeIdx = AdjEdgeList::size_type;
67 using AdjEdgeItr = AdjEdgeList::const_iterator;
68
69 NodeEntry(VectorPtr Costs) : Costs(std::move(Costs)) {}
70
71 static AdjEdgeIdx getInvalidAdjEdgeIdx() {
72 return std::numeric_limits<AdjEdgeIdx>::max();
73 }
74
75 AdjEdgeIdx addAdjEdgeId(EdgeId EId) {
76 AdjEdgeIdx Idx = AdjEdgeIds.size();
77 AdjEdgeIds.push_back(EId);
78 return Idx;
79 }
80
81 void removeAdjEdgeId(Graph &G, NodeId ThisNId, AdjEdgeIdx Idx) {
82 // Swap-and-pop for fast removal.
83 // 1) Update the adj index of the edge currently at back().
84 // 2) Move last Edge down to Idx.
85 // 3) pop_back()
86 // If Idx == size() - 1 then the setAdjEdgeIdx and swap are
87 // redundant, but both operations are cheap.
88 G.getEdge(AdjEdgeIds.back()).setAdjEdgeIdx(ThisNId, Idx);
89 AdjEdgeIds[Idx] = AdjEdgeIds.back();
90 AdjEdgeIds.pop_back();
91 }
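The swap-and-pop idiom above, reduced to its essence for a plain vector (the real method additionally patches the moved edge's stored adjacency index):

#include <cassert>
#include <utility>
#include <vector>

// O(1) unordered erase: overwrite the hole with the last element, then
// shrink. Relative element order is not preserved.
template <typename T>
void swapAndPop(std::vector<T> &V, typename std::vector<T>::size_type Idx) {
  assert(Idx < V.size() && "index out of range");
  V[Idx] = std::move(V.back()); // move the last element into the hole
  V.pop_back();                 // the self-move when Idx is last is harmless
}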
92
93 const AdjEdgeList& getAdjEdgeIds() const { return AdjEdgeIds; }
94
95 VectorPtr Costs;
96 NodeMetadata Metadata;
97
98 private:
99 AdjEdgeList AdjEdgeIds;
100 };
101
102 class EdgeEntry {
103 public:
104 EdgeEntry(NodeId N1Id, NodeId N2Id, MatrixPtr Costs)
105 : Costs(std::move(Costs)) {
106 NIds[0] = N1Id;
107 NIds[1] = N2Id;
108 ThisEdgeAdjIdxs[0] = NodeEntry::getInvalidAdjEdgeIdx();
109 ThisEdgeAdjIdxs[1] = NodeEntry::getInvalidAdjEdgeIdx();
110 }
111
112 void connectToN(Graph &G, EdgeId ThisEdgeId, unsigned NIdx) {
113 assert(ThisEdgeAdjIdxs[NIdx] == NodeEntry::getInvalidAdjEdgeIdx() &&
114 "Edge already connected to NIds[NIdx].");
115 NodeEntry &N = G.getNode(NIds[NIdx]);
116 ThisEdgeAdjIdxs[NIdx] = N.addAdjEdgeId(ThisEdgeId);
117 }
118
119 void connect(Graph &G, EdgeId ThisEdgeId) {
120 connectToN(G, ThisEdgeId, 0);
121 connectToN(G, ThisEdgeId, 1);
122 }
123
124 void setAdjEdgeIdx(NodeId NId, typename NodeEntry::AdjEdgeIdx NewIdx) {
125 if (NId == NIds[0])
126 ThisEdgeAdjIdxs[0] = NewIdx;
127 else {
128 assert(NId == NIds[1] && "Edge not connected to NId");
129 ThisEdgeAdjIdxs[1] = NewIdx;
130 }
131 }
132
133 void disconnectFromN(Graph &G, unsigned NIdx) {
134 assert(ThisEdgeAdjIdxs[NIdx] != NodeEntry::getInvalidAdjEdgeIdx() &&
135 "Edge not connected to NIds[NIdx].");
136 NodeEntry &N = G.getNode(NIds[NIdx]);
137 N.removeAdjEdgeId(G, NIds[NIdx], ThisEdgeAdjIdxs[NIdx]);
138 ThisEdgeAdjIdxs[NIdx] = NodeEntry::getInvalidAdjEdgeIdx();
139 }
140
141 void disconnectFrom(Graph &G, NodeId NId) {
142 if (NId == NIds[0])
143 disconnectFromN(G, 0);
144 else {
145 assert(NId == NIds[1] && "Edge does not connect NId");
146 disconnectFromN(G, 1);
147 }
148 }
149
150 NodeId getN1Id() const { return NIds[0]; }
151 NodeId getN2Id() const { return NIds[1]; }
152
153 MatrixPtr Costs;
154 EdgeMetadata Metadata;
155
156 private:
157 NodeId NIds[2];
158 typename NodeEntry::AdjEdgeIdx ThisEdgeAdjIdxs[2];
159 };
160
161 // ----- MEMBERS -----
162
163 GraphMetadata Metadata;
164 CostAllocator CostAlloc;
165 SolverT *Solver = nullptr;
166
167 using NodeVector = std::vector<NodeEntry>;
168 using FreeNodeVector = std::vector<NodeId>;
169 NodeVector Nodes;
170 FreeNodeVector FreeNodeIds;
171
172 using EdgeVector = std::vector<EdgeEntry>;
173 using FreeEdgeVector = std::vector<EdgeId>;
174 EdgeVector Edges;
175 FreeEdgeVector FreeEdgeIds;
176
177 Graph(const Graph &Other) {}
178
179 // ----- INTERNAL METHODS -----
180
181 NodeEntry &getNode(NodeId NId) {
182 assert(NId < Nodes.size() && "Out of bound NodeId");
183 return Nodes[NId];
184 }
185 const NodeEntry &getNode(NodeId NId) const {
186 assert(NId < Nodes.size() && "Out of bound NodeId");
187 return Nodes[NId];
188 }
189
190 EdgeEntry& getEdge(EdgeId EId) { return Edges[EId]; }
191 const EdgeEntry& getEdge(EdgeId EId) const { return Edges[EId]; }
192
193 NodeId addConstructedNode(NodeEntry N) {
194 NodeId NId = 0;
195 if (!FreeNodeIds.empty()) {
196 NId = FreeNodeIds.back();
197 FreeNodeIds.pop_back();
198 Nodes[NId] = std::move(N);
199 } else {
200 NId = Nodes.size();
201 Nodes.push_back(std::move(N));
202 }
203 return NId;
204 }
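Node and edge ids are recycled through free lists rather than left as permanent holes in the vectors. The scheme in miniature (illustrative, not LLVM code):

#include <vector>

// Ids come from the free list when one is available, otherwise a fresh id
// is minted, matching addConstructedNode / addConstructedEdge above.
struct IdPool {
  std::vector<unsigned> FreeIds;
  unsigned NextId = 0;

  unsigned allocate() {
    if (!FreeIds.empty()) {
      unsigned Id = FreeIds.back();
      FreeIds.pop_back();
      return Id; // recycled slot
    }
    return NextId++; // brand-new slot
  }

  void release(unsigned Id) { FreeIds.push_back(Id); }
};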
205
206 EdgeId addConstructedEdge(EdgeEntry E) {
207 assert(findEdge(E.getN1Id(), E.getN2Id()) == invalidEdgeId() &&
208 "Attempt to add duplicate edge.");
209 EdgeId EId = 0;
210 if (!FreeEdgeIds.empty()) {
211 EId = FreeEdgeIds.back();
212 FreeEdgeIds.pop_back();
213 Edges[EId] = std::move(E);
214 } else {
215 EId = Edges.size();
216 Edges.push_back(std::move(E));
217 }
218
219 EdgeEntry &NE = getEdge(EId);
220
221 // Add the edge to the adjacency sets of its nodes.
222 NE.connect(*this, EId);
223 return EId;
224 }
225
226 void operator=(const Graph &Other) {}
227
228 public:
229 using AdjEdgeItr = typename NodeEntry::AdjEdgeItr;
230
231 class NodeItr {
232 public:
233 using iterator_category = std::forward_iterator_tag;
234 using value_type = NodeId;
235 using difference_type = int;
236 using pointer = NodeId *;
237 using reference = NodeId &;
238
239 NodeItr(NodeId CurNId, const Graph &G)
240 : CurNId(CurNId), EndNId(G.Nodes.size()), FreeNodeIds(G.FreeNodeIds) {
241 this->CurNId = findNextInUse(CurNId); // Move to first in-use node id
242 }
243
244 bool operator==(const NodeItr &O) const { return CurNId == O.CurNId; }
245 bool operator!=(const NodeItr &O) const { return !(*this == O); }
246 NodeItr& operator++() { CurNId = findNextInUse(++CurNId); return *this; }
247 NodeId operator*() const { return CurNId; }
248
249 private:
250 NodeId findNextInUse(NodeId NId) const {
251 while (NId < EndNId && is_contained(FreeNodeIds, NId)) {
252 ++NId;
253 }
254 return NId;
255 }
256
257 NodeId CurNId, EndNId;
258 const FreeNodeVector &FreeNodeIds;
259 };
260
261 class EdgeItr {
262 public:
263 EdgeItr(EdgeId CurEId, const Graph &G)
264 : CurEId(CurEId), EndEId(G.Edges.size()), FreeEdgeIds(G.FreeEdgeIds) {
265 this->CurEId = findNextInUse(CurEId); // Move to first in-use edge id
266 }
267
268 bool operator==(const EdgeItr &O) const { return CurEId == O.CurEId; }
269 bool operator!=(const EdgeItr &O) const { return !(*this == O); }
270 EdgeItr& operator++() { CurEId = findNextInUse(++CurEId); return *this; }
271 EdgeId operator*() const { return CurEId; }
272
273 private:
274 EdgeId findNextInUse(EdgeId EId) const {
275 while (EId < EndEId && is_contained(FreeEdgeIds, EId)) {
276 ++EId;
277 }
278 return EId;
279 }
280
281 EdgeId CurEId, EndEId;
282 const FreeEdgeVector &FreeEdgeIds;
283 };
284
285 class NodeIdSet {
286 public:
287 NodeIdSet(const Graph &G) : G(G) {}
288
289 NodeItr begin() const { return NodeItr(0, G); }
290 NodeItr end() const { return NodeItr(G.Nodes.size(), G); }
291
292 bool empty() const { return G.Nodes.empty(); }
293
294 typename NodeVector::size_type size() const {
295 return G.Nodes.size() - G.FreeNodeIds.size();
296 }
297
298 private:
299 const Graph& G;
300 };
301
302 class EdgeIdSet {
303 public:
304 EdgeIdSet(const Graph &G) : G(G) {}
305
306 EdgeItr begin() const { return EdgeItr(0, G); }
307 EdgeItr end() const { return EdgeItr(G.Edges.size(), G); }
308
309 bool empty() const { return G.Edges.empty(); }
310
311 typename NodeVector::size_type size() const {
312 return G.Edges.size() - G.FreeEdgeIds.size();
313 }
314
315 private:
316 const Graph& G;
317 };
318
319 class AdjEdgeIdSet {
320 public:
321 AdjEdgeIdSet(const NodeEntry &NE) : NE(NE) {}
322
323 typename NodeEntry::AdjEdgeItr begin() const {
324 return NE.getAdjEdgeIds().begin();
325 }
326
327 typename NodeEntry::AdjEdgeItr end() const {
328 return NE.getAdjEdgeIds().end();
329 }
330
331 bool empty() const { return NE.getAdjEdgeIds().empty(); }
332
333 typename NodeEntry::AdjEdgeList::size_type size() const {
334 return NE.getAdjEdgeIds().size();
335 }
336
337 private:
338 const NodeEntry &NE;
339 };
340
341 /// Construct an empty PBQP graph.
342 Graph() = default;
343
344 /// Construct an empty PBQP graph with the given graph metadata.
345 Graph(GraphMetadata Metadata) : Metadata(std::move(Metadata)) {}
346
347 /// Get a reference to the graph metadata.
348 GraphMetadata& getMetadata() { return Metadata; }
349
350 /// Get a const-reference to the graph metadata.
351 const GraphMetadata& getMetadata() const { return Metadata; }
352
353 /// Lock this graph to the given solver instance in preparation
354 /// for running the solver. This method will call solver.handleAddNode for
355 /// each node in the graph, and handleAddEdge for each edge, to give the
356 /// solver an opportunity to set up any required metadata.
357 void setSolver(SolverT &S) {
358 assert(!Solver && "Solver already set. Call unsetSolver().");
359 Solver = &S;
360 for (auto NId : nodeIds())
361 Solver->handleAddNode(NId);
362 for (auto EId : edgeIds())
363 Solver->handleAddEdge(EId);
364 }
365
366 /// Release from solver instance.
367 void unsetSolver() {
368 assert(Solver && "Solver not set.");
369 Solver = nullptr;
370 }
371
372 /// Add a node with the given costs.
373 /// @param Costs Cost vector for the new node.
374 /// @return Node iterator for the added node.
375 template <typename OtherVectorT>
376 NodeId addNode(OtherVectorT Costs) {
377 // Get cost vector from the problem domain
378 VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
379 NodeId NId = addConstructedNode(NodeEntry(AllocatedCosts));
380 if (Solver)
381 Solver->handleAddNode(NId);
382 return NId;
383 }
384
385 /// Add a node bypassing the cost allocator.
386 /// @param Costs Cost vector ptr for the new node (must be convertible to
387 /// VectorPtr).
388 /// @return Node iterator for the added node.
389 ///
390 /// This method allows for fast addition of a node whose costs don't need
391 /// to be passed through the cost allocator. The most common use case for
392 /// this is when duplicating costs from an existing node (when using a
393 /// pooling allocator). These have already been uniqued, so we can avoid
394 /// re-constructing and re-uniquing them by attaching them directly to the
395 /// new node.
396 template <typename OtherVectorPtrT>
397 NodeId addNodeBypassingCostAllocator(OtherVectorPtrT Costs) {
398 NodeId NId = addConstructedNode(NodeEntry(Costs));
399 if (Solver)
400 Solver->handleAddNode(NId);
401 return NId;
402 }
403
404 /// Add an edge between the given nodes with the given costs.
405 /// @param N1Id First node.
406 /// @param N2Id Second node.
407 /// @param Costs Cost matrix for new edge.
408 /// @return Edge iterator for the added edge.
409 template <typename OtherVectorT>
410 EdgeId addEdge(NodeId N1Id, NodeId N2Id, OtherVectorT Costs) {
411 assert(getNodeCosts(N1Id).getLength() == Costs.getRows() &&
412 getNodeCosts(N2Id).getLength() == Costs.getCols() &&
413 "Matrix dimensions mismatch.");
414 // Get cost matrix from the problem domain.
415 MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
Step 8: Calling 'PoolCostAllocator::getMatrix'
416 EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, AllocatedCosts));
417 if (Solver)
418 Solver->handleAddEdge(EId);
419 return EId;
420 }
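A note on the dimension contract checked by the assert above: each node's cost vector length fixes one dimension of every incident edge matrix. The following is a minimal sketch, not code from this report; it assumes the LLVM PBQP headers are on the include path, and GraphT stands for any instantiation of the Graph template.

    // Sketch: two 3-option nodes joined by a 3x3 edge. Rows track N1's
    // vector length and columns N2's, satisfying the assert in addEdge.
    #include "llvm/CodeGen/PBQP/Math.h"

    template <typename GraphT>
    void buildTinyGraph(GraphT &G) {
      using llvm::PBQP::Matrix;
      using llvm::PBQP::Vector;
      typename GraphT::NodeId N1 = G.addNode(Vector(3, 0));
      typename GraphT::NodeId N2 = G.addNode(Vector(3, 0));
      G.addEdge(N1, N2, Matrix(3, 3, 0));
    }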
421
422 /// Add an edge bypassing the cost allocator.
423 /// @param N1Id First node.
424 /// @param N2Id Second node.
425 /// @param Costs Cost matrix for new edge.
426 /// @return Edge iterator for the added edge.
427 ///
428 /// This method allows for fast addition of an edge whose costs don't need
429 /// to be passed through the cost allocator. The most common use case for
430 /// this is when duplicating costs from an existing edge (when using a
431 /// pooling allocator). These have already been uniqued, so we can avoid
432 /// re-constructing and re-uniquing them by attaching them directly to the
433 /// new edge.
434 template <typename OtherMatrixPtrT>
435 EdgeId addEdgeBypassingCostAllocator(NodeId N1Id, NodeId N2Id,
436 OtherMatrixPtrT Costs) {
437 assert(getNodeCosts(N1Id).getLength() == Costs->getRows() &&
438 getNodeCosts(N2Id).getLength() == Costs->getCols() &&
439 "Matrix dimensions mismatch.");
440 // Get cost matrix from the problem domain.
441 EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, Costs));
442 if (Solver)
443 Solver->handleAddEdge(EId);
444 return EId;
445 }
446
447 /// Returns true if the graph is empty.
448 bool empty() const { return NodeIdSet(*this).empty(); }
449
450 NodeIdSet nodeIds() const { return NodeIdSet(*this); }
451 EdgeIdSet edgeIds() const { return EdgeIdSet(*this); }
452
453 AdjEdgeIdSet adjEdgeIds(NodeId NId) { return AdjEdgeIdSet(getNode(NId)); }
454
455 /// Get the number of nodes in the graph.
456 /// @return Number of nodes in the graph.
457 unsigned getNumNodes() const { return NodeIdSet(*this).size(); }
458
459 /// Get the number of edges in the graph.
460 /// @return Number of edges in the graph.
461 unsigned getNumEdges() const { return EdgeIdSet(*this).size(); }
462
463 /// Set a node's cost vector.
464 /// @param NId Node to update.
465 /// @param Costs New costs to set.
466 template <typename OtherVectorT>
467 void setNodeCosts(NodeId NId, OtherVectorT Costs) {
468 VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
469 if (Solver)
470 Solver->handleSetNodeCosts(NId, *AllocatedCosts);
471 getNode(NId).Costs = AllocatedCosts;
472 }
473
474 /// Get a VectorPtr to a node's cost vector. Rarely useful - use
475 /// getNodeCosts where possible.
476 /// @param NId Node id.
477 /// @return VectorPtr to node cost vector.
478 ///
479 /// This method is primarily useful for duplicating costs quickly by
480 /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
481 /// getNodeCosts when dealing with node cost values.
482 const VectorPtr& getNodeCostsPtr(NodeId NId) const {
483 return getNode(NId).Costs;
484 }
485
486 /// Get a node's cost vector.
487 /// @param NId Node id.
488 /// @return Node cost vector.
489 const Vector& getNodeCosts(NodeId NId) const {
490 return *getNodeCostsPtr(NId);
491 }
492
493 NodeMetadata& getNodeMetadata(NodeId NId) {
494 return getNode(NId).Metadata;
495 }
496
497 const NodeMetadata& getNodeMetadata(NodeId NId) const {
498 return getNode(NId).Metadata;
499 }
500
501 typename NodeEntry::AdjEdgeList::size_type getNodeDegree(NodeId NId) const {
502 return getNode(NId).getAdjEdgeIds().size();
503 }
504
505 /// Update an edge's cost matrix.
506 /// @param EId Edge id.
507 /// @param Costs New cost matrix.
508 template <typename OtherMatrixT>
509 void updateEdgeCosts(EdgeId EId, OtherMatrixT Costs) {
510 MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
511 if (Solver)
512 Solver->handleUpdateCosts(EId, *AllocatedCosts);
513 getEdge(EId).Costs = AllocatedCosts;
514 }
515
516 /// Get a MatrixPtr to a node's cost matrix. Rarely useful - use
517 /// getEdgeCosts where possible.
518 /// @param EId Edge id.
519 /// @return MatrixPtr to edge cost matrix.
520 ///
521 /// This method is primarily useful for duplicating costs quickly by
522 /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
523 /// getEdgeCosts when dealing with edge cost values.
524 const MatrixPtr& getEdgeCostsPtr(EdgeId EId) const {
525 return getEdge(EId).Costs;
526 }
527
528 /// Get an edge's cost matrix.
529 /// @param EId Edge id.
530 /// @return Edge cost matrix.
531 const Matrix& getEdgeCosts(EdgeId EId) const {
532 return *getEdge(EId).Costs;
533 }
534
535 EdgeMetadata& getEdgeMetadata(EdgeId EId) {
536 return getEdge(EId).Metadata;
537 }
538
539 const EdgeMetadata& getEdgeMetadata(EdgeId EId) const {
540 return getEdge(EId).Metadata;
541 }
542
543 /// Get the first node connected to this edge.
544 /// @param EId Edge id.
545 /// @return The first node connected to the given edge.
546 NodeId getEdgeNode1Id(EdgeId EId) const {
547 return getEdge(EId).getN1Id();
548 }
549
550 /// Get the second node connected to this edge.
551 /// @param EId Edge id.
552 /// @return The second node connected to the given edge.
553 NodeId getEdgeNode2Id(EdgeId EId) const {
554 return getEdge(EId).getN2Id();
555 }
556
557 /// Get the "other" node connected to this edge.
558 /// @param EId Edge id.
559 /// @param NId Node id for the "given" node.
560 /// @return The iterator for the "other" node connected to this edge.
561 NodeId getEdgeOtherNodeId(EdgeId EId, NodeId NId) {
562 EdgeEntry &E = getEdge(EId);
563 if (E.getN1Id() == NId) {
564 return E.getN2Id();
565 } // else
566 return E.getN1Id();
567 }
568
569 /// Get the edge connecting two nodes.
570 /// @param N1Id First node id.
571 /// @param N2Id Second node id.
572 /// @return An id for edge (N1Id, N2Id) if such an edge exists,
573 /// otherwise returns an invalid edge id.
574 EdgeId findEdge(NodeId N1Id, NodeId N2Id) {
575 for (auto AEId : adjEdgeIds(N1Id)) {
576 if ((getEdgeNode1Id(AEId) == N2Id) ||
577 (getEdgeNode2Id(AEId) == N2Id)) {
578 return AEId;
579 }
580 }
581 return invalidEdgeId();
582 }
583
584 /// Remove a node from the graph.
585 /// @param NId Node id.
586 void removeNode(NodeId NId) {
587 if (Solver)
588 Solver->handleRemoveNode(NId);
589 NodeEntry &N = getNode(NId);
590 // TODO: Can this be for-each'd?
591 for (AdjEdgeItr AEItr = N.adjEdgesBegin(),
592 AEEnd = N.adjEdgesEnd();
593 AEItr != AEEnd;) {
594 EdgeId EId = *AEItr;
595 ++AEItr;
596 removeEdge(EId);
597 }
598 FreeNodeIds.push_back(NId);
599 }
600
601 /// Disconnect an edge from the given node.
602 ///
603 /// Removes the given edge from the adjacency list of the given node.
604 /// This operation leaves the edge in an 'asymmetric' state: It will no
605 /// longer appear in an iteration over the given node's (NId's) edges, but
606 /// will appear in an iteration over the 'other', unnamed node's edges.
607 ///
608 /// This does not correspond to any normal graph operation, but exists to
609 /// support efficient PBQP graph-reduction based solvers. It is used to
610 /// 'effectively' remove the unnamed node from the graph while the solver
611 /// is performing the reduction. The solver will later call reconnectNode
612 /// to restore the edge in the named node's adjacency list.
613 ///
614 /// Since the degree of a node is the number of connected edges,
615 /// disconnecting an edge from a node 'u' will cause the degree of 'u' to
616 /// drop by 1.
617 ///
618 /// A disconnected edge WILL still appear in an iteration over the graph
619 /// edges.
620 ///
621 /// A disconnected edge should not be removed from the graph, it should be
622 /// reconnected first.
623 ///
624 /// A disconnected edge can be reconnected by calling the reconnectEdge
625 /// method.
626 void disconnectEdge(EdgeId EId, NodeId NId) {
627 if (Solver)
628 Solver->handleDisconnectEdge(EId, NId);
629
630 EdgeEntry &E = getEdge(EId);
631 E.disconnectFrom(*this, NId);
632 }
633
634 /// Convenience method to disconnect all neighbours from the given
635 /// node.
636 void disconnectAllNeighborsFromNode(NodeId NId) {
637 for (auto AEId : adjEdgeIds(NId))
638 disconnectEdge(AEId, getEdgeOtherNodeId(AEId, NId));
639 }
640
641 /// Re-attach an edge to its nodes.
642 ///
643 /// Adds an edge that had been previously disconnected back into the
644 /// adjacency set of the nodes that the edge connects.
645 void reconnectEdge(EdgeId EId, NodeId NId) {
646 EdgeEntry &E = getEdge(EId);
647 E.connectTo(*this, EId, NId);
648 if (Solver)
649 Solver->handleReconnectEdge(EId, NId);
650 }
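The disconnect/reconnect protocol documented above pairs as follows. A hedged sketch (the helper name and structure are illustrative, not from the LLVM sources):

    // Temporarily drop NId from the problem during a reduction, then
    // restore it. The edges stay in NId's own adjacency list throughout,
    // so they can be walked to find the ends that need reconnecting.
    template <typename GraphT>
    void withNodeReduced(GraphT &G, typename GraphT::NodeId NId) {
      G.disconnectAllNeighborsFromNode(NId); // neighbours' degrees drop
      // ... solve the reduced problem here ...
      for (auto EId : G.adjEdgeIds(NId))
        G.reconnectEdge(EId, G.getEdgeOtherNodeId(EId, NId));
    }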
651
652 /// Remove an edge from the graph.
653 /// @param EId Edge id.
654 void removeEdge(EdgeId EId) {
655 if (Solver)
656 Solver->handleRemoveEdge(EId);
657 EdgeEntry &E = getEdge(EId);
658 E.disconnect();
659 FreeEdgeIds.push_back(EId);
660 Edges[EId].invalidate();
661 }
662
663 /// Remove all nodes and edges from the graph.
664 void clear() {
665 Nodes.clear();
666 FreeNodeIds.clear();
667 Edges.clear();
668 FreeEdgeIds.clear();
669 }
670 };
671
672} // end namespace PBQP
673} // end namespace llvm
674
675#endif // LLVM_CODEGEN_PBQP_GRAPH_HPP

/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/CodeGen/PBQP/CostAllocator.h

1//===- CostAllocator.h - PBQP Cost Allocator --------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Defines classes conforming to the PBQP cost value manager concept.
11//
12// Cost value managers are memory managers for PBQP cost values (vectors and
13// matrices). Since PBQP graphs can grow very large (e.g. hundreds of thousands
14// of edges on the largest function in SPEC2006), cost values are pooled and
15// uniqued so that identical vectors and matrices share storage.
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
19#define LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
20
21#include "llvm/ADT/DenseSet.h"
22#include <algorithm>
23#include <cstdint>
24#include <memory>
25
26namespace llvm {
27namespace PBQP {
28
29template <typename ValueT> class ValuePool {
30public:
31 using PoolRef = std::shared_ptr<const ValueT>;
32
33private:
34 class PoolEntry : public std::enable_shared_from_this<PoolEntry> {
35 public:
36 template <typename ValueKeyT>
37 PoolEntry(ValuePool &Pool, ValueKeyT Value)
38 : Pool(Pool), Value(std::move(Value)) {}
Step 21: Calling constructor for 'MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>'
39
40 ~PoolEntry() { Pool.removeEntry(this); }
41
42 const ValueT &getValue() const { return Value; }
43
44 private:
45 ValuePool &Pool;
46 ValueT Value;
47 };
48
49 class PoolEntryDSInfo {
50 public:
51 static inline PoolEntry *getEmptyKey() { return nullptr; }
52
53 static inline PoolEntry *getTombstoneKey() {
54 return reinterpret_cast<PoolEntry *>(static_cast<uintptr_t>(1));
55 }
56
57 template <typename ValueKeyT>
58 static unsigned getHashValue(const ValueKeyT &C) {
59 return hash_value(C);
60 }
61
62 static unsigned getHashValue(PoolEntry *P) {
63 return getHashValue(P->getValue());
64 }
65
66 static unsigned getHashValue(const PoolEntry *P) {
67 return getHashValue(P->getValue());
68 }
69
70 template <typename ValueKeyT1, typename ValueKeyT2>
71 static bool isEqual(const ValueKeyT1 &C1, const ValueKeyT2 &C2) {
72 return C1 == C2;
73 }
74
75 template <typename ValueKeyT>
76 static bool isEqual(const ValueKeyT &C, PoolEntry *P) {
77 if (P == getEmptyKey() || P == getTombstoneKey())
78 return false;
79 return isEqual(C, P->getValue());
80 }
81
82 static bool isEqual(PoolEntry *P1, PoolEntry *P2) {
83 if (P1 == getEmptyKey() || P1 == getTombstoneKey())
84 return P1 == P2;
85 return isEqual(P1->getValue(), P2);
86 }
87 };
88
89 using EntrySetT = DenseSet<PoolEntry *, PoolEntryDSInfo>;
90
91 EntrySetT EntrySet;
92
93 void removeEntry(PoolEntry *P) { EntrySet.erase(P); }
94
95public:
96 template <typename ValueKeyT> PoolRef getValue(ValueKeyT ValueKey) {
97 typename EntrySetT::iterator I = EntrySet.find_as(ValueKey);
98
99 if (I != EntrySet.end())
Step 10: Assuming the condition is false
Step 11: Taking false branch
100 return PoolRef((*I)->shared_from_this(), &(*I)->getValue());
101
102 auto P = std::make_shared<PoolEntry>(*this, std::move(ValueKey));
Step 12: Calling 'make_shared<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata> >::PoolEntry, llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata> > &, llvm::PBQP::Matrix>'
103 EntrySet.insert(P.get());
104 return PoolRef(std::move(P), &P->getValue());
105 }
106};
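The uniquing behaviour of getValue can be seen in isolation. A small sketch, assuming the LLVM headers (Vector, its operator==, and hash_value come from llvm/CodeGen/PBQP/Math.h):

    #include "llvm/CodeGen/PBQP/CostAllocator.h"
    #include "llvm/CodeGen/PBQP/Math.h"
    #include <cassert>

    void poolingDemo() {
      using namespace llvm::PBQP;
      ValuePool<Vector> Pool;
      // Equal keys map to one PoolEntry; both refs alias the same value.
      auto V1 = Pool.getValue(Vector(3, 1.0));
      auto V2 = Pool.getValue(Vector(3, 1.0));
      assert(V1.get() == V2.get() && "equal costs share storage");
    }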
107
108template <typename VectorT, typename MatrixT> class PoolCostAllocator {
109private:
110 using VectorCostPool = ValuePool<VectorT>;
111 using MatrixCostPool = ValuePool<MatrixT>;
112
113public:
114 using Vector = VectorT;
115 using Matrix = MatrixT;
116 using VectorPtr = typename VectorCostPool::PoolRef;
117 using MatrixPtr = typename MatrixCostPool::PoolRef;
118
119 template <typename VectorKeyT> VectorPtr getVector(VectorKeyT v) {
120 return VectorPool.getValue(std::move(v));
121 }
122
123 template <typename MatrixKeyT> MatrixPtr getMatrix(MatrixKeyT m) {
124 return MatrixPool.getValue(std::move(m));
Step 9: Calling 'ValuePool::getValue'
125 }
126
127private:
128 VectorCostPool VectorPool;
129 MatrixCostPool MatrixPool;
130};
131
132} // end namespace PBQP
133} // end namespace llvm
134
135#endif // LLVM_CODEGEN_PBQP_COSTALLOCATOR_H

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/bits/shared_ptr.h

1// shared_ptr and weak_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2007-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25// GCC Note: Based on files from version 1.32.0 of the Boost library.
26
27// shared_count.hpp
28// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
29
30// shared_ptr.hpp
31// Copyright (C) 1998, 1999 Greg Colvin and Beman Dawes.
32// Copyright (C) 2001, 2002, 2003 Peter Dimov
33
34// weak_ptr.hpp
35// Copyright (C) 2001, 2002, 2003 Peter Dimov
36
37// enable_shared_from_this.hpp
38// Copyright (C) 2002 Peter Dimov
39
40// Distributed under the Boost Software License, Version 1.0. (See
41// accompanying file LICENSE_1_0.txt or copy at
42// http://www.boost.org/LICENSE_1_0.txt)
43
44/** @file
45 * This is an internal header file, included by other library headers.
46 * Do not attempt to use it directly. @headername{memory}
47 */
48
49#ifndef _SHARED_PTR_H
50#define _SHARED_PTR_H 1
51
52#include <bits/shared_ptr_base.h>
53
54namespace std _GLIBCXX_VISIBILITY(default)
55{
56_GLIBCXX_BEGIN_NAMESPACE_VERSION
57
58 /**
59 * @addtogroup pointer_abstractions
60 * @{
61 */
62
63 /// 20.7.2.2.11 shared_ptr I/O
64 template<typename _Ch, typename _Tr, typename _Tp, _Lock_policy _Lp>
65 inline std::basic_ostream<_Ch, _Tr>&
66 operator<<(std::basic_ostream<_Ch, _Tr>& __os,
67 const __shared_ptr<_Tp, _Lp>& __p)
68 {
69 __os << __p.get();
70 return __os;
71 }
72
73 /// 20.7.2.2.10 shared_ptr get_deleter
74 template<typename _Del, typename _Tp, _Lock_policy _Lp>
75 inline _Del*
76 get_deleter(const __shared_ptr<_Tp, _Lp>& __p) noexcept
77 {
78#if __cpp_rtti
79 return static_cast<_Del*>(__p._M_get_deleter(typeid(_Del)));
80#else
81 return 0;
82#endif
83 }
84
85
86 /**
87 * @brief A smart pointer with reference-counted copy semantics.
88 *
89 * The object pointed to is deleted when the last shared_ptr pointing to
90 * it is destroyed or reset.
91 */
92 template<typename _Tp>
93 class shared_ptr : public __shared_ptr<_Tp>
94 {
95 template<typename _Ptr>
96 using _Convertible
97 = typename enable_if<is_convertible<_Ptr, _Tp*>::value>::type;
98
99 public:
100 /**
101 * @brief Construct an empty %shared_ptr.
102 * @post use_count()==0 && get()==0
103 */
104 constexpr shared_ptr() noexcept
105 : __shared_ptr<_Tp>() { }
106
107 shared_ptr(const shared_ptr&) noexcept = default;
108
109 /**
110 * @brief Construct a %shared_ptr that owns the pointer @a __p.
111 * @param __p A pointer that is convertible to element_type*.
112 * @post use_count() == 1 && get() == __p
113 * @throw std::bad_alloc, in which case @c delete @a __p is called.
114 */
115 template<typename _Tp1>
116 explicit shared_ptr(_Tp1* __p)
117 : __shared_ptr<_Tp>(__p) { }
118
119 /**
120 * @brief Construct a %shared_ptr that owns the pointer @a __p
121 * and the deleter @a __d.
122 * @param __p A pointer.
123 * @param __d A deleter.
124 * @post use_count() == 1 && get() == __p
125 * @throw std::bad_alloc, in which case @a __d(__p) is called.
126 *
127 * Requirements: _Deleter's copy constructor and destructor must
128 * not throw
129 *
130 * __shared_ptr will release __p by calling __d(__p)
131 */
132 template<typename _Tp1, typename _Deleter>
133 shared_ptr(_Tp1* __p, _Deleter __d)
134 : __shared_ptr<_Tp>(__p, __d) { }
135
136 /**
137 * @brief Construct a %shared_ptr that owns a null pointer
138 * and the deleter @a __d.
139 * @param __p A null pointer constant.
140 * @param __d A deleter.
141 * @post use_count() == 1 && get() == __p
142 * @throw std::bad_alloc, in which case @a __d(__p) is called.
143 *
144 * Requirements: _Deleter's copy constructor and destructor must
145 * not throw
146 *
147 * The last owner will call __d(__p)
148 */
149 template<typename _Deleter>
150 shared_ptr(nullptr_t __p, _Deleter __d)
151 : __shared_ptr<_Tp>(__p, __d) { }
152
153 /**
154 * @brief Construct a %shared_ptr that owns the pointer @a __p
155 * and the deleter @a __d.
156 * @param __p A pointer.
157 * @param __d A deleter.
158 * @param __a An allocator.
159 * @post use_count() == 1 && get() == __p
160 * @throw std::bad_alloc, in which case @a __d(__p) is called.
161 *
162 * Requirements: _Deleter's copy constructor and destructor must
163 * not throw _Alloc's copy constructor and destructor must not
164 * throw.
165 *
166 * __shared_ptr will release __p by calling __d(__p)
167 */
168 template<typename _Tp1, typename _Deleter, typename _Alloc>
169 shared_ptr(_Tp1* __p, _Deleter __d, _Alloc __a)
170 : __shared_ptr<_Tp>(__p, __d, std::move(__a)) { }
171
172 /**
173 * @brief Construct a %shared_ptr that owns a null pointer
174 * and the deleter @a __d.
175 * @param __p A null pointer constant.
176 * @param __d A deleter.
177 * @param __a An allocator.
178 * @post use_count() == 1 && get() == __p
179 * @throw std::bad_alloc, in which case @a __d(__p) is called.
180 *
181 * Requirements: _Deleter's copy constructor and destructor must
182 * not throw _Alloc's copy constructor and destructor must not
183 * throw.
184 *
185 * The last owner will call __d(__p)
186 */
187 template<typename _Deleter, typename _Alloc>
188 shared_ptr(nullptr_t __p, _Deleter __d, _Alloc __a)
189 : __shared_ptr<_Tp>(__p, __d, std::move(__a)) { }
190
191 // Aliasing constructor
192
193 /**
194 * @brief Constructs a %shared_ptr instance that stores @a __p
195 * and shares ownership with @a __r.
196 * @param __r A %shared_ptr.
197 * @param __p A pointer that will remain valid while @a *__r is valid.
198 * @post get() == __p && use_count() == __r.use_count()
199 *
200 * This can be used to construct a @c shared_ptr to a sub-object
201 * of an object managed by an existing @c shared_ptr.
202 *
203 * @code
204 * shared_ptr< pair<int,int> > pii(new pair<int,int>());
205 * shared_ptr<int> pi(pii, &pii->first);
206 * assert(pii.use_count() == 2);
207 * @endcode
208 */
209 template<typename _Tp1>
210 shared_ptr(const shared_ptr<_Tp1>& __r, _Tp* __p) noexcept
211 : __shared_ptr<_Tp>(__r, __p) { }
212
213 /**
214 * @brief If @a __r is empty, constructs an empty %shared_ptr;
215 * otherwise construct a %shared_ptr that shares ownership
216 * with @a __r.
217 * @param __r A %shared_ptr.
218 * @post get() == __r.get() && use_count() == __r.use_count()
219 */
220 template<typename _Tp1, typename = _Convertible<_Tp1*>>
221 shared_ptr(const shared_ptr<_Tp1>& __r) noexcept
222 : __shared_ptr<_Tp>(__r) { }
223
224 /**
225 * @brief Move-constructs a %shared_ptr instance from @a __r.
226 * @param __r A %shared_ptr rvalue.
227 * @post *this contains the old value of @a __r, @a __r is empty.
228 */
229 shared_ptr(shared_ptr&& __r) noexcept
230 : __shared_ptr<_Tp>(std::move(__r)) { }
231
232 /**
233 * @brief Move-constructs a %shared_ptr instance from @a __r.
234 * @param __r A %shared_ptr rvalue.
235 * @post *this contains the old value of @a __r, @a __r is empty.
236 */
237 template<typename _Tp1, typename = _Convertible<_Tp1*>>
238 shared_ptr(shared_ptr<_Tp1>&& __r) noexcept
239 : __shared_ptr<_Tp>(std::move(__r)) { }
240
241 /**
242 * @brief Constructs a %shared_ptr that shares ownership with @a __r
243 * and stores a copy of the pointer stored in @a __r.
244 * @param __r A weak_ptr.
245 * @post use_count() == __r.use_count()
246 * @throw bad_weak_ptr when __r.expired(),
247 * in which case the constructor has no effect.
248 */
249 template<typename _Tp1>
250 explicit shared_ptr(const weak_ptr<_Tp1>& __r)
251 : __shared_ptr<_Tp>(__r) { }
252
253#if _GLIBCXX_USE_DEPRECATED
254 template<typename _Tp1>
255 shared_ptr(std::auto_ptr<_Tp1>&& __r);
256#endif
257
258 // _GLIBCXX_RESOLVE_LIB_DEFECTS
259 // 2399. shared_ptr's constructor from unique_ptr should be constrained
260 template<typename _Tp1, typename _Del, typename
261 = _Convertible<typename unique_ptr<_Tp1, _Del>::pointer>>
262 shared_ptr(std::unique_ptr<_Tp1, _Del>&& __r)
263 : __shared_ptr<_Tp>(std::move(__r)) { }
264
265 /**
266 * @brief Construct an empty %shared_ptr.
267 * @post use_count() == 0 && get() == nullptr
268 */
269 constexpr shared_ptr(nullptr_t) noexcept : shared_ptr() { }
270
271 shared_ptr& operator=(const shared_ptr&) noexcept = default;
272
273 template<typename _Tp1>
274 shared_ptr&
275 operator=(const shared_ptr<_Tp1>& __r) noexcept
276 {
277 this->__shared_ptr<_Tp>::operator=(__r);
278 return *this;
279 }
280
281#if _GLIBCXX_USE_DEPRECATED
282 template<typename _Tp1>
283 shared_ptr&
284 operator=(std::auto_ptr<_Tp1>&& __r)
285 {
286 this->__shared_ptr<_Tp>::operator=(std::move(__r));
287 return *this;
288 }
289#endif
290
291 shared_ptr&
292 operator=(shared_ptr&& __r) noexcept
293 {
294 this->__shared_ptr<_Tp>::operator=(std::move(__r));
295 return *this;
296 }
297
298 template<class _Tp1>
299 shared_ptr&
300 operator=(shared_ptr<_Tp1>&& __r) noexcept
301 {
302 this->__shared_ptr<_Tp>::operator=(std::move(__r));
303 return *this;
304 }
305
306 template<typename _Tp1, typename _Del>
307 shared_ptr&
308 operator=(std::unique_ptr<_Tp1, _Del>&& __r)
309 {
310 this->__shared_ptr<_Tp>::operator=(std::move(__r));
311 return *this;
312 }
313
314 private:
315 // This constructor is non-standard, it is used by allocate_shared.
316 template<typename _Alloc, typename... _Args>
317 shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
318 _Args&&... __args)
319 : __shared_ptr<_Tp>(__tag, __a, std::forward<_Args>(__args)...)
Step 15: Calling constructor for '__shared_ptr<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata> >::PoolEntry, __gnu_cxx::_S_atomic>'
320 { }
321
322 template<typename _Tp1, typename _Alloc, typename... _Args>
323 friend shared_ptr<_Tp1>
324 allocate_shared(const _Alloc& __a, _Args&&... __args);
325
326 // This constructor is non-standard, it is used by weak_ptr::lock().
327 shared_ptr(const weak_ptr<_Tp>& __r, std::nothrow_t)
328 : __shared_ptr<_Tp>(__r, std::nothrow) { }
329
330 friend class weak_ptr<_Tp>;
331 };
332
333 // 20.7.2.2.7 shared_ptr comparisons
334 template<typename _Tp1, typename _Tp2>
335 inline bool
336 operator==(const shared_ptr<_Tp1>& __a,
337 const shared_ptr<_Tp2>& __b) noexcept
338 { return __a.get() == __b.get(); }
339
340 template<typename _Tp>
341 inline bool
342 operator==(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
343 { return !__a; }
344
345 template<typename _Tp>
346 inline bool
347 operator==(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
348 { return !__a; }
349
350 template<typename _Tp1, typename _Tp2>
351 inline bool
352 operator!=(const shared_ptr<_Tp1>& __a,
353 const shared_ptr<_Tp2>& __b) noexcept
354 { return __a.get() != __b.get(); }
355
356 template<typename _Tp>
357 inline bool
358 operator!=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
359 { return (bool)__a; }
360
361 template<typename _Tp>
362 inline bool
363 operator!=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
364 { return (bool)__a; }
365
366 template<typename _Tp1, typename _Tp2>
367 inline bool
368 operator<(const shared_ptr<_Tp1>& __a,
369 const shared_ptr<_Tp2>& __b) noexcept
370 {
371 typedef typename std::common_type<_Tp1*, _Tp2*>::type _CT;
372 return std::less<_CT>()(__a.get(), __b.get());
373 }
374
375 template<typename _Tp>
376 inline bool
377 operator<(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
378 { return std::less<_Tp*>()(__a.get(), nullptr); }
379
380 template<typename _Tp>
381 inline bool
382 operator<(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
383 { return std::less<_Tp*>()(nullptr, __a.get()); }
384
385 template<typename _Tp1, typename _Tp2>
386 inline bool
387 operator<=(const shared_ptr<_Tp1>& __a,
388 const shared_ptr<_Tp2>& __b) noexcept
389 { return !(__b < __a); }
390
391 template<typename _Tp>
392 inline bool
393 operator<=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
394 { return !(nullptr < __a); }
395
396 template<typename _Tp>
397 inline bool
398 operator<=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
399 { return !(__a < nullptr); }
400
401 template<typename _Tp1, typename _Tp2>
402 inline bool
403 operator>(const shared_ptr<_Tp1>& __a,
404 const shared_ptr<_Tp2>& __b) noexcept
405 { return (__b < __a); }
406
407 template<typename _Tp>
408 inline bool
409 operator>(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
410 { return std::less<_Tp*>()(nullptr, __a.get()); }
411
412 template<typename _Tp>
413 inline bool
414 operator>(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
415 { return std::less<_Tp*>()(__a.get(), nullptr); }
416
417 template<typename _Tp1, typename _Tp2>
418 inline bool
419 operator>=(const shared_ptr<_Tp1>& __a,
420 const shared_ptr<_Tp2>& __b) noexcept
421 { return !(__a < __b); }
422
423 template<typename _Tp>
424 inline bool
425 operator>=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
426 { return !(__a < nullptr); }
427
428 template<typename _Tp>
429 inline bool
430 operator>=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
431 { return !(nullptr < __a); }
432
433 template<typename _Tp>
434 struct less<shared_ptr<_Tp>> : public _Sp_less<shared_ptr<_Tp>>
435 { };
436
437 // 20.7.2.2.8 shared_ptr specialized algorithms.
438 template<typename _Tp>
439 inline void
440 swap(shared_ptr<_Tp>& __a, shared_ptr<_Tp>& __b) noexcept
441 { __a.swap(__b); }
442
443 // 20.7.2.2.9 shared_ptr casts.
444 template<typename _Tp, typename _Tp1>
445 inline shared_ptr<_Tp>
446 static_pointer_cast(const shared_ptr<_Tp1>& __r) noexcept
447 { return shared_ptr<_Tp>(__r, static_cast<_Tp*>(__r.get())); }
448
449 template<typename _Tp, typename _Tp1>
450 inline shared_ptr<_Tp>
451 const_pointer_cast(const shared_ptr<_Tp1>& __r) noexcept
452 { return shared_ptr<_Tp>(__r, const_cast<_Tp*>(__r.get())); }
453
454 template<typename _Tp, typename _Tp1>
455 inline shared_ptr<_Tp>
456 dynamic_pointer_cast(const shared_ptr<_Tp1>& __r) noexcept
457 {
458 if (_Tp* __p = dynamic_cast<_Tp*>(__r.get()))
459 return shared_ptr<_Tp>(__r, __p);
460 return shared_ptr<_Tp>();
461 }
462
463
464 /**
465 * @brief A smart pointer with weak semantics.
466 *
467 * With forwarding constructors and assignment operators.
468 */
469 template<typename _Tp>
470 class weak_ptr : public __weak_ptr<_Tp>
471 {
472 template<typename _Ptr>
473 using _Convertible
474 = typename enable_if<is_convertible<_Ptr, _Tp*>::value>::type;
475
476 public:
477 constexpr weak_ptr() noexcept = default;
478
479 template<typename _Tp1, typename = _Convertible<_Tp1*>>
480 weak_ptr(const shared_ptr<_Tp1>& __r) noexcept
481 : __weak_ptr<_Tp>(__r) { }
482
483 weak_ptr(const weak_ptr&) noexcept = default;
484
485 template<typename _Tp1, typename = _Convertible<_Tp1*>>
486 weak_ptr(const weak_ptr<_Tp1>& __r) noexcept
487 : __weak_ptr<_Tp>(__r) { }
488
489 weak_ptr(weak_ptr&&) noexcept = default;
490
491 template<typename _Tp1, typename = _Convertible<_Tp1*>>
492 weak_ptr(weak_ptr<_Tp1>&& __r) noexcept
493 : __weak_ptr<_Tp>(std::move(__r)) { }
494
495 weak_ptr&
496 operator=(const weak_ptr& __r) noexcept = default;
497
498 template<typename _Tp1>
499 weak_ptr&
500 operator=(const weak_ptr<_Tp1>& __r) noexcept
501 {
502 this->__weak_ptr<_Tp>::operator=(__r);
503 return *this;
504 }
505
506 template<typename _Tp1>
507 weak_ptr&
508 operator=(const shared_ptr<_Tp1>& __r) noexcept
509 {
510 this->__weak_ptr<_Tp>::operator=(__r);
511 return *this;
512 }
513
514 weak_ptr&
515 operator=(weak_ptr&& __r) noexcept = default;
516
517 template<typename _Tp1>
518 weak_ptr&
519 operator=(weak_ptr<_Tp1>&& __r) noexcept
520 {
521 this->__weak_ptr<_Tp>::operator=(std::move(__r));
522 return *this;
523 }
524
525 shared_ptr<_Tp>
526 lock() const noexcept
527 { return shared_ptr<_Tp>(*this, std::nothrow); }
528 };
529
530 // 20.7.2.3.6 weak_ptr specialized algorithms.
531 template<typename _Tp>
532 inline void
533 swap(weak_ptr<_Tp>& __a, weak_ptr<_Tp>& __b) noexcept
534 { __a.swap(__b); }
535
536
537 /// Primary template owner_less
538 template<typename _Tp>
539 struct owner_less;
540
541 /// Partial specialization of owner_less for shared_ptr.
542 template<typename _Tp>
543 struct owner_less<shared_ptr<_Tp>>
544 : public _Sp_owner_less<shared_ptr<_Tp>, weak_ptr<_Tp>>
545 { };
546
547 /// Partial specialization of owner_less for weak_ptr.
548 template<typename _Tp>
549 struct owner_less<weak_ptr<_Tp>>
550 : public _Sp_owner_less<weak_ptr<_Tp>, shared_ptr<_Tp>>
551 { };
552
553 /**
554 * @brief Base class allowing use of member function shared_from_this.
555 */
556 template<typename _Tp>
557 class enable_shared_from_this
558 {
559 protected:
560 constexpr enable_shared_from_this() noexcept { }
561
562 enable_shared_from_this(const enable_shared_from_this&) noexcept { }
563
564 enable_shared_from_this&
565 operator=(const enable_shared_from_this&) noexcept
566 { return *this; }
567
568 ~enable_shared_from_this() { }
569
570 public:
571 shared_ptr<_Tp>
572 shared_from_this()
573 { return shared_ptr<_Tp>(this->_M_weak_this); }
574
575 shared_ptr<const _Tp>
576 shared_from_this() const
577 { return shared_ptr<const _Tp>(this->_M_weak_this); }
578
579 private:
580 template<typename _Tp1>
581 void
582 _M_weak_assign(_Tp1* __p, const __shared_count<>& __n) const noexcept
583 { _M_weak_this._M_assign(__p, __n); }
584
585 template<typename _Tp1, typename _Tp2>
586 friend void
587 __enable_shared_from_this_helper(const __shared_count<>&,
588 const enable_shared_from_this<_Tp1>*,
589 const _Tp2*) noexcept;
590
591 mutable weak_ptr<_Tp> _M_weak_this;
592 };
593
594 template<typename _Tp1, typename _Tp2>
595 inline void
596 __enable_shared_from_this_helper(const __shared_count<>& __pn,
597 const enable_shared_from_this<_Tp1>*
598 __pe, const _Tp2* __px) noexcept
599 {
600 if (__pe != nullptr)
601 __pe->_M_weak_assign(const_cast<_Tp2*>(__px), __pn);
602 }
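PoolEntry (in CostAllocator.h above) inherits from enable_shared_from_this precisely so that getValue can hand out additional owning references to an entry found in the pool. A self-contained sketch of the pattern (the type name is hypothetical):

    #include <cassert>
    #include <memory>

    struct Entry : std::enable_shared_from_this<Entry> {
      std::shared_ptr<Entry> self() { return shared_from_this(); }
    };

    int main() {
      auto E = std::make_shared<Entry>(); // seeds _M_weak_this
      auto F = E->self();                 // shares ownership with E
      assert(E.get() == F.get() && E.use_count() == 2);
    }

Note that shared_from_this is only valid once the object is already owned by a shared_ptr; make_shared guarantees that here, just as getValue does for PoolEntry.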
603
604 /**
605 * @brief Create an object that is owned by a shared_ptr.
606 * @param __a An allocator.
607 * @param __args Arguments for the @a _Tp object's constructor.
608 * @return A shared_ptr that owns the newly created object.
609 * @throw An exception thrown from @a _Alloc::allocate or from the
610 * constructor of @a _Tp.
611 *
612 * A copy of @a __a will be used to allocate memory for the shared_ptr
613 * and the new object.
614 */
615 template<typename _Tp, typename _Alloc, typename... _Args>
616 inline shared_ptr<_Tp>
617 allocate_shared(const _Alloc& __a, _Args&&... __args)
618 {
619 return shared_ptr<_Tp>(_Sp_make_shared_tag(), __a,
Step 14: Calling constructor for 'shared_ptr<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata> >::PoolEntry>'
620 std::forward<_Args>(__args)...);
621 }
622
623 /**
624 * @brief Create an object that is owned by a shared_ptr.
625 * @param __args Arguments for the @a _Tp object's constructor.
626 * @return A shared_ptr that owns the newly created object.
627 * @throw std::bad_alloc, or an exception thrown from the
628 * constructor of @a _Tp.
629 */
630 template<typename _Tp, typename... _Args>
631 inline shared_ptr<_Tp>
632 make_shared(_Args&&... __args)
633 {
634 typedef typename std::remove_const<_Tp>::type _Tp_nc;
635 return std::allocate_shared<_Tp>(std::allocator<_Tp_nc>(),
Step 13: Calling 'allocate_shared<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata> >::PoolEntry, std::allocator<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata> >::PoolEntry>, llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata> > &, llvm::PBQP::Matrix>'
636 std::forward<_Args>(__args)...);
637 }
638
639 /// std::hash specialization for shared_ptr.
640 template<typename _Tp>
641 struct hash<shared_ptr<_Tp>>
642 : public __hash_base<size_t, shared_ptr<_Tp>>
643 {
644 size_t
645 operator()(const shared_ptr<_Tp>& __s) const noexcept
646 { return std::hash<_Tp*>()(__s.get()); }
647 };
648
649 // @} group pointer_abstractions
650
651_GLIBCXX_END_NAMESPACE_VERSION
652} // namespace
653
654#endif // _SHARED_PTR_H
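The analyzer path (steps 12 through 15) follows exactly this chain: make_shared forwards to allocate_shared, which reaches the private _Sp_make_shared_tag constructor so that the object and its control block share a single allocation. A short illustration of the two construction styles, using standard behaviour only:

    #include <memory>

    struct Widget { int V = 0; };

    int main() {
      auto A = std::make_shared<Widget>();     // one allocation, in-place
      std::shared_ptr<Widget> B(new Widget()); // separate object + control block
      return A->V + B->V;
    }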

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/bits/shared_ptr_base.h

1// shared_ptr and weak_ptr implementation details -*- C++ -*-
2
3// Copyright (C) 2007-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25// GCC Note: Based on files from version 1.32.0 of the Boost library.
26
27// shared_count.hpp
28// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
29
30// shared_ptr.hpp
31// Copyright (C) 1998, 1999 Greg Colvin and Beman Dawes.
32// Copyright (C) 2001, 2002, 2003 Peter Dimov
33
34// weak_ptr.hpp
35// Copyright (C) 2001, 2002, 2003 Peter Dimov
36
37// enable_shared_from_this.hpp
38// Copyright (C) 2002 Peter Dimov
39
40// Distributed under the Boost Software License, Version 1.0. (See
41// accompanying file LICENSE_1_0.txt or copy at
42// http://www.boost.org/LICENSE_1_0.txt)
43
44/** @file bits/shared_ptr_base.h
45 * This is an internal header file, included by other library headers.
46 * Do not attempt to use it directly. @headername{memory}
47 */
48
49#ifndef _SHARED_PTR_BASE_H
50#define _SHARED_PTR_BASE_H 1
51
52#include <typeinfo>
53#include <bits/allocated_ptr.h>
54#include <ext/aligned_buffer.h>
55
56namespace std _GLIBCXX_VISIBILITY(default)
57{
58_GLIBCXX_BEGIN_NAMESPACE_VERSION
59
60#if _GLIBCXX_USE_DEPRECATED
61 template<typename> class auto_ptr;
62#endif
63
64 /**
65 * @brief Exception possibly thrown by @c shared_ptr.
66 * @ingroup exceptions
67 */
68 class bad_weak_ptr : public std::exception
69 {
70 public:
71 virtual char const* what() const noexcept;
72
73 virtual ~bad_weak_ptr() noexcept;
74 };
75
76 // Substitute for bad_weak_ptr object in the case of -fno-exceptions.
77 inline void
78 __throw_bad_weak_ptr()
79 { _GLIBCXX_THROW_OR_ABORT(bad_weak_ptr()); }
80
81 using __gnu_cxx::_Lock_policy;
82 using __gnu_cxx::__default_lock_policy;
83 using __gnu_cxx::_S_single;
84 using __gnu_cxx::_S_mutex;
85 using __gnu_cxx::_S_atomic;
86
87 // Empty helper class except when the template argument is _S_mutex.
88 template<_Lock_policy _Lp>
89 class _Mutex_base
90 {
91 protected:
92 // The atomic policy uses fully-fenced builtins, single doesn't care.
93 enum { _S_need_barriers = 0 };
94 };
95
96 template<>
97 class _Mutex_base<_S_mutex>
98 : public __gnu_cxx::__mutex
99 {
100 protected:
101 // This policy is used when atomic builtins are not available.
102 // The replacement atomic operations might not have the necessary
103 // memory barriers.
104 enum { _S_need_barriers = 1 };
105 };
106
107 template<_Lock_policy _Lp = __default_lock_policy>
108 class _Sp_counted_base
109 : public _Mutex_base<_Lp>
110 {
111 public:
112 _Sp_counted_base() noexcept
113 : _M_use_count(1), _M_weak_count(1) { }
114
115 virtual
116 ~_Sp_counted_base() noexcept
117 { }
118
119 // Called when _M_use_count drops to zero, to release the resources
120 // managed by *this.
121 virtual void
122 _M_dispose() noexcept = 0;
123
124 // Called when _M_weak_count drops to zero.
125 virtual void
126 _M_destroy() noexcept
127 { delete this; }
128
129 virtual void*
130 _M_get_deleter(const std::type_info&) noexcept = 0;
131
132 void
133 _M_add_ref_copy()
134 { __gnu_cxx::__atomic_add_dispatch(&_M_use_count, 1); }
135
136 void
137 _M_add_ref_lock();
138
139 bool
140 _M_add_ref_lock_nothrow();
141
142 void
143 _M_release() noexcept
144 {
145 // Be race-detector-friendly. For more info see bits/c++config.
146 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_use_count);
147 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, -1) == 1)
148 {
149 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_use_count);
150 _M_dispose();
151 // There must be a memory barrier between dispose() and destroy()
152 // to ensure that the effects of dispose() are observed in the
153 // thread that runs destroy().
154 // See http://gcc.gnu.org/ml/libstdc++/2005-11/msg00136.html
155 if (_Mutex_base<_Lp>::_S_need_barriers)
156 {
157 __atomic_thread_fence (__ATOMIC_ACQ_REL);
158 }
159
160 // Be race-detector-friendly. For more info see bits/c++config.
161 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
162 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count,
163 -1) == 1)
164 {
165 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
166 _M_destroy();
167 }
168 }
169 }
170
171 void
172 _M_weak_add_ref() noexcept
173 { __gnu_cxx::__atomic_add_dispatch(&_M_weak_count, 1); }
174
175 void
176 _M_weak_release() noexcept
177 {
178 // Be race-detector-friendly. For more info see bits/c++config.
179 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
180 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count, -1) == 1)
181 {
182 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
183 if (_Mutex_base<_Lp>::_S_need_barriers)
184 {
185 // See _M_release(),
186 // destroy() must observe results of dispose()
187 __atomic_thread_fence (__ATOMIC_ACQ_REL);
188 }
189 _M_destroy();
190 }
191 }
192
193 long
194 _M_get_use_count() const noexcept
195 {
196 // No memory barrier is used here so there is no synchronization
197 // with other threads.
198 return __atomic_load_n(&_M_use_count, __ATOMIC_RELAXED);
199 }
200
201 private:
202 _Sp_counted_base(_Sp_counted_base const&) = delete;
203 _Sp_counted_base& operator=(_Sp_counted_base const&) = delete;
204
205 _Atomic_word _M_use_count; // #shared
206 _Atomic_word _M_weak_count; // #weak + (#shared != 0)
207 };
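The two counters maintain the invariant noted in the comment above: _M_weak_count is the number of weak_ptrs plus one while any shared_ptr is alive, so the control block outlives the payload. A short sketch of the observable effect:

    #include <cassert>
    #include <memory>

    int main() {
      auto S = std::make_shared<int>(7);
      std::weak_ptr<int> W = S;  // weak count: 1 + (shared != 0)
      assert(S.use_count() == 1 && !W.expired());
      S.reset();                 // use count -> 0: payload disposed
      assert(W.expired());       // control block persists until W dies
      return 0;
    }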
208
209 template<>
210 inline void
211 _Sp_counted_base<_S_single>::
212 _M_add_ref_lock()
213 {
214 if (_M_use_count == 0)
215 __throw_bad_weak_ptr();
216 ++_M_use_count;
217 }
218
219 template<>
220 inline void
221 _Sp_counted_base<_S_mutex>::
222 _M_add_ref_lock()
223 {
224 __gnu_cxx::__scoped_lock sentry(*this);
225 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
226 {
227 _M_use_count = 0;
228 __throw_bad_weak_ptr();
229 }
230 }
231
232 template<>
233 inline void
234 _Sp_counted_base<_S_atomic>::
235 _M_add_ref_lock()
236 {
237 // Perform lock-free add-if-not-zero operation.
238 _Atomic_word __count = _M_get_use_count();
239 do
240 {
241 if (__count == 0)
242 __throw_bad_weak_ptr();
243 // Replace the current counter value with the old value + 1, as
244 // long as it's not changed meanwhile.
245 }
246 while (!__atomic_compare_exchange_n(&_M_use_count, &__count, __count + 1,
247 true, __ATOMIC_ACQ_REL,
248 __ATOMIC_RELAXED));
249 }
250
251 template<>
252 inline bool
253 _Sp_counted_base<_S_single>::
254 _M_add_ref_lock_nothrow()
255 {
256 if (_M_use_count == 0)
257 return false;
258 ++_M_use_count;
259 return true;
260 }
261
262 template<>
263 inline bool
264 _Sp_counted_base<_S_mutex>::
265 _M_add_ref_lock_nothrow()
266 {
267 __gnu_cxx::__scoped_lock sentry(*this);
268 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
269 {
270 _M_use_count = 0;
271 return false;
272 }
273 return true;
274 }
275
276 template<>
277 inline bool
278 _Sp_counted_base<_S_atomic>::
279 _M_add_ref_lock_nothrow()
280 {
281 // Perform lock-free add-if-not-zero operation.
282 _Atomic_word __count = _M_get_use_count();
283 do
284 {
285 if (__count == 0)
286 return false;
287 // Replace the current counter value with the old value + 1, as
288 // long as it's not changed meanwhile.
289 }
290 while (!__atomic_compare_exchange_n(&_M_use_count, &__count, __count + 1,
291 true, __ATOMIC_ACQ_REL,
292 __ATOMIC_RELAXED));
293 return true;
294 }
295
296 template<>
297 inline void
298 _Sp_counted_base<_S_single>::_M_add_ref_copy()
299 { ++_M_use_count; }
300
301 template<>
302 inline void
303 _Sp_counted_base<_S_single>::_M_release() noexcept
304 {
305 if (--_M_use_count == 0)
306 {
307 _M_dispose();
308 if (--_M_weak_count == 0)
309 _M_destroy();
310 }
311 }
312
313 template<>
314 inline void
315 _Sp_counted_base<_S_single>::_M_weak_add_ref() noexcept
316 { ++_M_weak_count; }
317
318 template<>
319 inline void
320 _Sp_counted_base<_S_single>::_M_weak_release() noexcept
321 {
322 if (--_M_weak_count == 0)
323 _M_destroy();
324 }
325
326 template<>
327 inline long
328 _Sp_counted_base<_S_single>::_M_get_use_count() const noexcept
329 { return _M_use_count; }
330
331
332 // Forward declarations.
333 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
334 class __shared_ptr;
335
336 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
337 class __weak_ptr;
338
339 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
340 class __enable_shared_from_this;
341
342 template<typename _Tp>
343 class shared_ptr;
344
345 template<typename _Tp>
346 class weak_ptr;
347
348 template<typename _Tp>
349 struct owner_less;
350
351 template<typename _Tp>
352 class enable_shared_from_this;
353
354 template<_Lock_policy _Lp = __default_lock_policy>
355 class __weak_count;
356
357 template<_Lock_policy _Lp = __default_lock_policy>
358 class __shared_count;
359
360
361 // Counted ptr with no deleter or allocator support
362 template<typename _Ptr, _Lock_policy _Lp>
363 class _Sp_counted_ptr final : public _Sp_counted_base<_Lp>
364 {
365 public:
366 explicit
367 _Sp_counted_ptr(_Ptr __p) noexcept
368 : _M_ptr(__p) { }
369
370 virtual void
371 _M_dispose() noexcept
372 { delete _M_ptr; }
373
374 virtual void
375 _M_destroy() noexcept
376 { delete this; }
377
378 virtual void*
379 _M_get_deleter(const std::type_info&) noexcept
380 { return nullptr; }
381
382 _Sp_counted_ptr(const _Sp_counted_ptr&) = delete;
383 _Sp_counted_ptr& operator=(const _Sp_counted_ptr&) = delete;
384
385 private:
386 _Ptr _M_ptr;
387 };
388
389 template<>
390 inline void
391 _Sp_counted_ptr<nullptr_t, _S_single>::_M_dispose() noexcept { }
392
393 template<>
394 inline void
395 _Sp_counted_ptr<nullptr_t, _S_mutex>::_M_dispose() noexcept { }
396
397 template<>
398 inline void
399 _Sp_counted_ptr<nullptr_t, _S_atomic>::_M_dispose() noexcept { }
400
401 template<int _Nm, typename _Tp,
402 bool __use_ebo = !__is_final(_Tp) && __is_empty(_Tp)>
403 struct _Sp_ebo_helper;
404
405 /// Specialization using EBO.
406 template<int _Nm, typename _Tp>
407 struct _Sp_ebo_helper<_Nm, _Tp, true> : private _Tp
408 {
409 explicit _Sp_ebo_helper(const _Tp& __tp) : _Tp(__tp) { }
410
411 static _Tp&
412 _S_get(_Sp_ebo_helper& __eboh) { return static_cast<_Tp&>(__eboh); }
413 };
414
415 /// Specialization not using EBO.
416 template<int _Nm, typename _Tp>
417 struct _Sp_ebo_helper<_Nm, _Tp, false>
418 {
419 explicit _Sp_ebo_helper(const _Tp& __tp) : _M_tp(__tp) { }
420
421 static _Tp&
422 _S_get(_Sp_ebo_helper& __eboh)
423 { return __eboh._M_tp; }
424
425 private:
426 _Tp _M_tp;
427 };
428
429 // Support for custom deleter and/or allocator
430 template<typename _Ptr, typename _Deleter, typename _Alloc, _Lock_policy _Lp>
431 class _Sp_counted_deleter final : public _Sp_counted_base<_Lp>
432 {
433 class _Impl : _Sp_ebo_helper<0, _Deleter>, _Sp_ebo_helper<1, _Alloc>
434 {
435 typedef _Sp_ebo_helper<0, _Deleter> _Del_base;
436 typedef _Sp_ebo_helper<1, _Alloc> _Alloc_base;
437
438 public:
439 _Impl(_Ptr __p, _Deleter __d, const _Alloc& __a) noexcept
440 : _M_ptr(__p), _Del_base(__d), _Alloc_base(__a)
441 { }
442
443 _Deleter& _M_del() noexcept { return _Del_base::_S_get(*this); }
444 _Alloc& _M_alloc() noexcept { return _Alloc_base::_S_get(*this); }
445
446 _Ptr _M_ptr;
447 };
448
449 public:
450 using __allocator_type = __alloc_rebind<_Alloc, _Sp_counted_deleter>;
451
452 // __d(__p) must not throw.
453 _Sp_counted_deleter(_Ptr __p, _Deleter __d) noexcept
454 : _M_impl(__p, __d, _Alloc()) { }
455
456 // __d(__p) must not throw.
457 _Sp_counted_deleter(_Ptr __p, _Deleter __d, const _Alloc& __a) noexcept
458 : _M_impl(__p, __d, __a) { }
459
460 ~_Sp_counted_deleter() noexcept { }
461
462 virtual void
463 _M_dispose() noexcept
464 { _M_impl._M_del()(_M_impl._M_ptr); }
465
466 virtual void
467 _M_destroy() noexcept
468 {
469 __allocator_type __a(_M_impl._M_alloc());
470 __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
471 this->~_Sp_counted_deleter();
472 }
473
474 virtual void*
475 _M_get_deleter(const std::type_info& __ti) noexcept
476 {
477#if __cpp_rtti
478 // _GLIBCXX_RESOLVE_LIB_DEFECTS
479 // 2400. shared_ptr's get_deleter() should use addressof()
480 return __ti == typeid(_Deleter)
481 ? std::__addressof(_M_impl._M_del())
482 : nullptr;
483#else
484 return nullptr;
485#endif
486 }
487
488 private:
489 _Impl _M_impl;
490 };
491
492 // helpers for make_shared / allocate_shared
493
494 struct _Sp_make_shared_tag { };
495
496 template<typename _Tp, typename _Alloc, _Lock_policy _Lp>
497 class _Sp_counted_ptr_inplace final : public _Sp_counted_base<_Lp>
498 {
499 class _Impl : _Sp_ebo_helper<0, _Alloc>
500 {
501 typedef _Sp_ebo_helper<0, _Alloc> _A_base;
502
503 public:
504 explicit _Impl(_Alloc __a) noexcept : _A_base(__a) { }
505
506 _Alloc& _M_alloc() noexcept { return _A_base::_S_get(*this); }
507
508 __gnu_cxx::__aligned_buffer<_Tp> _M_storage;
509 };
510
511 public:
512 using __allocator_type = __alloc_rebind<_Alloc, _Sp_counted_ptr_inplace>;
513
514 template<typename... _Args>
515 _Sp_counted_ptr_inplace(_Alloc __a, _Args&&... __args)
516 : _M_impl(__a)
517 {
518 // _GLIBCXX_RESOLVE_LIB_DEFECTS
519 // 2070. allocate_shared should use allocator_traits<A>::construct
520 allocator_traits<_Alloc>::construct(__a, _M_ptr(),
Step 18: Calling 'allocator_traits::construct'
521 std::forward<_Args>(__args)...); // might throw
522 }
523
524 ~_Sp_counted_ptr_inplace() noexcept { }
525
526 virtual void
527 _M_dispose() noexcept
528 {
529 allocator_traits<_Alloc>::destroy(_M_impl._M_alloc(), _M_ptr());
530 }
531
532 // Override because the allocator needs to know the dynamic type
533 virtual void
534 _M_destroy() noexcept
535 {
536 __allocator_type __a(_M_impl._M_alloc());
537 __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
538 this->~_Sp_counted_ptr_inplace();
539 }
540
541 // Sneaky trick so __shared_ptr can get the managed pointer
542 virtual void*
543 _M_get_deleter(const std::type_info& __ti) noexcept
544 {
545#if __cpp_rtti
546 if (__ti == typeid(_Sp_make_shared_tag))
547 return const_cast<typename remove_cv<_Tp>::type*>(_M_ptr());
548#endif
549 return nullptr;
550 }
551
552 private:
553 _Tp* _M_ptr() noexcept { return _M_impl._M_storage._M_ptr(); }
554
555 _Impl _M_impl;
556 };
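The reported path passes through this in-place scheme (steps 17 and 18): the payload is constructed by placement new into _M_storage, raw memory obtained earlier from the allocator. A minimal standalone analogue of that two-phase pattern, for illustration only:

    #include <new>

    struct Payload { int X; };

    int main() {
      // Phase 1: raw, suitably aligned storage (cf. __allocate_guarded).
      alignas(Payload) unsigned char Buf[sizeof(Payload)];
      // Phase 2: construct in place (cf. allocator_traits::construct
      // targeting _M_storage._M_ptr()).
      Payload *P = ::new (static_cast<void *>(Buf)) Payload{42};
      P->~Payload();
      return 0;
    }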
557
558
559 template<_Lock_policy _Lp>
560 class __shared_count
561 {
562 public:
563 constexpr __shared_count() noexcept : _M_pi(0)
564 { }
565
566 template<typename _Ptr>
567 explicit
568 __shared_count(_Ptr __p) : _M_pi(0)
569 {
570 __try
571 {
572 _M_pi = new _Sp_counted_ptr<_Ptr, _Lp>(__p);
573 }
574 __catch(...)
575 {
576 delete __p;
577 __throw_exception_again;
578 }
579 }
580
581 template<typename _Ptr, typename _Deleter>
582 __shared_count(_Ptr __p, _Deleter __d)
583 : __shared_count(__p, std::move(__d), allocator<void>())
584 { }
585
586 template<typename _Ptr, typename _Deleter, typename _Alloc>
587 __shared_count(_Ptr __p, _Deleter __d, _Alloc __a) : _M_pi(0)
588 {
589 typedef _Sp_counted_deleter<_Ptr, _Deleter, _Alloc, _Lp> _Sp_cd_type;
590 __try
591 {
592 typename _Sp_cd_type::__allocator_type __a2(__a);
593 auto __guard = std::__allocate_guarded(__a2);
594 _Sp_cd_type* __mem = __guard.get();
595 ::new (__mem) _Sp_cd_type(__p, std::move(__d), std::move(__a));
596 _M_pi = __mem;
597 __guard = nullptr;
598 }
599 __catch(...)
600 {
601 __d(__p); // Call _Deleter on __p.
602 __throw_exception_again;
603 }
604 }
605
606 template<typename _Tp, typename _Alloc, typename... _Args>
607 __shared_count(_Sp_make_shared_tag, _Tp*, const _Alloc& __a,
608 _Args&&... __args)
609 : _M_pi(0)
610 {
611 typedef _Sp_counted_ptr_inplace<_Tp, _Alloc, _Lp> _Sp_cp_type;
612 typename _Sp_cp_type::__allocator_type __a2(__a);
613 auto __guard = std::__allocate_guarded(__a2);
614 _Sp_cp_type* __mem = __guard.get();
615 ::new (__mem) _Sp_cp_type(std::move(__a),
Step 17: Calling constructor for '_Sp_counted_ptr_inplace<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata> >::PoolEntry, std::allocator<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata> >::PoolEntry>, __gnu_cxx::_S_atomic>'
616 std::forward<_Args>(__args)...);
617 _M_pi = __mem;
618 __guard = nullptr;
619 }
620
621#if _GLIBCXX_USE_DEPRECATED
622 // Special case for auto_ptr<_Tp> to provide the strong guarantee.
623 template<typename _Tp>
624 explicit
625 __shared_count(std::auto_ptr<_Tp>&& __r);
626#endif
627
628 // Special case for unique_ptr<_Tp,_Del> to provide the strong guarantee.
629 template<typename _Tp, typename _Del>
630 explicit
631 __shared_count(std::unique_ptr<_Tp, _Del>&& __r) : _M_pi(0)
632 {
633 // _GLIBCXX_RESOLVE_LIB_DEFECTS
634 // 2415. Inconsistency between unique_ptr and shared_ptr
635 if (__r.get() == nullptr)
636 return;
637
638 using _Ptr = typename unique_ptr<_Tp, _Del>::pointer;
639 using _Del2 = typename conditional<is_reference<_Del>::value,
640 reference_wrapper<typename remove_reference<_Del>::type>,
641 _Del>::type;
642 using _Sp_cd_type
643 = _Sp_counted_deleter<_Ptr, _Del2, allocator<void>, _Lp>;
644 using _Alloc = allocator<_Sp_cd_type>;
645 using _Alloc_traits = allocator_traits<_Alloc>;
646 _Alloc __a;
647 _Sp_cd_type* __mem = _Alloc_traits::allocate(__a, 1);
648 _Alloc_traits::construct(__a, __mem, __r.release(),
649 __r.get_deleter()); // non-throwing
650 _M_pi = __mem;
651 }
652
653 // Throw bad_weak_ptr when __r._M_get_use_count() == 0.
654 explicit __shared_count(const __weak_count<_Lp>& __r);
655
656 // Does not throw if __r._M_get_use_count() == 0, caller must check.
657 explicit __shared_count(const __weak_count<_Lp>& __r, std::nothrow_t);
658
659 ~__shared_count() noexcept
660 {
661 if (_M_pi != nullptr)
662 _M_pi->_M_release();
663 }
664
665 __shared_count(const __shared_count& __r) noexcept
666 : _M_pi(__r._M_pi)
667 {
668 if (_M_pi != 0)
669 _M_pi->_M_add_ref_copy();
670 }
671
672 __shared_count&
673 operator=(const __shared_count& __r) noexcept
674 {
675 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
676 if (__tmp != _M_pi)
677 {
678 if (__tmp != 0)
679 __tmp->_M_add_ref_copy();
680 if (_M_pi != 0)
681 _M_pi->_M_release();
682 _M_pi = __tmp;
683 }
684 return *this;
685 }
686
687 void
688 _M_swap(__shared_count& __r) noexcept
689 {
690 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
691 __r._M_pi = _M_pi;
692 _M_pi = __tmp;
693 }
694
695 long
696 _M_get_use_count() const noexcept
697 { return _M_pi != 0 ? _M_pi->_M_get_use_count() : 0; }
698
699 bool
700 _M_unique() const noexcept
701 { return this->_M_get_use_count() == 1; }
702
703 void*
704 _M_get_deleter(const std::type_info& __ti) const noexcept
705 { return _M_pi ? _M_pi->_M_get_deleter(__ti) : nullptr; }
706
707 bool
708 _M_less(const __shared_count& __rhs) const noexcept
709 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
710
711 bool
712 _M_less(const __weak_count<_Lp>& __rhs) const noexcept
713 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
714
715 // Friend function injected into enclosing namespace and found by ADL
716 friend inline bool
717 operator==(const __shared_count& __a, const __shared_count& __b) noexcept
718 { return __a._M_pi == __b._M_pi; }
719
720 private:
721 friend class __weak_count<_Lp>;
722
723 _Sp_counted_base<_Lp>* _M_pi;
724 };
725
726
727 template<_Lock_policy _Lp>
728 class __weak_count
729 {
730 public:
731 constexpr __weak_count() noexcept : _M_pi(nullptr)
732 { }
733
734 __weak_count(const __shared_count<_Lp>& __r) noexcept
735 : _M_pi(__r._M_pi)
736 {
737 if (_M_pi != nullptr)
738 _M_pi->_M_weak_add_ref();
739 }
740
741 __weak_count(const __weak_count& __r) noexcept
742 : _M_pi(__r._M_pi)
743 {
744 if (_M_pi != nullptr)
745 _M_pi->_M_weak_add_ref();
746 }
747
748 __weak_count(__weak_count&& __r) noexcept
749 : _M_pi(__r._M_pi)
750 { __r._M_pi = nullptr; }
751
752 ~__weak_count() noexcept
753 {
754 if (_M_pi != nullptr)
755 _M_pi->_M_weak_release();
756 }
757
758 __weak_count&
759 operator=(const __shared_count<_Lp>& __r) noexcept
760 {
761 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
762 if (__tmp != nullptr)
763 __tmp->_M_weak_add_ref();
764 if (_M_pi != nullptr)
765 _M_pi->_M_weak_release();
766 _M_pi = __tmp;
767 return *this;
768 }
769
770 __weak_count&
771 operator=(const __weak_count& __r) noexcept
772 {
773 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
774 if (__tmp != nullptr)
775 __tmp->_M_weak_add_ref();
776 if (_M_pi != nullptr)
777 _M_pi->_M_weak_release();
778 _M_pi = __tmp;
779 return *this;
780 }
781
782 __weak_count&
783 operator=(__weak_count&& __r) noexcept
784 {
785 if (_M_pi != nullptr)
786 _M_pi->_M_weak_release();
787 _M_pi = __r._M_pi;
788 __r._M_pi = nullptr;
789 return *this;
790 }
791
792 void
793 _M_swap(__weak_count& __r) noexcept
794 {
795 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
796 __r._M_pi = _M_pi;
797 _M_pi = __tmp;
798 }
799
800 long
801 _M_get_use_count() const noexcept
802 { return _M_pi != nullptr ? _M_pi->_M_get_use_count() : 0; }
803
804 bool
805 _M_less(const __weak_count& __rhs) const noexcept
806 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
807
808 bool
809 _M_less(const __shared_count<_Lp>& __rhs) const noexcept
810 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
811
812 // Friend function injected into enclosing namespace and found by ADL
813 friend inline bool
814 operator==(const __weak_count& __a, const __weak_count& __b) noexcept
815 { return __a._M_pi == __b._M_pi; }
816
817 private:
818 friend class __shared_count<_Lp>;
819
820 _Sp_counted_base<_Lp>* _M_pi;
821 };
822
823 // Now that __weak_count is defined we can define this constructor:
824 template<_Lock_policy _Lp>
825 inline
826 __shared_count<_Lp>::__shared_count(const __weak_count<_Lp>& __r)
827 : _M_pi(__r._M_pi)
828 {
829 if (_M_pi != nullptr)
830 _M_pi->_M_add_ref_lock();
831 else
832 __throw_bad_weak_ptr();
833 }
834
835 // Now that __weak_count is defined we can define this constructor:
836 template<_Lock_policy _Lp>
837 inline
838 __shared_count<_Lp>::
839 __shared_count(const __weak_count<_Lp>& __r, std::nothrow_t)
840 : _M_pi(__r._M_pi)
841 {
842 if (_M_pi != nullptr)
843 if (!_M_pi->_M_add_ref_lock_nothrow())
844 _M_pi = nullptr;
845 }
846
847 // Support for enable_shared_from_this.
848
849 // Friend of __enable_shared_from_this.
850 template<_Lock_policy _Lp, typename _Tp1, typename _Tp2>
851 void
852 __enable_shared_from_this_helper(const __shared_count<_Lp>&,
853 const __enable_shared_from_this<_Tp1,
854 _Lp>*, const _Tp2*) noexcept;
855
856 // Friend of enable_shared_from_this.
857 template<typename _Tp1, typename _Tp2>
858 void
859 __enable_shared_from_this_helper(const __shared_count<>&,
860 const enable_shared_from_this<_Tp1>*,
861 const _Tp2*) noexcept;
862
863 template<_Lock_policy _Lp>
864 inline void
865 __enable_shared_from_this_helper(const __shared_count<_Lp>&, ...) noexcept
866 { }
867
868
869 template<typename _Tp, _Lock_policy _Lp>
870 class __shared_ptr
871 {
872 template<typename _Ptr>
873 using _Convertible
874 = typename enable_if<is_convertible<_Ptr, _Tp*>::value>::type;
875
876 public:
877 typedef _Tp element_type;
878
879 constexpr __shared_ptr() noexcept
880 : _M_ptr(0), _M_refcount()
881 { }
882
883 template<typename _Tp1>
884 explicit __shared_ptr(_Tp1* __p)
885 : _M_ptr(__p), _M_refcount(__p)
886 {
887 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
888 static_assert( !is_void<_Tp1>::value, "incomplete type" );
889 static_assert( sizeof(_Tp1) > 0, "incomplete type" );
890 __enable_shared_from_this_helper(_M_refcount, __p, __p);
891 }
892
893 template<typename _Tp1, typename _Deleter>
894 __shared_ptr(_Tp1* __p, _Deleter __d)
895 : _M_ptr(__p), _M_refcount(__p, __d)
896 {
897 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
898 // TODO requires _Deleter CopyConstructible and __d(__p) well-formed
899 __enable_shared_from_this_helper(_M_refcount, __p, __p);
900 }
901
902 template<typename _Tp1, typename _Deleter, typename _Alloc>
903 __shared_ptr(_Tp1* __p, _Deleter __d, _Alloc __a)
904 : _M_ptr(__p), _M_refcount(__p, __d, std::move(__a))
905 {
906 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
907 // TODO requires _Deleter CopyConstructible and __d(__p) well-formed
908 __enable_shared_from_this_helper(_M_refcount, __p, __p);
909 }
910
911 template<typename _Deleter>
912 __shared_ptr(nullptr_t __p, _Deleter __d)
913 : _M_ptr(0), _M_refcount(__p, __d)
914 { }
915
916 template<typename _Deleter, typename _Alloc>
917 __shared_ptr(nullptr_t __p, _Deleter __d, _Alloc __a)
918 : _M_ptr(0), _M_refcount(__p, __d, std::move(__a))
919 { }
920
921 template<typename _Tp1>
922 __shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r, _Tp* __p) noexcept
923 : _M_ptr(__p), _M_refcount(__r._M_refcount) // never throws
924 { }
925
926 __shared_ptr(const __shared_ptr&) noexcept = default;
927 __shared_ptr& operator=(const __shared_ptr&) noexcept = default;
928 ~__shared_ptr() = default;
929
930 template<typename _Tp1, typename = _Convertible<_Tp1*>>
931 __shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
932 : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount)
933 { }
934
935 __shared_ptr(__shared_ptr&& __r) noexcept
936 : _M_ptr(__r._M_ptr), _M_refcount()
937 {
938 _M_refcount._M_swap(__r._M_refcount);
939 __r._M_ptr = 0;
940 }
941
942 template<typename _Tp1, typename = _Convertible<_Tp1*>>
943 __shared_ptr(__shared_ptr<_Tp1, _Lp>&& __r) noexcept
944 : _M_ptr(__r._M_ptr), _M_refcount()
945 {
946 _M_refcount._M_swap(__r._M_refcount);
947 __r._M_ptr = 0;
948 }
949
950 template<typename _Tp1>
951 explicit __shared_ptr(const __weak_ptr<_Tp1, _Lp>& __r)
952 : _M_refcount(__r._M_refcount) // may throw
953 {
954 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
955
956 // It is now safe to copy __r._M_ptr, as
957 // _M_refcount(__r._M_refcount) did not throw.
958 _M_ptr = __r._M_ptr;
959 }
960
961 // If an exception is thrown this constructor has no effect.
962 template<typename _Tp1, typename _Del, typename
963 = _Convertible<typename unique_ptr<_Tp1, _Del>::pointer>>
964 __shared_ptr(std::unique_ptr<_Tp1, _Del>&& __r)
965 : _M_ptr(__r.get()), _M_refcount()
966 {
967 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
968 auto __raw = _S_raw_ptr(__r.get());
969 _M_refcount = __shared_count<_Lp>(std::move(__r));
970 __enable_shared_from_this_helper(_M_refcount, __raw, __raw);
971 }
972
973#if _GLIBCXX_USE_DEPRECATED
974 // Postcondition: use_count() == 1 and __r.get() == 0
975 template<typename _Tp1>
976 __shared_ptr(std::auto_ptr<_Tp1>&& __r);
977#endif
978
979 constexpr __shared_ptr(nullptr_t) noexcept : __shared_ptr() { }
980
981 template<typename _Tp1>
982 __shared_ptr&
983 operator=(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
984 {
985 _M_ptr = __r._M_ptr;
986 _M_refcount = __r._M_refcount; // __shared_count::op= doesn't throw
987 return *this;
988 }
989
990#if _GLIBCXX_USE_DEPRECATED
991 template<typename _Tp1>
992 __shared_ptr&
993 operator=(std::auto_ptr<_Tp1>&& __r)
994 {
995 __shared_ptr(std::move(__r)).swap(*this);
996 return *this;
997 }
998#endif
999
1000 __shared_ptr&
1001 operator=(__shared_ptr&& __r) noexcept
1002 {
1003 __shared_ptr(std::move(__r)).swap(*this);
1004 return *this;
1005 }
1006
1007 template<class _Tp1>
1008 __shared_ptr&
1009 operator=(__shared_ptr<_Tp1, _Lp>&& __r) noexcept
1010 {
1011 __shared_ptr(std::move(__r)).swap(*this);
1012 return *this;
1013 }
1014
1015 template<typename _Tp1, typename _Del>
1016 __shared_ptr&
1017 operator=(std::unique_ptr<_Tp1, _Del>&& __r)
1018 {
1019 __shared_ptr(std::move(__r)).swap(*this);
1020 return *this;
1021 }
1022
1023 void
1024 reset() noexcept
1025 { __shared_ptr().swap(*this); }
1026
1027 template<typename _Tp1>
1028 void
1029 reset(_Tp1* __p) // _Tp1 must be complete.
1030 {
1031 // Catch self-reset errors.
1032 __glibcxx_assert(__p == 0 || __p != _M_ptr);
1033 __shared_ptr(__p).swap(*this);
1034 }
1035
1036 template<typename _Tp1, typename _Deleter>
1037 void
1038 reset(_Tp1* __p, _Deleter __d)
1039 { __shared_ptr(__p, __d).swap(*this); }
1040
1041 template<typename _Tp1, typename _Deleter, typename _Alloc>
1042 void
1043 reset(_Tp1* __p, _Deleter __d, _Alloc __a)
1044 { __shared_ptr(__p, __d, std::move(__a)).swap(*this); }
1045
1046 // Allow class instantiation when _Tp is [cv-qual] void.
1047 typename std::add_lvalue_reference<_Tp>::type
1048 operator*() const noexcept
1049 {
1050 __glibcxx_assert(_M_ptr != 0);
1051 return *_M_ptr;
1052 }
1053
1054 _Tp*
1055 operator->() const noexcept
1056 {
1057 _GLIBCXX_DEBUG_PEDASSERT(_M_ptr != 0);
1058 return _M_ptr;
1059 }
1060
1061 _Tp*
1062 get() const noexcept
1063 { return _M_ptr; }
1064
1065 explicit operator bool() const // never throws
1066 { return _M_ptr == 0 ? false : true; }
1067
1068 bool
1069 unique() const noexcept
1070 { return _M_refcount._M_unique(); }
1071
1072 long
1073 use_count() const noexcept
1074 { return _M_refcount._M_get_use_count(); }
1075
1076 void
1077 swap(__shared_ptr<_Tp, _Lp>& __other) noexcept
1078 {
1079 std::swap(_M_ptr, __other._M_ptr);
1080 _M_refcount._M_swap(__other._M_refcount);
1081 }
1082
1083 template<typename _Tp1>
1084 bool
1085 owner_before(__shared_ptr<_Tp1, _Lp> const& __rhs) const
1086 { return _M_refcount._M_less(__rhs._M_refcount); }
1087
1088 template<typename _Tp1>
1089 bool
1090 owner_before(__weak_ptr<_Tp1, _Lp> const& __rhs) const
1091 { return _M_refcount._M_less(__rhs._M_refcount); }
1092
1093#if __cpp_rtti
1094 protected:
1095 // This constructor is non-standard, it is used by allocate_shared.
1096 template<typename _Alloc, typename... _Args>
1097 __shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
1098 _Args&&... __args)
1099 : _M_ptr(), _M_refcount(__tag, (_Tp*)0, __a,
16. Calling constructor for '__shared_count<__gnu_cxx::_S_atomic>'
1100 std::forward<_Args>(__args)...)
1101 {
1102 // _M_ptr needs to point to the newly constructed object.
1103 // This relies on _Sp_counted_ptr_inplace::_M_get_deleter.
1104 void* __p = _M_refcount._M_get_deleter(typeid(__tag));
1105 _M_ptr = static_cast<_Tp*>(__p);
1106 __enable_shared_from_this_helper(_M_refcount, _M_ptr, _M_ptr);
1107 }
1108#else
1109 template<typename _Alloc>
1110 struct _Deleter
1111 {
1112 void operator()(typename _Alloc::value_type* __ptr)
1113 {
1114 __allocated_ptr<_Alloc> __guard{ _M_alloc, __ptr };
1115 allocator_traits<_Alloc>::destroy(_M_alloc, __guard.get());
1116 }
1117 _Alloc _M_alloc;
1118 };
1119
1120 template<typename _Alloc, typename... _Args>
1121 __shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
1122 _Args&&... __args)
1123 : _M_ptr(), _M_refcount()
1124 {
1125 typedef typename allocator_traits<_Alloc>::template
1126 rebind_traits<typename std::remove_cv<_Tp>::type> __traits;
1127 _Deleter<typename __traits::allocator_type> __del = { __a };
1128 auto __guard = std::__allocate_guarded(__del._M_alloc);
1129 auto __ptr = __guard.get();
1130 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1131 // 2070. allocate_shared should use allocator_traits<A>::construct
1132 __traits::construct(__del._M_alloc, __ptr,
1133 std::forward<_Args>(__args)...);
1134 __guard = nullptr;
1135 __shared_count<_Lp> __count(__ptr, __del, __del._M_alloc);
1136 _M_refcount._M_swap(__count);
1137 _M_ptr = __ptr;
1138 __enable_shared_from_this_helper(_M_refcount, _M_ptr, _M_ptr);
1139 }
1140#endif
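
Note (summary): in the RTTI branch above, _Sp_counted_ptr_inplace overrides _M_get_deleter to recognize typeid(_Sp_make_shared_tag) and hand back the address of its in-place object, which is how the constructor recovers _M_ptr without the control block storing a second pointer. The non-RTTI branch instead constructs the object first and passes its address to __shared_count directly.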
1141
1142 template<typename _Tp1, _Lock_policy _Lp1, typename _Alloc,
1143 typename... _Args>
1144 friend __shared_ptr<_Tp1, _Lp1>
1145 __allocate_shared(const _Alloc& __a, _Args&&... __args);
1146
1147 // This constructor is used by __weak_ptr::lock() and
1148 // shared_ptr::shared_ptr(const weak_ptr&, std::nothrow_t).
1149 __shared_ptr(const __weak_ptr<_Tp, _Lp>& __r, std::nothrow_t)
1150 : _M_refcount(__r._M_refcount, std::nothrow)
1151 {
1152 _M_ptr = _M_refcount._M_get_use_count() ? __r._M_ptr : nullptr;
1153 }
1154
1155 friend class __weak_ptr<_Tp, _Lp>;
1156
1157 private:
1158 void*
1159 _M_get_deleter(const std::type_info& __ti) const noexcept
1160 { return _M_refcount._M_get_deleter(__ti); }
1161
1162 template<typename _Tp1>
1163 static _Tp1*
1164 _S_raw_ptr(_Tp1* __ptr)
1165 { return __ptr; }
1166
1167 template<typename _Tp1>
1168 static auto
1169 _S_raw_ptr(_Tp1 __ptr) -> decltype(std::__addressof(*__ptr))
1170 { return std::__addressof(*__ptr); }
1171
1172 template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
1173 template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
1174
1175 template<typename _Del, typename _Tp1, _Lock_policy _Lp1>
1176 friend _Del* get_deleter(const __shared_ptr<_Tp1, _Lp1>&) noexcept;
1177
1178 _Tp* _M_ptr; // Contained pointer.
1179 __shared_count<_Lp> _M_refcount; // Reference counter.
1180 };
1181
1182
1183 // 20.7.2.2.7 shared_ptr comparisons
1184 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1185 inline bool
1186 operator==(const __shared_ptr<_Tp1, _Lp>& __a,
1187 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1188 { return __a.get() == __b.get(); }
1189
1190 template<typename _Tp, _Lock_policy _Lp>
1191 inline bool
1192 operator==(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1193 { return !__a; }
1194
1195 template<typename _Tp, _Lock_policy _Lp>
1196 inline bool
1197 operator==(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1198 { return !__a; }
1199
1200 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1201 inline bool
1202 operator!=(const __shared_ptr<_Tp1, _Lp>& __a,
1203 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1204 { return __a.get() != __b.get(); }
1205
1206 template<typename _Tp, _Lock_policy _Lp>
1207 inline bool
1208 operator!=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1209 { return (bool)__a; }
1210
1211 template<typename _Tp, _Lock_policy _Lp>
1212 inline bool
1213 operator!=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1214 { return (bool)__a; }
1215
1216 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1217 inline bool
1218 operator<(const __shared_ptr<_Tp1, _Lp>& __a,
1219 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1220 {
1221 typedef typename std::common_type<_Tp1*, _Tp2*>::type _CT;
1222 return std::less<_CT>()(__a.get(), __b.get());
1223 }
1224
1225 template<typename _Tp, _Lock_policy _Lp>
1226 inline bool
1227 operator<(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1228 { return std::less<_Tp*>()(__a.get(), nullptr); }
1229
1230 template<typename _Tp, _Lock_policy _Lp>
1231 inline bool
1232 operator<(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1233 { return std::less<_Tp*>()(nullptr, __a.get()); }
1234
1235 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1236 inline bool
1237 operator<=(const __shared_ptr<_Tp1, _Lp>& __a,
1238 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1239 { return !(__b < __a); }
1240
1241 template<typename _Tp, _Lock_policy _Lp>
1242 inline bool
1243 operator<=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1244 { return !(nullptr < __a); }
1245
1246 template<typename _Tp, _Lock_policy _Lp>
1247 inline bool
1248 operator<=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1249 { return !(__a < nullptr); }
1250
1251 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1252 inline bool
1253 operator>(const __shared_ptr<_Tp1, _Lp>& __a,
1254 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1255 { return (__b < __a); }
1256
1257 template<typename _Tp, _Lock_policy _Lp>
1258 inline bool
1259 operator>(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1260 { return std::less<_Tp*>()(nullptr, __a.get()); }
1261
1262 template<typename _Tp, _Lock_policy _Lp>
1263 inline bool
1264 operator>(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1265 { return std::less<_Tp*>()(__a.get(), nullptr); }
1266
1267 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1268 inline bool
1269 operator>=(const __shared_ptr<_Tp1, _Lp>& __a,
1270 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1271 { return !(__a < __b); }
1272
1273 template<typename _Tp, _Lock_policy _Lp>
1274 inline bool
1275 operator>=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1276 { return !(__a < nullptr); }
1277
1278 template<typename _Tp, _Lock_policy _Lp>
1279 inline bool
1280 operator>=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1281 { return !(nullptr < __a); }
1282
1283 template<typename _Sp>
1284 struct _Sp_less : public binary_function<_Sp, _Sp, bool>
1285 {
1286 bool
1287 operator()(const _Sp& __lhs, const _Sp& __rhs) const noexcept
1288 {
1289 typedef typename _Sp::element_type element_type;
1290 return std::less<element_type*>()(__lhs.get(), __rhs.get());
1291 }
1292 };
1293
1294 template<typename _Tp, _Lock_policy _Lp>
1295 struct less<__shared_ptr<_Tp, _Lp>>
1296 : public _Sp_less<__shared_ptr<_Tp, _Lp>>
1297 { };
1298
1299 // 20.7.2.2.8 shared_ptr specialized algorithms.
1300 template<typename _Tp, _Lock_policy _Lp>
1301 inline void
1302 swap(__shared_ptr<_Tp, _Lp>& __a, __shared_ptr<_Tp, _Lp>& __b) noexcept
1303 { __a.swap(__b); }
1304
1305 // 20.7.2.2.9 shared_ptr casts
1306
1307 // The seemingly equivalent code:
1308 // shared_ptr<_Tp, _Lp>(static_cast<_Tp*>(__r.get()))
1309 // will eventually result in undefined behaviour, attempting to
1310 // delete the same object twice.
1311 /// static_pointer_cast
1312 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1313 inline __shared_ptr<_Tp, _Lp>
1314 static_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1315 { return __shared_ptr<_Tp, _Lp>(__r, static_cast<_Tp*>(__r.get())); }
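
A minimal sketch (hypothetical types) of the double delete described above, and the aliasing-constructor fix these casts use instead:

    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { };

    std::shared_ptr<Base> b = std::make_shared<Derived>();
    // Bad: creates a second, independent control block, so ~Derived
    // would eventually run twice:
    //   std::shared_ptr<Derived> d(static_cast<Derived*>(b.get()));
    // Good: shares b's control block via the aliasing constructor:
    std::shared_ptr<Derived> d = std::static_pointer_cast<Derived>(b);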
1316
1317 // The seemingly equivalent code:
1318 // shared_ptr<_Tp, _Lp>(const_cast<_Tp*>(__r.get()))
1319 // will eventually result in undefined behaviour, attempting to
1320 // delete the same object twice.
1321 /// const_pointer_cast
1322 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1323 inline __shared_ptr<_Tp, _Lp>
1324 const_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1325 { return __shared_ptr<_Tp, _Lp>(__r, const_cast<_Tp*>(__r.get())); }
1326
1327 // The seemingly equivalent code:
1328 // shared_ptr<_Tp, _Lp>(dynamic_cast<_Tp*>(__r.get()))
1329 // will eventually result in undefined behaviour, attempting to
1330 // delete the same object twice.
1331 /// dynamic_pointer_cast
1332 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1333 inline __shared_ptr<_Tp, _Lp>
1334 dynamic_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1335 {
1336 if (_Tp* __p = dynamic_cast<_Tp*>(__r.get()))
1337 return __shared_ptr<_Tp, _Lp>(__r, __p);
1338 return __shared_ptr<_Tp, _Lp>();
1339 }
1340
1341
1342 template<typename _Tp, _Lock_policy _Lp>
1343 class __weak_ptr
1344 {
1345 template<typename _Ptr>
1346 using _Convertible
1347 = typename enable_if<is_convertible<_Ptr, _Tp*>::value>::type;
1348
1349 public:
1350 typedef _Tp element_type;
1351
1352 constexpr __weak_ptr() noexcept
1353 : _M_ptr(nullptr), _M_refcount()
1354 { }
1355
1356 __weak_ptr(const __weak_ptr&) noexcept = default;
1357
1358 ~__weak_ptr() = default;
1359
1360 // The "obvious" converting constructor implementation:
1361 //
1362 // template<typename _Tp1>
1363 // __weak_ptr(const __weak_ptr<_Tp1, _Lp>& __r)
1364 // : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount) // never throws
1365 // { }
1366 //
1367 // has a serious problem.
1368 //
1369 // __r._M_ptr may already have been invalidated. The _M_ptr(__r._M_ptr)
1370 // conversion may require access to *__r._M_ptr (virtual inheritance).
1371 //
1372 // It is not possible to avoid spurious access violations since
1373 // in multithreaded programs __r._M_ptr may be invalidated at any point.
1374 template<typename _Tp1, typename = _Convertible<_Tp1*>>
1375 __weak_ptr(const __weak_ptr<_Tp1, _Lp>& __r) noexcept
1376 : _M_refcount(__r._M_refcount)
1377 { _M_ptr = __r.lock().get(); }
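
A short illustration (hypothetical types) of why the constructor above pins the source object with lock() before converting the pointer:

    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : virtual Base { };  // virtual base: converting
                                        // Derived* to Base* may need to
                                        // read the live object

    std::shared_ptr<Derived> sd = std::make_shared<Derived>();
    std::weak_ptr<Derived> wd = sd;
    // Another thread may reset sd at any moment, invalidating the raw
    // Derived* stored inside wd.
    std::weak_ptr<Base> wb(wd);  // safe: the Derived* -> Base* conversion
                                 // happens on a temporary locked
                                 // shared_ptr, never on a dead pointer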
1378
1379 template<typename _Tp1, typename = _Convertible<_Tp1*>>
1380 __weak_ptr(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1381 : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount)
1382 { }
1383
1384 __weak_ptr(__weak_ptr&& __r) noexcept
1385 : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
1386 { __r._M_ptr = nullptr; }
1387
1388 template<typename _Tp1, typename = _Convertible<_Tp1*>>
1389 __weak_ptr(__weak_ptr<_Tp1, _Lp>&& __r) noexcept
1390 : _M_ptr(__r.lock().get()), _M_refcount(std::move(__r._M_refcount))
1391 { __r._M_ptr = nullptr; }
1392
1393 __weak_ptr&
1394 operator=(const __weak_ptr& __r) noexcept = default;
1395
1396 template<typename _Tp1>
1397 __weak_ptr&
1398 operator=(const __weak_ptr<_Tp1, _Lp>& __r) noexcept
1399 {
1400 _M_ptr = __r.lock().get();
1401 _M_refcount = __r._M_refcount;
1402 return *this;
1403 }
1404
1405 template<typename _Tp1>
1406 __weak_ptr&
1407 operator=(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1408 {
1409 _M_ptr = __r._M_ptr;
1410 _M_refcount = __r._M_refcount;
1411 return *this;
1412 }
1413
1414 __weak_ptr&
1415 operator=(__weak_ptr&& __r) noexcept
1416 {
1417 _M_ptr = __r._M_ptr;
1418 _M_refcount = std::move(__r._M_refcount);
1419 __r._M_ptr = nullptr;
1420 return *this;
1421 }
1422
1423 template<typename _Tp1>
1424 __weak_ptr&
1425 operator=(__weak_ptr<_Tp1, _Lp>&& __r) noexcept
1426 {
1427 _M_ptr = __r.lock().get();
1428 _M_refcount = std::move(__r._M_refcount);
1429 __r._M_ptr = nullptr;
1430 return *this;
1431 }
1432
1433 __shared_ptr<_Tp, _Lp>
1434 lock() const noexcept
1435 { return __shared_ptr<element_type, _Lp>(*this, std::nothrow); }
1436
1437 long
1438 use_count() const noexcept
1439 { return _M_refcount._M_get_use_count(); }
1440
1441 bool
1442 expired() const noexcept
1443 { return _M_refcount._M_get_use_count() == 0; }
1444
1445 template<typename _Tp1>
1446 bool
1447 owner_before(const __shared_ptr<_Tp1, _Lp>& __rhs) const
1448 { return _M_refcount._M_less(__rhs._M_refcount); }
1449
1450 template<typename _Tp1>
1451 bool
1452 owner_before(const __weak_ptr<_Tp1, _Lp>& __rhs) const
1453 { return _M_refcount._M_less(__rhs._M_refcount); }
1454
1455 void
1456 reset() noexcept
1457 { __weak_ptr().swap(*this); }
1458
1459 void
1460 swap(__weak_ptr& __s) noexcept
1461 {
1462 std::swap(_M_ptr, __s._M_ptr);
1463 _M_refcount._M_swap(__s._M_refcount);
1464 }
1465
1466 private:
1467 // Used by __enable_shared_from_this.
1468 void
1469 _M_assign(_Tp* __ptr, const __shared_count<_Lp>& __refcount) noexcept
1470 {
1471 if (use_count() == 0)
1472 {
1473 _M_ptr = __ptr;
1474 _M_refcount = __refcount;
1475 }
1476 }
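
Note (summary): _M_assign only seeds the weak pointer while use_count() is zero, so the first shared_ptr to take ownership of the object wins; later calls do not re-seat _M_weak_this.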
1477
1478 template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
1479 template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
1480 friend class __enable_shared_from_this<_Tp, _Lp>;
1481 friend class enable_shared_from_this<_Tp>;
1482
1483 _Tp* _M_ptr; // Contained pointer.
1484 __weak_count<_Lp> _M_refcount; // Reference counter.
1485 };
1486
1487 // 20.7.2.3.6 weak_ptr specialized algorithms.
1488 template<typename _Tp, _Lock_policy _Lp>
1489 inline void
1490 swap(__weak_ptr<_Tp, _Lp>& __a, __weak_ptr<_Tp, _Lp>& __b) noexcept
1491 { __a.swap(__b); }
1492
1493 template<typename _Tp, typename _Tp1>
1494 struct _Sp_owner_less : public binary_function<_Tp, _Tp, bool>
1495 {
1496 bool
1497 operator()(const _Tp& __lhs, const _Tp& __rhs) const
1498 { return __lhs.owner_before(__rhs); }
1499
1500 bool
1501 operator()(const _Tp& __lhs, const _Tp1& __rhs) const
1502 { return __lhs.owner_before(__rhs); }
1503
1504 bool
1505 operator()(const _Tp1& __lhs, const _Tp& __rhs) const
1506 { return __lhs.owner_before(__rhs); }
1507 };
1508
1509 template<typename _Tp, _Lock_policy _Lp>
1510 struct owner_less<__shared_ptr<_Tp, _Lp>>
1511 : public _Sp_owner_less<__shared_ptr<_Tp, _Lp>, __weak_ptr<_Tp, _Lp>>
1512 { };
1513
1514 template<typename _Tp, _Lock_policy _Lp>
1515 struct owner_less<__weak_ptr<_Tp, _Lp>>
1516 : public _Sp_owner_less<__weak_ptr<_Tp, _Lp>, __shared_ptr<_Tp, _Lp>>
1517 { };
1518
1519
1520 template<typename _Tp, _Lock_policy _Lp>
1521 class __enable_shared_from_this
1522 {
1523 protected:
1524 constexpr __enable_shared_from_this() noexcept { }
1525
1526 __enable_shared_from_this(const __enable_shared_from_this&) noexcept { }
1527
1528 __enable_shared_from_this&
1529 operator=(const __enable_shared_from_this&) noexcept
1530 { return *this; }
1531
1532 ~__enable_shared_from_this() { }
1533
1534 public:
1535 __shared_ptr<_Tp, _Lp>
1536 shared_from_this()
1537 { return __shared_ptr<_Tp, _Lp>(this->_M_weak_this); }
1538
1539 __shared_ptr<const _Tp, _Lp>
1540 shared_from_this() const
1541 { return __shared_ptr<const _Tp, _Lp>(this->_M_weak_this); }
1542
1543 private:
1544 template<typename _Tp1>
1545 void
1546 _M_weak_assign(_Tp1* __p, const __shared_count<_Lp>& __n) const noexcept
1547 { _M_weak_this._M_assign(__p, __n); }
1548
1549 template<_Lock_policy _Lp1, typename _Tp1, typename _Tp2>
1550 friend void
1551 __enable_shared_from_this_helper(const __shared_count<_Lp1>&,
1552 const __enable_shared_from_this<_Tp1,
1553 _Lp1>*, const _Tp2*) noexcept;
1554
1555 mutable __weak_ptr<_Tp, _Lp> _M_weak_this;
1556 };
1557
1558 template<_Lock_policy _Lp1, typename _Tp1, typename _Tp2>
1559 inline void
1560 __enable_shared_from_this_helper(const __shared_count<_Lp1>& __pn,
1561 const __enable_shared_from_this<_Tp1,
1562 _Lp1>* __pe,
1563 const _Tp2* __px) noexcept
1564 {
1565 if (__pe != nullptr)
1566 __pe->_M_weak_assign(const_cast<_Tp2*>(__px), __pn);
1567 }
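
A minimal usage sketch of the enable_shared_from_this machinery above:

    #include <cassert>
    #include <memory>

    struct Widget : std::enable_shared_from_this<Widget> { };

    auto w = std::make_shared<Widget>();  // the helper seeds _M_weak_this
    auto w2 = w->shared_from_this();      // shares ownership with w
    assert(w.use_count() == 2);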
1568
1569 template<typename _Tp, _Lock_policy _Lp, typename _Alloc, typename... _Args>
1570 inline __shared_ptr<_Tp, _Lp>
1571 __allocate_shared(const _Alloc& __a, _Args&&... __args)
1572 {
1573 return __shared_ptr<_Tp, _Lp>(_Sp_make_shared_tag(), __a,
1574 std::forward<_Args>(__args)...);
1575 }
1576
1577 template<typename _Tp, _Lock_policy _Lp, typename... _Args>
1578 inline __shared_ptr<_Tp, _Lp>
1579 __make_shared(_Args&&... __args)
1580 {
1581 typedef typename std::remove_const<_Tp>::type _Tp_nc;
1582 return std::__allocate_shared<_Tp, _Lp>(std::allocator<_Tp_nc>(),
1583 std::forward<_Args>(__args)...);
1584 }
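
A usage sketch of these factories through their public wrappers:

    #include <memory>

    std::allocator<int> a;
    auto p = std::allocate_shared<int>(a, 42); // control block and int
                                               // live in one allocation
                                               // obtained from a
    auto q = std::make_shared<int>(42);        // same, with std::allocator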
1585
1586 /// std::hash specialization for __shared_ptr.
1587 template<typename _Tp, _Lock_policy _Lp>
1588 struct hash<__shared_ptr<_Tp, _Lp>>
1589 : public __hash_base<size_t, __shared_ptr<_Tp, _Lp>>
1590 {
1591 size_t
1592 operator()(const __shared_ptr<_Tp, _Lp>& __s) const noexcept
1593 { return std::hash<_Tp*>()(__s.get()); }
1594 };
1595
1596_GLIBCXX_END_NAMESPACE_VERSION
1597} // namespace
1598
1599#endif // _SHARED_PTR_BASE_H

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/bits/alloc_traits.h

1// Allocator traits -*- C++ -*-
2
3// Copyright (C) 2011-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/alloc_traits.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30#ifndef _ALLOC_TRAITS_H
31#define _ALLOC_TRAITS_H 1
32
33#if __cplusplus >= 201103L
34
35#include <bits/memoryfwd.h>
36#include <bits/ptr_traits.h>
37#include <ext/numeric_traits.h>
38
39#define __cpp_lib_allocator_traits_is_always_equal 201411
40
41namespace std _GLIBCXX_VISIBILITY(default)
42{
43_GLIBCXX_BEGIN_NAMESPACE_VERSION
44
45 struct __allocator_traits_base
46 {
47 template<typename _Tp, typename _Up, typename = void>
48 struct __rebind : __replace_first_arg<_Tp, _Up> { };
49
50 template<typename _Tp, typename _Up>
51 struct __rebind<_Tp, _Up,
52 __void_t<typename _Tp::template rebind<_Up>::other>>
53 { using type = typename _Tp::template rebind<_Up>::other; };
54
55 protected:
56 template<typename _Tp>
57 using __pointer = typename _Tp::pointer;
58 template<typename _Tp>
59 using __c_pointer = typename _Tp::const_pointer;
60 template<typename _Tp>
61 using __v_pointer = typename _Tp::void_pointer;
62 template<typename _Tp>
63 using __cv_pointer = typename _Tp::const_void_pointer;
64 template<typename _Tp>
65 using __pocca = typename _Tp::propagate_on_container_copy_assignment;
66 template<typename _Tp>
67 using __pocma = typename _Tp::propagate_on_container_move_assignment;
68 template<typename _Tp>
69 using __pocs = typename _Tp::propagate_on_container_swap;
70 template<typename _Tp>
71 using __equal = typename _Tp::is_always_equal;
72 };
73
74 template<typename _Alloc, typename _Up>
75 using __alloc_rebind
76 = typename __allocator_traits_base::template __rebind<_Alloc, _Up>::type;
77
78 /**
79 * @brief Uniform interface to all allocator types.
80 * @ingroup allocators
81 */
82 template<typename _Alloc>
83 struct allocator_traits : __allocator_traits_base
84 {
85 /// The allocator type
86 typedef _Alloc allocator_type;
87 /// The allocated type
88 typedef typename _Alloc::value_type value_type;
89
90 /**
91 * @brief The allocator's pointer type.
92 *
93 * @c Alloc::pointer if that type exists, otherwise @c value_type*
94 */
95 using pointer = __detected_or_t<value_type*, __pointer, _Alloc>;
96
97 private:
98 // Select _Func<_Alloc> or pointer_traits<pointer>::rebind<_Tp>
99 template<template<typename> class _Func, typename _Tp, typename = void>
100 struct _Ptr
101 {
102 using type = typename pointer_traits<pointer>::template rebind<_Tp>;
103 };
104
105 template<template<typename> class _Func, typename _Tp>
106 struct _Ptr<_Func, _Tp, __void_t<_Func<_Alloc>>>
107 {
108 using type = _Func<_Alloc>;
109 };
110
111 // Select _A2::difference_type or pointer_traits<_Ptr>::difference_type
112 template<typename _A2, typename _PtrT, typename = void>
113 struct _Diff
114 { using type = typename pointer_traits<_PtrT>::difference_type; };
115
116 template<typename _A2, typename _PtrT>
117 struct _Diff<_A2, _PtrT, __void_t<typename _A2::difference_type>>
118 { using type = typename _A2::difference_type; };
119
120 // Select _A2::size_type or make_unsigned<_DiffT>::type
121 template<typename _A2, typename _DiffT, typename = void>
122 struct _Size : make_unsigned<_DiffT> { };
123
124 template<typename _A2, typename _DiffT>
125 struct _Size<_A2, _DiffT, __void_t<typename _A2::size_type>>
126 { using type = typename _A2::size_type; };
127
128 public:
129 /**
130 * @brief The allocator's const pointer type.
131 *
132 * @c Alloc::const_pointer if that type exists, otherwise
133 * <tt> pointer_traits<pointer>::rebind<const value_type> </tt>
134 */
135 using const_pointer = typename _Ptr<__c_pointer, const value_type>::type;
136
137 /**
138 * @brief The allocator's void pointer type.
139 *
140 * @c Alloc::void_pointer if that type exists, otherwise
141 * <tt> pointer_traits<pointer>::rebind<void> </tt>
142 */
143 using void_pointer = typename _Ptr<__v_pointer, void>::type;
144
145 /**
146 * @brief The allocator's const void pointer type.
147 *
148 * @c Alloc::const_void_pointer if that type exists, otherwise
149 * <tt> pointer_traits<pointer>::rebind<const void> </tt>
150 */
151 using const_void_pointer = typename _Ptr<__cv_pointer, const void>::type;
152
153 /**
154 * @brief The allocator's difference type
155 *
156 * @c Alloc::difference_type if that type exists, otherwise
157 * <tt> pointer_traits<pointer>::difference_type </tt>
158 */
159 using difference_type = typename _Diff<_Alloc, pointer>::type;
160
161 /**
162 * @brief The allocator's size type
163 *
164 * @c Alloc::size_type if that type exists, otherwise
165 * <tt> make_unsigned<difference_type>::type </tt>
166 */
167 using size_type = typename _Size<_Alloc, difference_type>::type;
168
169 /**
170 * @brief How the allocator is propagated on copy assignment
171 *
172 * @c Alloc::propagate_on_container_copy_assignment if that type exists,
173 * otherwise @c false_type
174 */
175 using propagate_on_container_copy_assignment
176 = __detected_or_t<false_type, __pocca, _Alloc>;
177
178 /**
179 * @brief How the allocator is propagated on move assignment
180 *
181 * @c Alloc::propagate_on_container_move_assignment if that type exists,
182 * otherwise @c false_type
183 */
184 using propagate_on_container_move_assignment
185 = __detected_or_t<false_type, __pocma, _Alloc>;
186
187 /**
188 * @brief How the allocator is propagated on swap
189 *
190 * @c Alloc::propagate_on_container_swap if that type exists,
191 * otherwise @c false_type
192 */
193 using propagate_on_container_swap
194 = __detected_or_t<false_type, __pocs, _Alloc>;
195
196 /**
197 * @brief Whether all instances of the allocator type compare equal.
198 *
199 * @c Alloc::is_always_equal if that type exists,
200 * otherwise @c is_empty<Alloc>::type
201 */
202 using is_always_equal
203 = __detected_or_t<typename is_empty<_Alloc>::type, __equal, _Alloc>;
204
205 template<typename _Tp>
206 using rebind_alloc = __alloc_rebind<_Alloc, _Tp>;
207 template<typename _Tp>
208 using rebind_traits = allocator_traits<rebind_alloc<_Tp>>;
209
210 private:
211 template<typename _Alloc2>
212 static auto
213 _S_allocate(_Alloc2& __a, size_type __n, const_void_pointer __hint, int)
214 -> decltype(__a.allocate(__n, __hint))
215 { return __a.allocate(__n, __hint); }
216
217 template<typename _Alloc2>
218 static pointer
219 _S_allocate(_Alloc2& __a, size_type __n, const_void_pointer, ...)
220 { return __a.allocate(__n); }
221
222 template<typename _Tp, typename... _Args>
223 struct __construct_helper
224 {
225 template<typename _Alloc2,
226 typename = decltype(std::declval<_Alloc2*>()->construct(
227 std::declval<_Tp*>(), std::declval<_Args>()...))>
228 static true_type __test(int);
229
230 template<typename>
231 static false_type __test(...);
232
233 using type = decltype(__test<_Alloc>(0));
234 };
235
236 template<typename _Tp, typename... _Args>
237 using __has_construct
238 = typename __construct_helper<_Tp, _Args...>::type;
239
240 template<typename _Tp, typename... _Args>
241 static _Require<__has_construct<_Tp, _Args...>>
242 _S_construct(_Alloc& __a, _Tp* __p, _Args&&... __args)
243 { __a.construct(__p, std::forward<_Args>(__args)...); }
244
245 template<typename _Tp, typename... _Args>
246 static
247 _Require<__and_<__not_<__has_construct<_Tp, _Args...>>,
248 is_constructible<_Tp, _Args...>>>
249 _S_construct(_Alloc&, _Tp* __p, _Args&&... __args)
250 { ::new((void*)__p) _Tp(std::forward<_Args>(__args)...); }
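
      // Illustration (hypothetical minimal allocator, not part of the
      // header):
      //   struct MinAlloc {
      //     using value_type = int;
      //     int* allocate(std::size_t n);
      //     void deallocate(int*, std::size_t);
      //   };
      // MinAlloc declares no construct() member, so __has_construct<int,
      // int> is false_type and allocator_traits<MinAlloc>::construct(a,
      // p, 42) selects the placement-new fallback above:
      //   ::new((void*)p) int(42)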
251
252 template<typename _Alloc2, typename _Tp>
253 static auto
254 _S_destroy(_Alloc2& __a, _Tp* __p, int)
255 -> decltype(__a.destroy(__p))
256 { __a.destroy(__p); }
257
258 template<typename _Alloc2, typename _Tp>
259 static void
260 _S_destroy(_Alloc2&, _Tp* __p, ...)
261 { __p->~_Tp(); }
262
263 template<typename _Alloc2>
264 static auto
265 _S_max_size(_Alloc2& __a, int)
266 -> decltype(__a.max_size())
267 { return __a.max_size(); }
268
269 template<typename _Alloc2>
270 static size_type
271 _S_max_size(_Alloc2&, ...)
272 {
273 // _GLIBCXX_RESOLVE_LIB_DEFECTS
274 // 2466. allocator_traits::max_size() default behavior is incorrect
275 return __gnu_cxx::__numeric_traits<size_type>::__max
276 / sizeof(value_type);
277 }
278
279 template<typename _Alloc2>
280 static auto
281 _S_select(_Alloc2& __a, int)
282 -> decltype(__a.select_on_container_copy_construction())
283 { return __a.select_on_container_copy_construction(); }
284
285 template<typename _Alloc2>
286 static _Alloc2
287 _S_select(_Alloc2& __a, ...)
288 { return __a; }
289
290 public:
291
292 /**
293 * @brief Allocate memory.
294 * @param __a An allocator.
295 * @param __n The number of objects to allocate space for.
296 *
297 * Calls @c a.allocate(n)
298 */
299 static pointer
300 allocate(_Alloc& __a, size_type __n)
301 { return __a.allocate(__n); }
302
303 /**
304 * @brief Allocate memory.
305 * @param __a An allocator.
306 * @param __n The number of objects to allocate space for.
307 * @param __hint Aid to locality.
308 * @return Memory of suitable size and alignment for @a n objects
309 * of type @c value_type
310 *
311 * Returns <tt> a.allocate(n, hint) </tt> if that expression is
312 * well-formed, otherwise returns @c a.allocate(n)
313 */
314 static pointer
315 allocate(_Alloc& __a, size_type __n, const_void_pointer __hint)
316 { return _S_allocate(__a, __n, __hint, 0); }
317
318 /**
319 * @brief Deallocate memory.
320 * @param __a An allocator.
321 * @param __p Pointer to the memory to deallocate.
322 * @param __n The number of objects space was allocated for.
323 *
324 * Calls <tt> a.deallocate(p, n) </tt>
325 */
326 static void
327 deallocate(_Alloc& __a, pointer __p, size_type __n)
328 { __a.deallocate(__p, __n); }
329
330 /**
331 * @brief Construct an object of type @a _Tp
332 * @param __a An allocator.
333 * @param __p Pointer to memory of suitable size and alignment for Tp
334 * @param __args Constructor arguments.
335 *
336 * Calls <tt> __a.construct(__p, std::forward<Args>(__args)...) </tt>
337 * if that expression is well-formed, otherwise uses placement-new
338 * to construct an object of type @a _Tp at location @a __p from the
339 * arguments @a __args...
340 */
341 template<typename _Tp, typename... _Args>
342 static auto construct(_Alloc& __a, _Tp* __p, _Args&&... __args)
343 -> decltype(_S_construct(__a, __p, std::forward<_Args>(__args)...))
344 { _S_construct(__a, __p, std::forward<_Args>(__args)...); }
345
346 /**
347 * @brief Destroy an object of type @a _Tp
348 * @param __a An allocator.
349 * @param __p Pointer to the object to destroy
350 *
351 * Calls @c __a.destroy(__p) if that expression is well-formed,
352 * otherwise calls @c __p->~_Tp()
353 */
354 template<typename _Tp>
355 static void destroy(_Alloc& __a, _Tp* __p)
356 { _S_destroy(__a, __p, 0); }
357
358 /**
359 * @brief The maximum supported allocation size
360 * @param __a An allocator.
361 * @return @c __a.max_size() or @c numeric_limits<size_type>::max()
362 *
363 * Returns @c __a.max_size() if that expression is well-formed,
364 * otherwise returns @c numeric_limits<size_type>::max()
365 */
366 static size_type max_size(const _Alloc& __a) noexcept
367 { return _S_max_size(__a, 0); }
368
369 /**
370 * @brief Obtain an allocator to use when copying a container.
371 * @param __rhs An allocator.
372 * @return @c __rhs.select_on_container_copy_construction() or @a __rhs
373 *
374 * Returns @c __rhs.select_on_container_copy_construction() if that
375 * expression is well-formed, otherwise returns @a __rhs
376 */
377 static _Alloc
378 select_on_container_copy_construction(const _Alloc& __rhs)
379 { return _S_select(__rhs, 0); }
380 };
381
382 /// Partial specialization for std::allocator.
383 template<typename _Tp>
384 struct allocator_traits<allocator<_Tp>>
385 {
386 /// The allocator type
387 using allocator_type = allocator<_Tp>;
388 /// The allocated type
389 using value_type = _Tp;
390
391 /// The allocator's pointer type.
392 using pointer = _Tp*;
393
394 /// The allocator's const pointer type.
395 using const_pointer = const _Tp*;
396
397 /// The allocator's void pointer type.
398 using void_pointer = void*;
399
400 /// The allocator's const void pointer type.
401 using const_void_pointer = const void*;
402
403 /// The allocator's difference type
404 using difference_type = std::ptrdiff_t;
405
406 /// The allocator's size type
407 using size_type = std::size_t;
408
409 /// How the allocator is propagated on copy assignment
410 using propagate_on_container_copy_assignment = false_type;
411
412 /// How the allocator is propagated on move assignment
413 using propagate_on_container_move_assignment = true_type;
414
415 /// How the allocator is propagated on swap
416 using propagate_on_container_swap = false_type;
417
418 /// Whether all instances of the allocator type compare equal.
419 using is_always_equal = true_type;
420
421 template<typename _Up>
422 using rebind_alloc = allocator<_Up>;
423
424 template<typename _Up>
425 using rebind_traits = allocator_traits<allocator<_Up>>;
426
427 /**
428 * @brief Allocate memory.
429 * @param __a An allocator.
430 * @param __n The number of objects to allocate space for.
431 *
432 * Calls @c a.allocate(n)
433 */
434 static pointer
435 allocate(allocator_type& __a, size_type __n)
436 { return __a.allocate(__n); }
437
438 /**
439 * @brief Allocate memory.
440 * @param __a An allocator.
441 * @param __n The number of objects to allocate space for.
442 * @param __hint Aid to locality.
443 * @return Memory of suitable size and alignment for @a n objects
444 * of type @c value_type
445 *
446 * Returns <tt> a.allocate(n, hint) </tt>
447 */
448 static pointer
449 allocate(allocator_type& __a, size_type __n, const_void_pointer __hint)
450 { return __a.allocate(__n, __hint); }
451
452 /**
453 * @brief Deallocate memory.
454 * @param __a An allocator.
455 * @param __p Pointer to the memory to deallocate.
456 * @param __n The number of objects space was allocated for.
457 *
458 * Calls <tt> a.deallocate(p, n) </tt>
459 */
460 static void
461 deallocate(allocator_type& __a, pointer __p, size_type __n)
462 { __a.deallocate(__p, __n); }
463
464 /**
465 * @brief Construct an object of type @a _Up
466 * @param __a An allocator.
467 * @param __p Pointer to memory of suitable size and alignment for Tp
468 * @param __args Constructor arguments.
469 *
470 * Calls <tt> __a.construct(__p, std::forward<Args>(__args)...) </tt>
471 */
472 template<typename _Up, typename... _Args>
473 static void
474 construct(allocator_type& __a, _Up* __p, _Args&&... __args)
475 { __a.construct(__p, std::forward<_Args>(__args)...); }
19. Calling 'new_allocator::construct'
476
477 /**
478 * @brief Destroy an object of type @a _Up
479 * @param __a An allocator.
480 * @param __p Pointer to the object to destroy
481 *
482 * Calls @c __a.destroy(__p).
483 */
484 template<typename _Up>
485 static void
486 destroy(allocator_type& __a, _Up* __p)
487 { __a.destroy(__p); }
488
489 /**
490 * @brief The maximum supported allocation size
491 * @param __a An allocator.
492 * @return @c __a.max_size()
493 */
494 static size_type
495 max_size(const allocator_type& __a) noexcept
496 { return __a.max_size(); }
497
498 /**
499 * @brief Obtain an allocator to use when copying a container.
500 * @param __rhs An allocator.
501 * @return @c __rhs
502 */
503 static allocator_type
504 select_on_container_copy_construction(const allocator_type& __rhs)
505 { return __rhs; }
506 };
507
508
509 template<typename _Alloc>
510 inline void
511 __do_alloc_on_copy(_Alloc& __one, const _Alloc& __two, true_type)
512 { __one = __two; }
513
514 template<typename _Alloc>
515 inline void
516 __do_alloc_on_copy(_Alloc&, const _Alloc&, false_type)
517 { }
518
519 template<typename _Alloc>
520 inline void __alloc_on_copy(_Alloc& __one, const _Alloc& __two)
521 {
522 typedef allocator_traits<_Alloc> __traits;
523 typedef typename __traits::propagate_on_container_copy_assignment __pocca;
524 __do_alloc_on_copy(__one, __two, __pocca());
525 }
526
527 template<typename _Alloc>
528 inline _Alloc __alloc_on_copy(const _Alloc& __a)
529 {
530 typedef allocator_traits<_Alloc> __traits;
531 return __traits::select_on_container_copy_construction(__a);
532 }
533
534 template<typename _Alloc>
535 inline void __do_alloc_on_move(_Alloc& __one, _Alloc& __two, true_type)
536 { __one = std::move(__two); }
537
538 template<typename _Alloc>
539 inline void __do_alloc_on_move(_Alloc&, _Alloc&, false_type)
540 { }
541
542 template<typename _Alloc>
543 inline void __alloc_on_move(_Alloc& __one, _Alloc& __two)
544 {
545 typedef allocator_traits<_Alloc> __traits;
546 typedef typename __traits::propagate_on_container_move_assignment __pocma;
547 __do_alloc_on_move(__one, __two, __pocma());
548 }
549
550 template<typename _Alloc>
551 inline void __do_alloc_on_swap(_Alloc& __one, _Alloc& __two, true_type)
552 {
553 using std::swap;
554 swap(__one, __two);
555 }
556
557 template<typename _Alloc>
558 inline void __do_alloc_on_swap(_Alloc&, _Alloc&, false_type)
559 { }
560
561 template<typename _Alloc>
562 inline void __alloc_on_swap(_Alloc& __one, _Alloc& __two)
563 {
564 typedef allocator_traits<_Alloc> __traits;
565 typedef typename __traits::propagate_on_container_swap __pocs;
566 __do_alloc_on_swap(__one, __two, __pocs());
567 }
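
  // Illustration (summary): for _Alloc = std::allocator<T>, the partial
  // specialization above yields
  //   propagate_on_container_copy_assignment = false_type, so
  //     __alloc_on_copy leaves the destination allocator untouched;
  //   propagate_on_container_move_assignment = true_type, so
  //     __alloc_on_move move-assigns the (stateless) allocator;
  //   propagate_on_container_swap = false_type, so __alloc_on_swap is a
  //     no-op.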
568
569 template<typename _Alloc>
570 class __is_copy_insertable_impl
571 {
572 typedef allocator_traits<_Alloc> _Traits;
573
574 template<typename _Up, typename
575 = decltype(_Traits::construct(std::declval<_Alloc&>(),
576 std::declval<_Up*>(),
577 std::declval<const _Up&>()))>
578 static true_type
579 _M_select(int);
580
581 template<typename _Up>
582 static false_type
583 _M_select(...);
584
585 public:
586 typedef decltype(_M_select<typename _Alloc::value_type>(0)) type;
587 };
588
589 // true if _Alloc::value_type is CopyInsertable into containers using _Alloc
590 template<typename _Alloc>
591 struct __is_copy_insertable
592 : __is_copy_insertable_impl<_Alloc>::type
593 { };
594
595 // std::allocator<_Tp> just requires CopyConstructible
596 template<typename _Tp>
597 struct __is_copy_insertable<allocator<_Tp>>
598 : is_copy_constructible<_Tp>
599 { };
600
601_GLIBCXX_END_NAMESPACE_VERSION
602} // namespace std
603
604#endif
605#endif

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/ext/new_allocator.h

1// Allocator that wraps operator new -*- C++ -*-
2
3// Copyright (C) 2001-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file ext/new_allocator.h
26 * This file is a GNU extension to the Standard C++ Library.
27 */
28
29#ifndef _NEW_ALLOCATOR_H
30#define _NEW_ALLOCATOR_H 1
31
32#include <bits/c++config.h>
33#include <new>
34#include <bits/functexcept.h>
35#include <bits/move.h>
36#if __cplusplus >= 201103L
37#include <type_traits>
38#endif
39
40namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
41{
42_GLIBCXX_BEGIN_NAMESPACE_VERSION
43
44 using std::size_t;
45 using std::ptrdiff_t;
46
47 /**
48 * @brief An allocator that uses global new, as per [20.4].
49 * @ingroup allocators
50 *
51 * This is precisely the allocator defined in the C++ Standard.
52 * - all allocation calls operator new
53 * - all deallocation calls operator delete
54 *
55 * @tparam _Tp Type of allocated object.
56 */
57 template<typename _Tp>
58 class new_allocator
59 {
60 public:
61 typedef size_t size_type;
62 typedef ptrdiff_t difference_type;
63 typedef _Tp* pointer;
64 typedef const _Tp* const_pointer;
65 typedef _Tp& reference;
66 typedef const _Tp& const_reference;
67 typedef _Tp value_type;
68
69 template<typename _Tp1>
70 struct rebind
71 { typedef new_allocator<_Tp1> other; };
72
73#if __cplusplus >= 201103L
74 // _GLIBCXX_RESOLVE_LIB_DEFECTS
75 // 2103. propagate_on_container_move_assignment
76 typedef std::true_type propagate_on_container_move_assignment;
77#endif
78
79      new_allocator() _GLIBCXX_USE_NOEXCEPT { }
80
81      new_allocator(const new_allocator&) _GLIBCXX_USE_NOEXCEPT { }
82
83 template<typename _Tp1>
84        new_allocator(const new_allocator<_Tp1>&) _GLIBCXX_USE_NOEXCEPT { }
85
86      ~new_allocator() _GLIBCXX_USE_NOEXCEPT { }
87
88 pointer
89      address(reference __x) const _GLIBCXX_NOEXCEPT
90 { return std::__addressof(__x); }
91
92 const_pointer
93      address(const_reference __x) const _GLIBCXX_NOEXCEPT
94 { return std::__addressof(__x); }
95
96 // NB: __n is permitted to be 0. The C++ standard says nothing
97 // about what the return value is when __n == 0.
98 pointer
99 allocate(size_type __n, const void* = 0)
100 {
101 if (__n > this->max_size())
102 std::__throw_bad_alloc();
103
104 return static_cast<_Tp*>(::operator new(__n * sizeof(_Tp)));
105 }
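
A minimal usage sketch (not part of the header):

    #include <ext/new_allocator.h>

    __gnu_cxx::new_allocator<int> a;
    int* p = a.allocate(4);  // storage for 4 ints from ::operator new
    a.deallocate(p, 4);      // returned via ::operator delete
    // Requesting more than a.max_size() elements throws std::bad_alloc
    // before ::operator new is ever called.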
106
107 // __p is not permitted to be a null pointer.
108 void
109 deallocate(pointer __p, size_type)
110 { ::operator delete(__p); }
111
112 size_type
113      max_size() const _GLIBCXX_USE_NOEXCEPT
114 { return size_t(-1) / sizeof(_Tp); }
115
116#if __cplusplus >= 201103L
117 template<typename _Up, typename... _Args>
118 void
119 construct(_Up* __p, _Args&&... __args)
120 { ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }
20. Calling constructor for 'PoolEntry'
121
122 template<typename _Up>
123 void
124 destroy(_Up* __p) { __p->~_Up(); }
125#else
126 // _GLIBCXX_RESOLVE_LIB_DEFECTS
127 // 402. wrong new expression in [some_] allocator::construct
128 void
129 construct(pointer __p, const _Tp& __val)
130 { ::new((void *)__p) _Tp(__val); }
131
132 void
133 destroy(pointer __p) { __p->~_Tp(); }
134#endif
135 };
136
137 template<typename _Tp>
138 inline bool
139 operator==(const new_allocator<_Tp>&, const new_allocator<_Tp>&)
140 { return true; }
141
142 template<typename _Tp>
143 inline bool
144 operator!=(const new_allocator<_Tp>&, const new_allocator<_Tp>&)
145 { return false; }
146
147_GLIBCXX_END_NAMESPACE_VERSION
148} // namespace
149
150#endif

/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/CodeGen/PBQP/Math.h

1//===- Math.h - PBQP Vector and Matrix classes ------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_CODEGEN_PBQP_MATH_H
11#define LLVM_CODEGEN_PBQP_MATH_H
12
13#include "llvm/ADT/Hashing.h"
14#include "llvm/ADT/STLExtras.h"
15#include <algorithm>
16#include <cassert>
17#include <functional>
18#include <memory>
19
20namespace llvm {
21namespace PBQP {
22
23using PBQPNum = float;
24
25/// PBQP Vector class.
26class Vector {
27 friend hash_code hash_value(const Vector &);
28
29public:
30 /// Construct a PBQP vector of the given size.
31 explicit Vector(unsigned Length)
32 : Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {}
33
34 /// Construct a PBQP vector with initializer.
35 Vector(unsigned Length, PBQPNum InitVal)
36 : Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
37 std::fill(Data.get(), Data.get() + Length, InitVal);
38 }
39
40 /// Copy construct a PBQP vector.
41 Vector(const Vector &V)
42 : Length(V.Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
43 std::copy(V.Data.get(), V.Data.get() + Length, Data.get());
44 }
45
46 /// Move construct a PBQP vector.
47 Vector(Vector &&V)
48 : Length(V.Length), Data(std::move(V.Data)) {
49 V.Length = 0;
50 }
51
52 /// Comparison operator.
53 bool operator==(const Vector &V) const {
54    assert(Length != 0 && Data && "Invalid vector");
55 if (Length != V.Length)
56 return false;
57 return std::equal(Data.get(), Data.get() + Length, V.Data.get());
58 }
59
60 /// Return the length of the vector
61 unsigned getLength() const {
62    assert(Length != 0 && Data && "Invalid vector");
63 return Length;
64 }
65
66 /// Element access.
67 PBQPNum& operator[](unsigned Index) {
68    assert(Length != 0 && Data && "Invalid vector");
69    assert(Index < Length && "Vector element access out of bounds.");
70 return Data[Index];
71 }
72
73 /// Const element access.
74 const PBQPNum& operator[](unsigned Index) const {
75    assert(Length != 0 && Data && "Invalid vector");
76    assert(Index < Length && "Vector element access out of bounds.");
77 return Data[Index];
78 }
79
80 /// Add another vector to this one.
81 Vector& operator+=(const Vector &V) {
82    assert(Length != 0 && Data && "Invalid vector");
83    assert(Length == V.Length && "Vector length mismatch.");
84 std::transform(Data.get(), Data.get() + Length, V.Data.get(), Data.get(),
85 std::plus<PBQPNum>());
86 return *this;
87 }
88
89 /// Returns the index of the minimum value in this vector
90 unsigned minIndex() const {
91    assert(Length != 0 && Data && "Invalid vector");
92 return std::min_element(Data.get(), Data.get() + Length) - Data.get();
93 }
94
95private:
96 unsigned Length;
97 std::unique_ptr<PBQPNum []> Data;
98};
99
100/// Return a hash_code for the given vector.
101inline hash_code hash_value(const Vector &V) {
102 unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data.get());
103 unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data.get() + V.Length);
104 return hash_combine(V.Length, hash_combine_range(VBegin, VEnd));
105}
106
107/// Output a textual representation of the given vector on the given
108/// output stream.
109template <typename OStream>
110OStream& operator<<(OStream &OS, const Vector &V) {
111  assert((V.getLength() != 0) && "Zero-length vector badness.");
112
113 OS << "[ " << V[0];
114 for (unsigned i = 1; i < V.getLength(); ++i)
115 OS << ", " << V[i];
116 OS << " ]";
117
118 return OS;
119}
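The Vector class above is a fixed-length, heap-backed array of PBQPNum with value semantics: copies duplicate the buffer, moves steal it and zero the source's Length. A minimal usage sketch, assuming Math.h is reachable on the include path (the driver itself is hypothetical, not part of the file):

#include "llvm/CodeGen/PBQP/Math.h"
#include <iostream>

int main() {
  llvm::PBQP::Vector A(3, 1.0);     // [ 1, 1, 1 ]
  llvm::PBQP::Vector B(3, 0.0);
  B[1] = -2.5;                      // element access is assert-checked
  A += B;                           // element-wise sum: [ 1, -1.5, 1 ]
  std::cout << A << " min at " << A.minIndex() << "\n";  // min at 1
  return 0;
}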
120
121/// PBQP Matrix class
122class Matrix {
123private:
124 friend hash_code hash_value(const Matrix &);
125
126public:
127 /// Construct a PBQP Matrix with the given dimensions.
128 Matrix(unsigned Rows, unsigned Cols) :
129 Rows(Rows), Cols(Cols), Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
130 }
131
132 /// Construct a PBQP Matrix with the given dimensions and initial
133 /// value.
134 Matrix(unsigned Rows, unsigned Cols, PBQPNum InitVal)
135 : Rows(Rows), Cols(Cols),
136 Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
137 std::fill(Data.get(), Data.get() + (Rows * Cols), InitVal);
138 }
139
140 /// Copy construct a PBQP matrix.
141 Matrix(const Matrix &M)
142 : Rows(M.Rows), Cols(M.Cols),
143 Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
144 std::copy(M.Data.get(), M.Data.get() + (Rows * Cols), Data.get());
145 }
146
147 /// Move construct a PBQP matrix.
148 Matrix(Matrix &&M)
149 : Rows(M.Rows), Cols(M.Cols), Data(std::move(M.Data)) {
150 M.Rows = M.Cols = 0;
151 }
152
153 /// Comparison operator.
154 bool operator==(const Matrix &M) const {
155    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
156 if (Rows != M.Rows || Cols != M.Cols)
157 return false;
158 return std::equal(Data.get(), Data.get() + (Rows * Cols), M.Data.get());
159 }
160
161 /// Return the number of rows in this matrix.
162 unsigned getRows() const {
163    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
164 return Rows;
165 }
166
167 /// Return the number of cols in this matrix.
168 unsigned getCols() const {
169    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
170 return Cols;
171 }
172
173 /// Matrix element access.
174 PBQPNum* operator[](unsigned R) {
175    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
176    assert(R < Rows && "Row out of bounds.");
177 return Data.get() + (R * Cols);
178 }
179
180 /// Matrix element access.
181 const PBQPNum* operator[](unsigned R) const {
182    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
183    assert(R < Rows && "Row out of bounds.");
184 return Data.get() + (R * Cols);
185 }
186
187 /// Returns the given row as a vector.
188 Vector getRowAsVector(unsigned R) const {
189    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
190 Vector V(Cols);
191 for (unsigned C = 0; C < Cols; ++C)
192 V[C] = (*this)[R][C];
193 return V;
194 }
195
196 /// Returns the given column as a vector.
197 Vector getColAsVector(unsigned C) const {
198    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
199 Vector V(Rows);
200 for (unsigned R = 0; R < Rows; ++R)
201 V[R] = (*this)[R][C];
202 return V;
203 }
204
205 /// Matrix transpose.
206 Matrix transpose() const {
207    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
208 Matrix M(Cols, Rows);
209 for (unsigned r = 0; r < Rows; ++r)
210 for (unsigned c = 0; c < Cols; ++c)
211 M[c][r] = (*this)[r][c];
212 return M;
213 }
214
215 /// Add the given matrix to this one.
216 Matrix& operator+=(const Matrix &M) {
217    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
218    assert(Rows == M.Rows && Cols == M.Cols &&
219           "Matrix dimensions mismatch.");
220 std::transform(Data.get(), Data.get() + (Rows * Cols), M.Data.get(),
221 Data.get(), std::plus<PBQPNum>());
222 return *this;
223 }
224
225 Matrix operator+(const Matrix &M) {
226    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
227 Matrix Tmp(*this);
228 Tmp += M;
229 return Tmp;
230 }
231
232private:
233 unsigned Rows, Cols;
234 std::unique_ptr<PBQPNum []> Data;
235};
236
237/// Return a hash_code for the given matrix.
238inline hash_code hash_value(const Matrix &M) {
239 unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data.get());
240 unsigned *MEnd =
241 reinterpret_cast<unsigned*>(M.Data.get() + (M.Rows * M.Cols));
242 return hash_combine(M.Rows, M.Cols, hash_combine_range(MBegin, MEnd));
243}
244
245/// Output a textual representation of the given matrix on the given
246/// output stream.
247template <typename OStream>
248OStream& operator<<(OStream &OS, const Matrix &M) {
249  assert((M.getRows() != 0) && "Zero-row matrix badness.");
250 for (unsigned i = 0; i < M.getRows(); ++i)
251 OS << M.getRowAsVector(i) << "\n";
252 return OS;
253}
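Matrix follows the same pattern in two dimensions: Rows * Cols entries in a single row-major buffer, with operator[] returning a raw pointer to the start of row R. A short sketch under the same assumptions as the Vector example above:

#include "llvm/CodeGen/PBQP/Math.h"
#include <iostream>

int main() {
  llvm::PBQP::Matrix M(2, 3, 0.0);           // 2x3, zero-initialized
  M[0][2] = 4.0;                             // row pointer + column offset
  llvm::PBQP::Matrix T = M.transpose();      // 3x2; T[2][0] == 4.0
  std::cout << T.getRowAsVector(2) << "\n";  // prints [ 4, 0 ]
  return 0;
}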
254
255template <typename Metadata>
256class MDVector : public Vector {
257public:
258 MDVector(const Vector &v) : Vector(v), md(*this) {}
259 MDVector(Vector &&v) : Vector(std::move(v)), md(*this) { }
260
261 const Metadata& getMetadata() const { return md; }
262
263private:
264 Metadata md;
265};
266
267template <typename Metadata>
268inline hash_code hash_value(const MDVector<Metadata> &V) {
269 return hash_value(static_cast<const Vector&>(V));
270}
271
272template <typename Metadata>
273class MDMatrix : public Matrix {
274public:
275 MDMatrix(const Matrix &m) : Matrix(m), md(*this) {}
276 MDMatrix(Matrix &&m) : Matrix(std::move(m)), md(*this) { }
22. Calling constructor for 'MatrixMetadata'
277
278 const Metadata& getMetadata() const { return md; }
279
280private:
281 Metadata md;
282};
283
284template <typename Metadata>
285inline hash_code hash_value(const MDMatrix<Metadata> &M) {
286 return hash_value(static_cast<const Matrix&>(M));
287}
288
289} // end namespace PBQP
290} // end namespace llvm
291
292#endif // LLVM_CODEGEN_PBQP_MATH_H

/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/CodeGen/RegAllocPBQP.h

1//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the PBQPBuilder interface, for classes which build PBQP
11// instances to represent register allocation problems, and the RegAllocPBQP
12// interface.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
17#define LLVM_CODEGEN_REGALLOCPBQP_H
18
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/Hashing.h"
21#include "llvm/CodeGen/PBQP/CostAllocator.h"
22#include "llvm/CodeGen/PBQP/Graph.h"
23#include "llvm/CodeGen/PBQP/Math.h"
24#include "llvm/CodeGen/PBQP/ReductionRules.h"
25#include "llvm/CodeGen/PBQP/Solution.h"
26#include "llvm/Support/ErrorHandling.h"
27#include <algorithm>
28#include <cassert>
29#include <cstddef>
30#include <limits>
31#include <memory>
32#include <set>
33#include <vector>
34
35namespace llvm {
36
37class FunctionPass;
38class LiveIntervals;
39class MachineBlockFrequencyInfo;
40class MachineFunction;
41class raw_ostream;
42
43namespace PBQP {
44namespace RegAlloc {
45
46/// Spill option index.
47inline unsigned getSpillOptionIdx() { return 0; }
48
49/// Metadata to speed allocatability test.
50///
51/// Keeps track of the number of infinities in each row and column.
52class MatrixMetadata {
53public:
54 MatrixMetadata(const Matrix& M)
55 : UnsafeRows(new bool[M.getRows() - 1]()),
56 UnsafeCols(new bool[M.getCols() - 1]()) {
57 unsigned* ColCounts = new unsigned[M.getCols() - 1]();
23. Memory is allocated
58
59 for (unsigned i = 1; i < M.getRows(); ++i) {
24. Loop condition is true. Entering loop body
26. Loop condition is false. Execution continues on line 72
60 unsigned RowCount = 0;
61 for (unsigned j = 1; j < M.getCols(); ++j) {
25. Loop condition is false. Execution continues on line 69
62 if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
63 ++RowCount;
64 ++ColCounts[j - 1];
65 UnsafeRows[i - 1] = true;
66 UnsafeCols[j - 1] = true;
67 }
68 }
69 WorstRow = std::max(WorstRow, RowCount);
70 }
71 unsigned WorstColCountForCurRow =
72 *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
27. Use of zero-allocated memory
73 WorstCol = std::max(WorstCol, WorstColCountForCurRow);
74 delete[] ColCounts;
75 }
76
77 MatrixMetadata(const MatrixMetadata &) = delete;
78 MatrixMetadata &operator=(const MatrixMetadata &) = delete;
79
80 unsigned getWorstRow() const { return WorstRow; }
81 unsigned getWorstCol() const { return WorstCol; }
82 const bool* getUnsafeRows() const { return UnsafeRows.get(); }
83 const bool* getUnsafeCols() const { return UnsafeCols.get(); }
84
85private:
86 unsigned WorstRow = 0;
87 unsigned WorstCol = 0;
88 std::unique_ptr<bool[]> UnsafeRows;
89 std::unique_ptr<bool[]> UnsafeCols;
90};
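Restated in isolation, the analyzer's path through this constructor is: with a matrix of getRows() >= 2 and getCols() == 1, the expression new unsigned[M.getCols() - 1]() performs a zero-length allocation (step 23), the inner column loop never runs (step 25), and std::max_element over the empty range [ColCounts, ColCounts + 0) returns its end argument, so the dereference on line 72 reads zero-allocated memory (step 27). A minimal reproduction of just that pattern, with no LLVM dependencies (the names mirror the constructor, but the snippet is illustrative only):

#include <algorithm>
#include <iostream>

int main() {
  unsigned Cols = 1;  // a one-column cost matrix, as on the analyzer's path
  unsigned *ColCounts = new unsigned[Cols - 1]();  // zero-length allocation

  // Empty range: max_element returns its 'last' argument, so the
  // dereference reads one-past-the-end of a zero-sized buffer (UB).
  unsigned Worst = *std::max_element(ColCounts, ColCounts + Cols - 1);

  std::cout << Worst << "\n";
  delete[] ColCounts;
  return 0;
}

One possible guard, offered as an assumption rather than the upstream fix, is to skip the max_element call when M.getCols() <= 1; the handleAddNode assertion later in this header ("PBQP Graph should not contain single or zero-option nodes") suggests cost matrices with at least two rows and columns are the intended invariant, which would make this path infeasible in practice.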
91
92/// Holds a vector of the allowed physical regs for a vreg.
93class AllowedRegVector {
94 friend hash_code hash_value(const AllowedRegVector &);
95
96public:
97 AllowedRegVector() = default;
98 AllowedRegVector(AllowedRegVector &&) = default;
99
100 AllowedRegVector(const std::vector<unsigned> &OptVec)
101 : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
102 std::copy(OptVec.begin(), OptVec.end(), Opts.get());
103 }
104
105 unsigned size() const { return NumOpts; }
106 unsigned operator[](size_t I) const { return Opts[I]; }
107
108 bool operator==(const AllowedRegVector &Other) const {
109 if (NumOpts != Other.NumOpts)
110 return false;
111 return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
112 }
113
114 bool operator!=(const AllowedRegVector &Other) const {
115 return !(*this == Other);
116 }
117
118private:
119 unsigned NumOpts = 0;
120 std::unique_ptr<unsigned[]> Opts;
121};
122
123inline hash_code hash_value(const AllowedRegVector &OptRegs) {
124 unsigned *OStart = OptRegs.Opts.get();
125 unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
126 return hash_combine(OptRegs.NumOpts,
127 hash_combine_range(OStart, OEnd));
128}
129
130/// Holds graph-level metadata relevant to PBQP RA problems.
131class GraphMetadata {
132private:
133 using AllowedRegVecPool = ValuePool<AllowedRegVector>;
134
135public:
136 using AllowedRegVecRef = AllowedRegVecPool::PoolRef;
137
138 GraphMetadata(MachineFunction &MF,
139 LiveIntervals &LIS,
140 MachineBlockFrequencyInfo &MBFI)
141 : MF(MF), LIS(LIS), MBFI(MBFI) {}
142
143 MachineFunction &MF;
144 LiveIntervals &LIS;
145 MachineBlockFrequencyInfo &MBFI;
146
147 void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
148 VRegToNodeId[VReg] = NId;
149 }
150
151 GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
152 auto VRegItr = VRegToNodeId.find(VReg);
153 if (VRegItr == VRegToNodeId.end())
154 return GraphBase::invalidNodeId();
155 return VRegItr->second;
156 }
157
158 AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
159 return AllowedRegVecs.getValue(std::move(Allowed));
160 }
161
162private:
163 DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
164 AllowedRegVecPool AllowedRegVecs;
165};
166
167/// Holds solver state and other metadata relevant to each PBQP RA node.
168class NodeMetadata {
169public:
170 using AllowedRegVector = RegAlloc::AllowedRegVector;
171
172 // The node's reduction state. The order in this enum is important,
173 // as it is assumed nodes can only progress up (i.e. towards being
174 // optimally reducible) when reducing the graph.
175 using ReductionState = enum {
176 Unprocessed,
177 NotProvablyAllocatable,
178 ConservativelyAllocatable,
179 OptimallyReducible
180 };
181
182 NodeMetadata() = default;
183
184 NodeMetadata(const NodeMetadata &Other)
185 : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
186 OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
187 AllowedRegs(Other.AllowedRegs)
188#ifndef NDEBUG
189 , everConservativelyAllocatable(Other.everConservativelyAllocatable)
190#endif
191 {
192 if (NumOpts > 0) {
193 std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
194 &OptUnsafeEdges[0]);
195 }
196 }
197
198 NodeMetadata(NodeMetadata &&) = default;
199 NodeMetadata& operator=(NodeMetadata &&) = default;
200
201 void setVReg(unsigned VReg) { this->VReg = VReg; }
202 unsigned getVReg() const { return VReg; }
203
204 void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
205 this->AllowedRegs = std::move(AllowedRegs);
206 }
207 const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
208
209 void setup(const Vector& Costs) {
210 NumOpts = Costs.getLength() - 1;
211 OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
212 }
213
214 ReductionState getReductionState() const { return RS; }
215 void setReductionState(ReductionState RS) {
216    assert(RS >= this->RS && "A node's reduction state can not be downgraded");
217 this->RS = RS;
218
219#ifndef NDEBUG
220 // Remember this state to assert later that a non-infinite register
221 // option was available.
222 if (RS == ConservativelyAllocatable)
223 everConservativelyAllocatable = true;
224#endif
225 }
226
227 void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
228 DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
229 const bool* UnsafeOpts =
230 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
231 for (unsigned i = 0; i < NumOpts; ++i)
232 OptUnsafeEdges[i] += UnsafeOpts[i];
233 }
234
235 void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
236 DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
237 const bool* UnsafeOpts =
238 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
239 for (unsigned i = 0; i < NumOpts; ++i)
240 OptUnsafeEdges[i] -= UnsafeOpts[i];
241 }
242
243 bool isConservativelyAllocatable() const {
244 return (DeniedOpts < NumOpts) ||
245 (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
246 &OptUnsafeEdges[NumOpts]);
247 }
248
249#ifndef NDEBUG
250 bool wasConservativelyAllocatable() const {
251 return everConservativelyAllocatable;
252 }
253#endif
254
255private:
256 ReductionState RS = Unprocessed;
257 unsigned NumOpts = 0;
258 unsigned DeniedOpts = 0;
259 std::unique_ptr<unsigned[]> OptUnsafeEdges;
260 unsigned VReg = 0;
261 GraphMetadata::AllowedRegVecRef AllowedRegs;
262
263#ifndef NDEBUG
264 bool everConservativelyAllocatable = false;
265#endif
266};
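The isConservativelyAllocatable test above reduces to a counting argument: a node is guaranteed a register if fewer options are denied than exist, or if at least one option has no unsafe incident edge. A standalone restatement of that predicate on plain data (hypothetical helper, not the class itself):

#include <algorithm>
#include <cassert>
#include <vector>

// Mirrors NodeMetadata::isConservativelyAllocatable: true if
// DeniedOpts < NumOpts, or some option has zero unsafe edges.
static bool conservativelyAllocatable(unsigned DeniedOpts,
                                      const std::vector<unsigned> &OptUnsafeEdges) {
  return DeniedOpts < OptUnsafeEdges.size() ||
         std::find(OptUnsafeEdges.begin(), OptUnsafeEdges.end(), 0u) !=
             OptUnsafeEdges.end();
}

int main() {
  assert(conservativelyAllocatable(1, {2, 3, 1}));   // denied 1 of 3 options
  assert(conservativelyAllocatable(3, {2, 0, 1}));   // option 1 is fully safe
  assert(!conservativelyAllocatable(3, {2, 3, 1}));  // neither condition holds
  return 0;
}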
267
268class RegAllocSolverImpl {
269private:
270 using RAMatrix = MDMatrix<MatrixMetadata>;
271
272public:
273 using RawVector = PBQP::Vector;
274 using RawMatrix = PBQP::Matrix;
275 using Vector = PBQP::Vector;
276 using Matrix = RAMatrix;
277 using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;
278
279 using NodeId = GraphBase::NodeId;
280 using EdgeId = GraphBase::EdgeId;
281
282 using NodeMetadata = RegAlloc::NodeMetadata;
283 struct EdgeMetadata {};
284 using GraphMetadata = RegAlloc::GraphMetadata;
285
286 using Graph = PBQP::Graph<RegAllocSolverImpl>;
287
288 RegAllocSolverImpl(Graph &G) : G(G) {}
289
290 Solution solve() {
291 G.setSolver(*this);
292 Solution S;
293 setup();
294 S = backpropagate(G, reduce());
295 G.unsetSolver();
296 return S;
297 }
298
299 void handleAddNode(NodeId NId) {
300    assert(G.getNodeCosts(NId).getLength() > 1 &&
301           "PBQP Graph should not contain single or zero-option nodes");
302 G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
303 }
304
305 void handleRemoveNode(NodeId NId) {}
306 void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}
307
308 void handleAddEdge(EdgeId EId) {
309 handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
310 handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
311 }
312
313 void handleDisconnectEdge(EdgeId EId, NodeId NId) {
314 NodeMetadata& NMd = G.getNodeMetadata(NId);
315 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
316 NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
317 promote(NId, NMd);
318 }
319
320 void handleReconnectEdge(EdgeId EId, NodeId NId) {
321 NodeMetadata& NMd = G.getNodeMetadata(NId);
322 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
323 NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
324 }
325
326 void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
327 NodeId N1Id = G.getEdgeNode1Id(EId);
328 NodeId N2Id = G.getEdgeNode2Id(EId);
329 NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
330 NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
331 bool Transpose = N1Id != G.getEdgeNode1Id(EId);
332
333 // Metadata are computed incrementally. First, update them
334 // by removing the old cost.
335 const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
336 N1Md.handleRemoveEdge(OldMMd, Transpose);
337 N2Md.handleRemoveEdge(OldMMd, !Transpose);
338
339 // Then update the metadata with the new cost.
340 const MatrixMetadata& MMd = NewCosts.getMetadata();
341 N1Md.handleAddEdge(MMd, Transpose);
342 N2Md.handleAddEdge(MMd, !Transpose);
343
344 // As the metadata may have changed with the update, the nodes may have
345 // become ConservativelyAllocatable or OptimallyReducible.
346 promote(N1Id, N1Md);
347 promote(N2Id, N2Md);
348 }
349
350private:
351 void promote(NodeId NId, NodeMetadata& NMd) {
352 if (G.getNodeDegree(NId) == 3) {
353 // This node is becoming optimally reducible.
354 moveToOptimallyReducibleNodes(NId);
355 } else if (NMd.getReductionState() ==
356 NodeMetadata::NotProvablyAllocatable &&
357 NMd.isConservativelyAllocatable()) {
358 // This node just became conservatively allocatable.
359 moveToConservativelyAllocatableNodes(NId);
360 }
361 }
362
363 void removeFromCurrentSet(NodeId NId) {
364 switch (G.getNodeMetadata(NId).getReductionState()) {
365 case NodeMetadata::Unprocessed: break;
366 case NodeMetadata::OptimallyReducible:
367      assert(OptimallyReducibleNodes.find(NId) !=
368                 OptimallyReducibleNodes.end() &&
369             "Node not in optimally reducible set.");
370 OptimallyReducibleNodes.erase(NId);
371 break;
372 case NodeMetadata::ConservativelyAllocatable:
373      assert(ConservativelyAllocatableNodes.find(NId) !=
374                 ConservativelyAllocatableNodes.end() &&
375             "Node not in conservatively allocatable set.");
376 ConservativelyAllocatableNodes.erase(NId);
377 break;
378 case NodeMetadata::NotProvablyAllocatable:
379      assert(NotProvablyAllocatableNodes.find(NId) !=
380                 NotProvablyAllocatableNodes.end() &&
381             "Node not in not-provably-allocatable set.");
382 NotProvablyAllocatableNodes.erase(NId);
383 break;
384 }
385 }
386
387 void moveToOptimallyReducibleNodes(NodeId NId) {
388 removeFromCurrentSet(NId);
389 OptimallyReducibleNodes.insert(NId);
390 G.getNodeMetadata(NId).setReductionState(
391 NodeMetadata::OptimallyReducible);
392 }
393
394 void moveToConservativelyAllocatableNodes(NodeId NId) {
395 removeFromCurrentSet(NId);
396 ConservativelyAllocatableNodes.insert(NId);
397 G.getNodeMetadata(NId).setReductionState(
398 NodeMetadata::ConservativelyAllocatable);
399 }
400
401 void moveToNotProvablyAllocatableNodes(NodeId NId) {
402 removeFromCurrentSet(NId);
403 NotProvablyAllocatableNodes.insert(NId);
404 G.getNodeMetadata(NId).setReductionState(
405 NodeMetadata::NotProvablyAllocatable);
406 }
407
408 void setup() {
409 // Set up worklists.
410 for (auto NId : G.nodeIds()) {
411 if (G.getNodeDegree(NId) < 3)
412 moveToOptimallyReducibleNodes(NId);
413 else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
414 moveToConservativelyAllocatableNodes(NId);
415 else
416 moveToNotProvablyAllocatableNodes(NId);
417 }
418 }
419
420 // Compute a reduction order for the graph by iteratively applying PBQP
421 // reduction rules. Locally optimal rules are applied whenever possible (R0,
422 // R1, R2). If no locally-optimal rules apply then any conservatively
423 // allocatable node is reduced. Finally, if no conservatively allocatable
424 // node exists then the node with the lowest spill-cost:degree ratio is
425 // selected.
426 std::vector<GraphBase::NodeId> reduce() {
427    assert(!G.empty() && "Cannot reduce empty graph.");
428
429 using NodeId = GraphBase::NodeId;
430 std::vector<NodeId> NodeStack;
431
432 // Consume worklists.
433 while (true) {
434 if (!OptimallyReducibleNodes.empty()) {
435 NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
436 NodeId NId = *NItr;
437 OptimallyReducibleNodes.erase(NItr);
438 NodeStack.push_back(NId);
439 switch (G.getNodeDegree(NId)) {
440 case 0:
441 break;
442 case 1:
443 applyR1(G, NId);
444 break;
445 case 2:
446 applyR2(G, NId);
447 break;
448      default: llvm_unreachable("Not an optimally reducible node.");
449 }
450 } else if (!ConservativelyAllocatableNodes.empty()) {
451 // Conservatively allocatable nodes will never spill. For now just
452 // take the first node in the set and push it on the stack. When we
453 // start optimizing more heavily for register preferencing, it may
454 // be better to push nodes with lower 'expected' or worst-case
455 // register costs first (since early nodes are the most
456 // constrained).
457 NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
458 NodeId NId = *NItr;
459 ConservativelyAllocatableNodes.erase(NItr);
460 NodeStack.push_back(NId);
461 G.disconnectAllNeighborsFromNode(NId);
462 } else if (!NotProvablyAllocatableNodes.empty()) {
463 NodeSet::iterator NItr =
464 std::min_element(NotProvablyAllocatableNodes.begin(),
465 NotProvablyAllocatableNodes.end(),
466 SpillCostComparator(G));
467 NodeId NId = *NItr;
468 NotProvablyAllocatableNodes.erase(NItr);
469 NodeStack.push_back(NId);
470 G.disconnectAllNeighborsFromNode(NId);
471 } else
472 break;
473 }
474
475 return NodeStack;
476 }
477
478 class SpillCostComparator {
479 public:
480 SpillCostComparator(const Graph& G) : G(G) {}
481
482 bool operator()(NodeId N1Id, NodeId N2Id) {
483 PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
484 PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
485 if (N1SC == N2SC)
486 return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
487 return N1SC < N2SC;
488 }
489
490 private:
491 const Graph& G;
492 };
493
494 Graph& G;
495 using NodeSet = std::set<NodeId>;
496 NodeSet OptimallyReducibleNodes;
497 NodeSet ConservativelyAllocatableNodes;
498 NodeSet NotProvablyAllocatableNodes;
499};
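When both the optimally reducible and conservatively allocatable worklists are empty, reduce() falls back to SpillCostComparator to pick the next spill candidate: lowest spill cost (the cost of option 0) first, ties broken by lowest degree. A toy restatement of that ordering on plain data (the Node struct is hypothetical, for illustration only):

#include <algorithm>
#include <iostream>
#include <vector>

// Hypothetical stand-in for a PBQP node: SpillCost corresponds to
// G.getNodeCosts(NId)[0], Degree to G.getNodeDegree(NId).
struct Node {
  const char *Name;
  double SpillCost;
  unsigned Degree;
};

// Same ordering as SpillCostComparator: lower spill cost wins,
// equal costs fall back to lower degree.
static bool spillCostLess(const Node &A, const Node &B) {
  if (A.SpillCost == B.SpillCost)
    return A.Degree < B.Degree;
  return A.SpillCost < B.SpillCost;
}

int main() {
  std::vector<Node> Ns = {{"v1", 5.0, 4}, {"v2", 2.0, 7}, {"v3", 2.0, 3}};
  auto Next = std::min_element(Ns.begin(), Ns.end(), spillCostLess);
  std::cout << "next spill candidate: " << Next->Name << "\n";  // v3
  return 0;
}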
500
501class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
502private:
503 using BaseT = PBQP::Graph<RegAllocSolverImpl>;
504
505public:
506 PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}
507
508 /// Dump this graph to dbgs().
509 void dump() const;
510
511 /// Dump this graph to an output stream.
512 /// @param OS Output stream to print on.
513 void dump(raw_ostream &OS) const;
514
515 /// Print a representation of this graph in DOT format.
516 /// @param OS Output stream to print on.
517 void printDot(raw_ostream &OS) const;
518};
519
520inline Solution solve(PBQPRAGraph& G) {
521 if (G.empty())
522 return Solution();
523 RegAllocSolverImpl RegAllocSolver(G);
524 return RegAllocSolver.solve();
525}
526
527} // end namespace RegAlloc
528} // end namespace PBQP
529
530/// Create a PBQP register allocator instance.
531FunctionPass *
532createPBQPRegisterAllocator(char *customPassID = nullptr);
533
534} // end namespace llvm
535
536#endif // LLVM_CODEGEN_REGALLOCPBQP_H