Bug Summary

File: llvm/include/llvm/MC/MCRegisterInfo.h
Warning: line 217, column 21
Dereference of null pointer

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CriticalAntiDepBreaker.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include 
-internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp

1//===- CriticalAntiDepBreaker.cpp - Anti-dep breaker ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the CriticalAntiDepBreaker class, which
10// implements register anti-dependence breaking along a blocks
11// critical path during post-RA scheduler.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CriticalAntiDepBreaker.h"
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/CodeGen/MachineBasicBlock.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineInstr.h"
23#include "llvm/CodeGen/MachineOperand.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/RegisterClassInfo.h"
26#include "llvm/CodeGen/ScheduleDAG.h"
27#include "llvm/CodeGen/TargetInstrInfo.h"
28#include "llvm/CodeGen/TargetRegisterInfo.h"
29#include "llvm/CodeGen/TargetSubtargetInfo.h"
30#include "llvm/MC/MCInstrDesc.h"
31#include "llvm/MC/MCRegisterInfo.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/raw_ostream.h"
34#include <cassert>
35#include <utility>
36
37using namespace llvm;
38
39#define DEBUG_TYPE"post-RA-sched" "post-RA-sched"
40
// Constructor: cache the target hooks (TII/TRI) and size the per-physical-
// register state tables (Classes, KillIndices, DefIndices, KeepRegs) by
// TRI->getNumRegs().
41CriticalAntiDepBreaker::CriticalAntiDepBreaker(MachineFunction &MFi,
 42 const RegisterClassInfo &RCI)
 43 : AntiDepBreaker(), MF(MFi), MRI(MF.getRegInfo()),
 44 TII(MF.getSubtarget().getInstrInfo()),
 45 TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RCI),
 46 Classes(TRI->getNumRegs(), nullptr), KillIndices(TRI->getNumRegs(), 0),
 47 DefIndices(TRI->getNumRegs(), 0), KeepRegs(TRI->getNumRegs(), false) {}
48
49CriticalAntiDepBreaker::~CriticalAntiDepBreaker() = default;
50
// Reset all per-block liveness state for BB, then conservatively mark every
// register that may be live out of the block (successor live-ins, plus
// callee-saved/pristine registers) as live and not renamable.
51void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
 52 const unsigned BBSize = BB->size();
 53 for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) {
 54 // Clear out the register class data.
 55 Classes[i] = nullptr;
 56
 57 // Initialize the indices to indicate that no registers are live.
 58 KillIndices[i] = ~0u;
 59 DefIndices[i] = BBSize;
 60 }
 61
 62 // Clear "do not change" set.
 63 KeepRegs.reset();
 64
 65 bool IsReturnBlock = BB->isReturnBlock();
 66
 67 // Examine the live-in regs of all successors.
 68 for (const MachineBasicBlock *Succ : BB->successors())
 69 for (const auto &LI : Succ->liveins()) {
 70 for (MCRegAliasIterator AI(LI.PhysReg, TRI, true); AI.isValid(); ++AI) {
 71 unsigned Reg = *AI;
 // The (-1) sentinel in Classes[] means "live, but not a renaming
 // candidate" (see the Classes[Reg] == -1 checks in the scan passes).
 72 Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
 73 KillIndices[Reg] = BBSize;
 74 DefIndices[Reg] = ~0u;
 75 }
 76 }
 77
 78 // Mark live-out callee-saved registers. In a return block this is
 79 // all callee-saved registers. In non-return this is any
 80 // callee-saved register that is not saved in the prolog.
 81 const MachineFrameInfo &MFI = MF.getFrameInfo();
 82 BitVector Pristine = MFI.getPristineRegs(MF);
 83 for (const MCPhysReg *I = MF.getRegInfo().getCalleeSavedRegs(); *I;
 84 ++I) {
 85 unsigned Reg = *I;
 86 if (!IsReturnBlock && !Pristine.test(Reg))
 87 continue;
 88 for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
 // NOTE(review): this inner 'Reg' shadows the callee-saved 'Reg'
 // declared above; it holds the alias, not the CSR itself.
 89 unsigned Reg = *AI;
 90 Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
 91 KillIndices[Reg] = BBSize;
 92 DefIndices[Reg] = ~0u;
 93 }
 94 }
 95}
96
// Discard the per-block state accumulated by StartBlock / PrescanInstruction /
// ScanInstruction once the block has been fully processed.
97void CriticalAntiDepBreaker::FinishBlock() {
 98 RegRefs.clear();
 99 KeepRegs.reset();
100}
101
// Update liveness bookkeeping for an instruction that has already been
// scheduled. Count is the instruction's index; InsertPosIndex bounds the
// previously scheduled region (the assert below requires Count <
// InsertPosIndex). Debug and kill instructions are skipped entirely.
102void CriticalAntiDepBreaker::Observe(MachineInstr &MI, unsigned Count,
 103 unsigned InsertPosIndex) {
 104 // Kill instructions can define registers but are really nops, and there might
 105 // be a real definition earlier that needs to be paired with uses dominated by
 106 // this kill.
 107
 108 // FIXME: It may be possible to remove the isKill() restriction once PR18663
 109 // has been properly fixed. There can be value in processing kills as seen in
 110 // the AggressiveAntiDepBreaker class.
 111 if (MI.isDebugInstr() || MI.isKill())
 112 return;
 // Note: the assert below appears macro-expanded in this analyzer report
 // (NDEBUG build), hence the trailing (static_cast<void>(0)).
 113 assert(Count < InsertPosIndex && "Instruction index out of expected range!")(static_cast<void> (0));
 114
 115 for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
 116 if (KillIndices[Reg] != ~0u) {
 117 // If Reg is currently live, then mark that it can't be renamed as
 118 // we don't know the extent of its live-range anymore (now that it
 119 // has been scheduled).
 120 Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
 121 KillIndices[Reg] = Count;
 122 } else if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
 123 // Any register which was defined within the previous scheduling region
 124 // may have been rescheduled and its lifetime may overlap with registers
 125 // in ways not reflected in our current liveness state. For each such
 126 // register, adjust the liveness state to be conservatively correct.
 127 Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
 128
 129 // Move the def index to the end of the previous region, to reflect
 130 // that the def could theoretically have been scheduled at the end.
 131 DefIndices[Reg] = InsertPosIndex;
 132 }
 133 }
 134
 135 PrescanInstruction(MI);
 136 ScanInstruction(MI, Count);
 137}
138
139/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
140/// critical path.
// CriticalPathStep - Return the next SUnit after SU on the bottom-up
// critical path (the predecessor edge with the greatest total latency).
// Returns nullptr when SU has no predecessors, i.e. the end of the path.
141static const SDep *CriticalPathStep(const SUnit *SU) {
 142 const SDep *Next = nullptr;
 143 unsigned NextDepth = 0;
 144 // Find the predecessor edge with the greatest depth.
 145 for (const SDep &P : SU->Preds) {
 146 const SUnit *PredSU = P.getSUnit();
 147 unsigned PredLatency = P.getLatency();
 148 unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
 149 // In the case of a latency tie, prefer an anti-dependency edge over
 150 // other types of edges.
 151 if (NextDepth < PredTotalLatency ||
 152 (NextDepth == PredTotalLatency && P.getKind() == SDep::Anti)) {
 153 NextDepth = PredTotalLatency;
 154 Next = &P;
 155 }
 156 }
 157 return Next;
 158}
159
// First pass over MI's register operands (run before ScanInstruction):
// records each reference in RegRefs, merges per-operand register-class
// constraints into Classes (with -1 meaning "not renamable"), and marks
// registers that must never be renamed in KeepRegs (tied operands, and all
// uses in calls / predicated / extra-alloc-req instructions).
160void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr &MI) {
 161 // It's not safe to change register allocation for source operands of
 162 // instructions that have special allocation requirements. Also assume all
 163 // registers used in a call must not be changed (ABI).
 164 // FIXME: The issue with predicated instruction is more complex. We are being
 165 // conservative here because the kill markers cannot be trusted after
 166 // if-conversion:
 167 // %r6 = LDR %sp, %reg0, 92, 14, %reg0; mem:LD4[FixedStack14]
 168 // ...
 169 // STR %r0, killed %r6, %reg0, 0, 0, %cpsr; mem:ST4[%395]
 170 // %r6 = LDR %sp, %reg0, 100, 0, %cpsr; mem:LD4[FixedStack12]
 171 // STR %r0, killed %r6, %reg0, 0, 14, %reg0; mem:ST4[%396](align=8)
 172 //
 173 // The first R6 kill is not really a kill since it's killed by a predicated
 174 // instruction which may not be executed. The second R6 def may or may not
 175 // re-define R6 so it's not safe to change it since the last R6 use cannot be
 176 // changed.
 177 bool Special =
 178 MI.isCall() || MI.hasExtraSrcRegAllocReq() || TII->isPredicated(MI);
 179
 180 // Scan the register operands for this instruction and update
 181 // Classes and RegRefs.
 182 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
 183 MachineOperand &MO = MI.getOperand(i);
 184 if (!MO.isReg()) continue;
 185 Register Reg = MO.getReg();
 186 if (Reg == 0) continue;
 187 const TargetRegisterClass *NewRC = nullptr;
 188
 // Only operands described in the MCInstrDesc carry a class constraint;
 // implicit operands beyond that range leave NewRC null.
 189 if (i < MI.getDesc().getNumOperands())
 190 NewRC = TII->getRegClass(MI.getDesc(), i, TRI, MF);
 191
 192 // For now, only allow the register to be changed if its register
 193 // class is consistent across all uses.
 194 if (!Classes[Reg] && NewRC)
 195 Classes[Reg] = NewRC;
 196 else if (!NewRC || Classes[Reg] != NewRC)
 197 Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
 198
 199 // Now check for aliases.
 200 for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
 201 // If an alias of the reg is used during the live range, give up.
 202 // Note that this allows us to skip checking if AntiDepReg
 203 // overlaps with any of the aliases, among other things.
 204 unsigned AliasReg = *AI;
 205 if (Classes[AliasReg]) {
 206 Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
 207 Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
 208 }
 209 }
 210
 211 // If we're still willing to consider this register, note the reference.
 212 if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
 213 RegRefs.insert(std::make_pair(Reg, &MO));
 214
 215 // If this reg is tied and live (Classes[Reg] is set to -1), we can't change
 216 // it or any of its sub or super regs. We need to use KeepRegs to mark the
 217 // reg because not all uses of the same reg within an instruction are
 218 // necessarily tagged as tied.
 219 // Example: an x86 "xor %eax, %eax" will have one source operand tied to the
 220 // def register but not the second (see PR20020 for details).
 221 // FIXME: can this check be relaxed to account for undef uses
 222 // of a register? In the above 'xor' example, the uses of %eax are undef, so
 223 // earlier instructions could still replace %eax even though the 'xor'
 224 // itself can't be changed.
 225 if (MI.isRegTiedToUseOperand(i) &&
 226 Classes[Reg] == reinterpret_cast<TargetRegisterClass *>(-1)) {
 227 for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
 228 SubRegs.isValid(); ++SubRegs) {
 229 KeepRegs.set(*SubRegs);
 230 }
 231 for (MCSuperRegIterator SuperRegs(Reg, TRI);
 232 SuperRegs.isValid(); ++SuperRegs) {
 233 KeepRegs.set(*SuperRegs);
 234 }
 235 }
 236
 // In "special" instructions (calls, predicated, extra-src-alloc-req),
 // pin every used register and its sub-registers.
 237 if (MO.isUse() && Special) {
 238 if (!KeepRegs.test(Reg)) {
 239 for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
 240 SubRegs.isValid(); ++SubRegs)
 241 KeepRegs.set(*SubRegs);
 242 }
 243 }
 244 }
 245}
246
// Second pass over MI (walking the block bottom-up): kill the liveness of
// registers defined here, then mark registers used here as live (a "kill"
// seen from below). The interleaved numbered lines below are the static
// analyzer's path annotations from the report and are preserved verbatim.
247void CriticalAntiDepBreaker::ScanInstruction(MachineInstr &MI, unsigned Count) {
 248 // Update liveness.
 249 // Proceeding upwards, registers that are defed but not used in this
 250 // instruction are now dead.
 251 assert(!MI.isKill() && "Attempting to scan a kill instruction")(static_cast<void> (0));
 252
 253 if (!TII->isPredicated(MI)) {
20
Assuming the condition is true
21
Taking true branch
 254 // Predicated defs are modeled as read + write, i.e. similar to two
 255 // address updates.
 256 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
22
Loop condition is true. Entering loop body
 257 MachineOperand &MO = MI.getOperand(i);
 258
 259 if (MO.isRegMask()) {
23
Taking true branch
 260 auto ClobbersPhysRegAndSubRegs = [&](unsigned PhysReg) {
 261 for (MCSubRegIterator SRI(PhysReg, TRI, true); SRI.isValid(); ++SRI)
 262 if (!MO.clobbersPhysReg(*SRI))
 263 return false;
 264
 265 return true;
 266 };
 267
 // A regmask operand fully clobbering a register (and all its
 // sub-registers) makes that register dead at this point.
 268 for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) {
24
Assuming 'i' is not equal to 'e'
25
Loop condition is true. Entering loop body
28
Assuming 'i' is equal to 'e'
29
Loop condition is false. Execution continues on line 279
 269 if (ClobbersPhysRegAndSubRegs(i)) {
26
Assuming the condition is false
27
Taking false branch
 270 DefIndices[i] = Count;
 271 KillIndices[i] = ~0u;
 272 KeepRegs.reset(i);
 273 Classes[i] = nullptr;
 274 RegRefs.erase(i);
 275 }
 276 }
 277 }
 278
 279 if (!MO.isReg()) continue;
30
Taking false branch
 280 Register Reg = MO.getReg();
 281 if (Reg == 0) continue;
31
Taking false branch
 282 if (!MO.isDef()) continue;
32
Assuming the condition is false
33
Taking false branch
 283
 284 // Ignore two-addr defs.
 285 if (MI.isRegTiedToUseOperand(i))
34
Taking false branch
 286 continue;
 287
 288 // If we've already marked this reg as unchangeable, don't remove
 289 // it or any of its subregs from KeepRegs.
 290 bool Keep = KeepRegs.test(Reg);
 291
 292 // For the reg itself and all subregs: update the def to current;
 293 // reset the kill state, any restrictions, and references.
 294 for (MCSubRegIterator SRI(Reg, TRI, true); SRI.isValid(); ++SRI) {
35
Loop condition is false. Execution continues on line 304
 295 unsigned SubregReg = *SRI;
 296 DefIndices[SubregReg] = Count;
 297 KillIndices[SubregReg] = ~0u;
 298 Classes[SubregReg] = nullptr;
 299 RegRefs.erase(SubregReg);
 300 if (!Keep)
 301 KeepRegs.reset(SubregReg);
 302 }
 303 // Conservatively mark super-registers as unusable.
 // NOTE(review): the analyzer's reported path enters the
 // MCSuperRegIterator constructor here and flags a "dereference of null
 // pointer" at MCRegisterInfo.h:217 — confirm whether TRI can actually
 // be null on this path or whether this is a false positive.
 304 for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR)
36
Calling constructor for 'MCSuperRegIterator'
 305 Classes[*SR] = reinterpret_cast<TargetRegisterClass *>(-1);
 306 }
 307 }
 // Uses scanned after defs: walking bottom-up, a use below a point where
 // the register was dead marks the start (kill) of a live range.
 308 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
 309 MachineOperand &MO = MI.getOperand(i);
 310 if (!MO.isReg()) continue;
 311 Register Reg = MO.getReg();
 312 if (Reg == 0) continue;
 313 if (!MO.isUse()) continue;
 314
 315 const TargetRegisterClass *NewRC = nullptr;
 316 if (i < MI.getDesc().getNumOperands())
 317 NewRC = TII->getRegClass(MI.getDesc(), i, TRI, MF);
 318
 319 // For now, only allow the register to be changed if its register
 320 // class is consistent across all uses.
 321 if (!Classes[Reg] && NewRC)
 322 Classes[Reg] = NewRC;
 323 else if (!NewRC || Classes[Reg] != NewRC)
 324 Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
 325
 326 RegRefs.insert(std::make_pair(Reg, &MO));
 327
 328 // It wasn't previously live but now it is, this is a kill.
 329 // Repeat for all aliases.
 330 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
 331 unsigned AliasReg = *AI;
 332 if (KillIndices[AliasReg] == ~0u) {
 333 KillIndices[AliasReg] = Count;
 334 DefIndices[AliasReg] = ~0u;
 335 }
 336 }
 337 }
 338}
339
340// Check all machine operands that reference the antidependent register and must
341// be replaced by NewReg. Return true if any of their parent instructions may
342// clobber the new register.
343//
344// Note: AntiDepReg may be referenced by a two-address instruction such that
345// it's use operand is tied to a def operand. We guard against the case in which
346// the two-address instruction also defines NewReg, as may happen with
347// pre/postincrement loads. In this case, both the use and def operands are in
348// RegRefs because the def is inserted by PrescanInstruction and not erased
349// during ScanInstruction. So checking for an instruction with definitions of
350// both NewReg and AntiDepReg covers it.
// Walk every recorded reference [RegRefBegin, RegRefEnd) to the
// anti-dependent register and return true if any referencing instruction
// could clobber NewReg (regmask, multiple defs, earlyclobber, inline asm) —
// i.e. NewReg is not a safe rename target.
351bool
352CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
 353 RegRefIter RegRefEnd,
 354 unsigned NewReg) {
 355 for (RegRefIter I = RegRefBegin; I != RegRefEnd; ++I ) {
 356 MachineOperand *RefOper = I->second;
 357
 358 // Don't allow the instruction defining AntiDepReg to earlyclobber its
 359 // operands, in case they may be assigned to NewReg. In this case antidep
 360 // breaking must fail, but it's too rare to bother optimizing.
 361 if (RefOper->isDef() && RefOper->isEarlyClobber())
 362 return true;
 363
 364 // Handle cases in which this instruction defines NewReg.
 365 MachineInstr *MI = RefOper->getParent();
 366 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
 367 const MachineOperand &CheckOper = MI->getOperand(i);
 368
 369 if (CheckOper.isRegMask() && CheckOper.clobbersPhysReg(NewReg))
 370 return true;
 371
 372 if (!CheckOper.isReg() || !CheckOper.isDef() ||
 373 CheckOper.getReg() != NewReg)
 374 continue;
 375
 376 // Don't allow the instruction to define NewReg and AntiDepReg.
 377 // When AntiDepReg is renamed it will be an illegal op.
 378 if (RefOper->isDef())
 379 return true;
 380
 381 // Don't allow an instruction using AntiDepReg to be earlyclobbered by
 382 // NewReg.
 383 if (CheckOper.isEarlyClobber())
 384 return true;
 385
 386 // Don't allow inline asm to define NewReg at all. Who knows what it's
 387 // doing with it.
 388 if (MI->isInlineAsm())
 389 return true;
 390 }
 391 }
 392 return false;
 393}
394
// Pick a register from RC's allocation order that can replace AntiDepReg:
// it must be free (not live), renamable, not the previous replacement
// (LastNewReg), not clobbered by any recorded reference, and not overlapping
// any register in Forbid. Returns 0 when no suitable register exists.
395unsigned CriticalAntiDepBreaker::
396findSuitableFreeRegister(RegRefIter RegRefBegin,
 397 RegRefIter RegRefEnd,
 398 unsigned AntiDepReg,
 399 unsigned LastNewReg,
 400 const TargetRegisterClass *RC,
 401 SmallVectorImpl<unsigned> &Forbid) {
 402 ArrayRef<MCPhysReg> Order = RegClassInfo.getOrder(RC);
 403 for (unsigned i = 0; i != Order.size(); ++i) {
 404 unsigned NewReg = Order[i];
 405 // Don't replace a register with itself.
 406 if (NewReg == AntiDepReg) continue;
 407 // Don't replace a register with one that was recently used to repair
 408 // an anti-dependence with this AntiDepReg, because that would
 409 // re-introduce that anti-dependence.
 410 if (NewReg == LastNewReg) continue;
 411 // If any instructions that define AntiDepReg also define the NewReg, it's
 412 // not suitable. For example, Instruction with multiple definitions can
 413 // result in this condition.
 414 if (isNewRegClobberedByRefs(RegRefBegin, RegRefEnd, NewReg)) continue;
 415 // If NewReg is dead and NewReg's most recent def is not before
 416 // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
 417 assert(((KillIndices[AntiDepReg] == ~0u) != (DefIndices[AntiDepReg] == ~0u))(static_cast<void> (0))
 418 && "Kill and Def maps aren't consistent for AntiDepReg!")(static_cast<void> (0));
 419 assert(((KillIndices[NewReg] == ~0u) != (DefIndices[NewReg] == ~0u))(static_cast<void> (0))
 420 && "Kill and Def maps aren't consistent for NewReg!")(static_cast<void> (0));
 421 if (KillIndices[NewReg] != ~0u ||
 422 Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
 423 KillIndices[AntiDepReg] > DefIndices[NewReg])
 424 continue;
 425 // If NewReg overlaps any of the forbidden registers, we can't use it.
 426 bool Forbidden = false;
 427 for (unsigned R : Forbid)
 428 if (TRI->regsOverlap(NewReg, R)) {
 429 Forbidden = true;
 430 break;
 431 }
 432 if (Forbidden) continue;
 433 return NewReg;
 434 }
 435
 436 // No registers are free and available!
 437 return 0;
 438}
439
// Main entry point: walk the instructions in [Begin, End) bottom-up,
// following the critical path through the SUnit graph, and try to break one
// anti-dependence per instruction by renaming the anti-dependent register.
// Returns the number of anti-dependencies broken. The interleaved numbered
// lines are the static analyzer's path annotations, preserved verbatim.
440unsigned CriticalAntiDepBreaker::
441BreakAntiDependencies(const std::vector<SUnit> &SUnits,
 442 MachineBasicBlock::iterator Begin,
 443 MachineBasicBlock::iterator End,
 444 unsigned InsertPosIndex,
 445 DbgValueVector &DbgValues) {
 446 // The code below assumes that there is at least one instruction,
 447 // so just duck out immediately if the block is empty.
 448 if (SUnits.empty()) return 0;
1
Assuming the condition is false
2
Taking false branch
 449
 450 // Keep a map of the MachineInstr*'s back to the SUnit representing them.
 451 // This is used for updating debug information.
 452 //
 453 // FIXME: Replace this with the existing map in ScheduleDAGInstrs::MISUnitMap
 454 DenseMap<MachineInstr *, const SUnit *> MISUnitMap;
 455
 456 // Find the node at the bottom of the critical path.
 457 const SUnit *Max = nullptr;
 458 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
3
Assuming 'i' is not equal to 'e'
4
Loop condition is true. Entering loop body
5
Assuming 'i' is equal to 'e'
6
Loop condition is false. Execution continues on line 464
 459 const SUnit *SU = &SUnits[i];
 460 MISUnitMap[SU->getInstr()] = SU;
 461 if (!Max
4.1
'Max' is null
4.1
'Max' is null
4.1
'Max' is null
|| SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
 462 Max = SU;
 463 }
 464 assert(Max && "Failed to find bottom of the critical path")(static_cast<void> (0));
 465
 466#ifndef NDEBUG1
 467 {
 468 LLVM_DEBUG(dbgs() << "Critical path has total latency "do { } while (false)
 469 << (Max->getDepth() + Max->Latency) << "\n")do { } while (false);
 470 LLVM_DEBUG(dbgs() << "Available regs:")do { } while (false);
 471 for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
 472 if (KillIndices[Reg] == ~0u)
 473 LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI))do { } while (false);
 474 }
 475 LLVM_DEBUG(dbgs() << '\n')do { } while (false);
 476 }
 477#endif
 478
 479 // Track progress along the critical path through the SUnit graph as we walk
 480 // the instructions.
 481 const SUnit *CriticalPathSU = Max;
 482 MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
 483
 484 // Consider this pattern:
 485 // A = ...
 486 // ... = A
 487 // A = ...
 488 // ... = A
 489 // A = ...
 490 // ... = A
 491 // A = ...
 492 // ... = A
 493 // There are three anti-dependencies here, and without special care,
 494 // we'd break all of them using the same register:
 495 // A = ...
 496 // ... = A
 497 // B = ...
 498 // ... = B
 499 // B = ...
 500 // ... = B
 501 // B = ...
 502 // ... = B
 503 // because at each anti-dependence, B is the first register that
 504 // isn't A which is free. This re-introduces anti-dependencies
 505 // at all but one of the original anti-dependencies that we were
 506 // trying to break. To avoid this, keep track of the most recent
 507 // register that each register was replaced with, avoid
 508 // using it to repair an anti-dependence on the same register.
 509 // This lets us produce this:
 510 // A = ...
 511 // ... = A
 512 // B = ...
 513 // ... = B
 514 // C = ...
 515 // ... = C
 516 // B = ...
 517 // ... = B
 518 // This still has an anti-dependence on B, but at least it isn't on the
 519 // original critical path.
 520 //
 521 // TODO: If we tracked more than one register here, we could potentially
 522 // fix that remaining critical edge too. This is a little more involved,
 523 // because unlike the most recent register, less recent registers should
 524 // still be considered, though only if no other registers are available.
 525 std::vector<unsigned> LastNewReg(TRI->getNumRegs(), 0);
 526
 527 // Attempt to break anti-dependence edges on the critical path. Walk the
 528 // instructions from the bottom up, tracking information about liveness
 529 // as we go to help determine which registers are available.
 530 unsigned Broken = 0;
 531 unsigned Count = InsertPosIndex - 1;
 532 for (MachineBasicBlock::iterator I = End, E = Begin; I != E; --Count) {
7
Loop condition is true. Entering loop body
 533 MachineInstr &MI = *--I;
 534 // Kill instructions can define registers but are really nops, and there
 535 // might be a real definition earlier that needs to be paired with uses
 536 // dominated by this kill.
 537
 538 // FIXME: It may be possible to remove the isKill() restriction once PR18663
 539 // has been properly fixed. There can be value in processing kills as seen
 540 // in the AggressiveAntiDepBreaker class.
 541 if (MI.isDebugInstr() || MI.isKill())
8
Taking false branch
 542 continue;
 543
 544 // Check if this instruction has a dependence on the critical path that
 545 // is an anti-dependence that we may be able to break. If it is, set
 546 // AntiDepReg to the non-zero register associated with the anti-dependence.
 547 //
 548 // We limit our attention to the critical path as a heuristic to avoid
 549 // breaking anti-dependence edges that aren't going to significantly
 550 // impact the overall schedule. There are a limited number of registers
 551 // and we want to save them for the important edges.
 552 //
 553 // TODO: Instructions with multiple defs could have multiple
 554 // anti-dependencies. The current code here only knows how to break one
 555 // edge per instruction. Note that we'd have to be able to break all of
 556 // the anti-dependencies in an instruction in order to be effective.
 557 unsigned AntiDepReg = 0;
 558 if (&MI == CriticalPathMI) {
9
Assuming the condition is false
10
Taking false branch
 559 if (const SDep *Edge = CriticalPathStep(CriticalPathSU)) {
 560 const SUnit *NextSU = Edge->getSUnit();
 561
 562 // Only consider anti-dependence edges.
 563 if (Edge->getKind() == SDep::Anti) {
 564 AntiDepReg = Edge->getReg();
 565 assert(AntiDepReg != 0 && "Anti-dependence on reg0?")(static_cast<void> (0));
 566 if (!MRI.isAllocatable(AntiDepReg))
 567 // Don't break anti-dependencies on non-allocatable registers.
 568 AntiDepReg = 0;
 569 else if (KeepRegs.test(AntiDepReg))
 570 // Don't break anti-dependencies if a use down below requires
 571 // this exact register.
 572 AntiDepReg = 0;
 573 else {
 574 // If the SUnit has other dependencies on the SUnit that it
 575 // anti-depends on, don't bother breaking the anti-dependency
 576 // since those edges would prevent such units from being
 577 // scheduled past each other regardless.
 578 //
 579 // Also, if there are dependencies on other SUnits with the
 580 // same register as the anti-dependency, don't attempt to
 581 // break it.
 582 for (const SDep &P : CriticalPathSU->Preds)
 583 if (P.getSUnit() == NextSU
 584 ? (P.getKind() != SDep::Anti || P.getReg() != AntiDepReg)
 585 : (P.getKind() == SDep::Data &&
 586 P.getReg() == AntiDepReg)) {
 587 AntiDepReg = 0;
 588 break;
 589 }
 590 }
 591 }
 592 CriticalPathSU = NextSU;
 593 CriticalPathMI = CriticalPathSU->getInstr();
 594 } else {
 595 // We've reached the end of the critical path.
 596 CriticalPathSU = nullptr;
 597 CriticalPathMI = nullptr;
 598 }
 599 }
 600
 601 PrescanInstruction(MI);
 602
 603 SmallVector<unsigned, 2> ForbidRegs;
 604
 605 // If MI's defs have a special allocation requirement, don't allow
 606 // any def registers to be changed. Also assume all registers
 607 // defined in a call must not be changed (ABI).
 608 if (MI.isCall() || MI.hasExtraDefRegAllocReq() || TII->isPredicated(MI))
11
Assuming the condition is false
12
Assuming the condition is false
13
Assuming the condition is false
14
Taking false branch
 609 // If this instruction's defs have special allocation requirement, don't
 610 // break this anti-dependency.
 611 AntiDepReg = 0;
 612 else if (AntiDepReg
14.1
'AntiDepReg' is 0
14.1
'AntiDepReg' is 0
14.1
'AntiDepReg' is 0
) {
 613 // If this instruction has a use of AntiDepReg, breaking it
 614 // is invalid. If the instruction defines other registers,
 615 // save a list of them so that we don't pick a new register
 616 // that overlaps any of them.
 617 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
 618 MachineOperand &MO = MI.getOperand(i);
 619 if (!MO.isReg()) continue;
 620 Register Reg = MO.getReg();
 621 if (Reg == 0) continue;
 622 if (MO.isUse() && TRI->regsOverlap(AntiDepReg, Reg)) {
 623 AntiDepReg = 0;
 624 break;
 625 }
 626 if (MO.isDef() && Reg != AntiDepReg)
 627 ForbidRegs.push_back(Reg);
 628 }
 629 }
 630
 631 // Determine AntiDepReg's register class, if it is live and is
 632 // consistently used within a single class.
 633 const TargetRegisterClass *RC = AntiDepReg
15.1
'AntiDepReg' is equal to 0
15.1
'AntiDepReg' is equal to 0
15.1
'AntiDepReg' is equal to 0
!= 0 ? Classes[AntiDepReg]
15
Taking false branch
16
'?' condition is false
 634 : nullptr;
 635 assert((AntiDepReg == 0 || RC != nullptr) &&(static_cast<void> (0))
 636 "Register should be live if it's causing an anti-dependence!")(static_cast<void> (0));
 637 if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
17
Taking false branch
 638 AntiDepReg = 0;
 639
 640 // Look for a suitable register to use to break the anti-dependence.
 641 //
 642 // TODO: Instead of picking the first free register, consider which might
 643 // be the best.
 644 if (AntiDepReg
17.1
'AntiDepReg' is equal to 0
17.1
'AntiDepReg' is equal to 0
17.1
'AntiDepReg' is equal to 0
!= 0) {
18
Taking false branch
 645 std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
 646 std::multimap<unsigned, MachineOperand *>::iterator>
 647 Range = RegRefs.equal_range(AntiDepReg);
 648 if (unsigned NewReg = findSuitableFreeRegister(Range.first, Range.second,
 649 AntiDepReg,
 650 LastNewReg[AntiDepReg],
 651 RC, ForbidRegs)) {
 652 LLVM_DEBUG(dbgs() << "Breaking anti-dependence edge on "do { } while (false)
 653 << printReg(AntiDepReg, TRI) << " with "do { } while (false)
 654 << RegRefs.count(AntiDepReg) << " references"do { } while (false)
 655 << " using " << printReg(NewReg, TRI) << "!\n")do { } while (false);
 656
 657 // Update the references to the old register to refer to the new
 658 // register.
 659 for (std::multimap<unsigned, MachineOperand *>::iterator
 660 Q = Range.first, QE = Range.second; Q != QE; ++Q) {
 661 Q->second->setReg(NewReg);
 662 // If the SU for the instruction being updated has debug information
 663 // related to the anti-dependency register, make sure to update that
 664 // as well.
 665 const SUnit *SU = MISUnitMap[Q->second->getParent()];
 666 if (!SU) continue;
 667 UpdateDbgValues(DbgValues, Q->second->getParent(),
 668 AntiDepReg, NewReg);
 669 }
 670
 671 // We just went back in time and modified history; the
 672 // liveness information for the anti-dependence reg is now
 673 // inconsistent. Set the state as if it were dead.
 674 Classes[NewReg] = Classes[AntiDepReg];
 675 DefIndices[NewReg] = DefIndices[AntiDepReg];
 676 KillIndices[NewReg] = KillIndices[AntiDepReg];
 677 assert(((KillIndices[NewReg] == ~0u) !=(static_cast<void> (0))
 678 (DefIndices[NewReg] == ~0u)) &&(static_cast<void> (0))
 679 "Kill and Def maps aren't consistent for NewReg!")(static_cast<void> (0));
 680
 681 Classes[AntiDepReg] = nullptr;
 682 DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
 683 KillIndices[AntiDepReg] = ~0u;
 684 assert(((KillIndices[AntiDepReg] == ~0u) !=(static_cast<void> (0))
 685 (DefIndices[AntiDepReg] == ~0u)) &&(static_cast<void> (0))
 686 "Kill and Def maps aren't consistent for AntiDepReg!")(static_cast<void> (0));
 687
 688 RegRefs.erase(AntiDepReg);
 689 LastNewReg[AntiDepReg] = NewReg;
 690 ++Broken;
 691 }
 692 }
 693
 694 ScanInstruction(MI, Count);
19
Calling 'CriticalAntiDepBreaker::ScanInstruction'
 695 }
 696
 697 return Broken;
 698}
699
700AntiDepBreaker *
701llvm::createCriticalAntiDepBreaker(MachineFunction &MFi,
702 const RegisterClassInfo &RCI) {
703 return new CriticalAntiDepBreaker(MFi, RCI);
704}

/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/bits/stl_vector.h

1// Vector implementation -*- C++ -*-
2
3// Copyright (C) 2001-2020 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/*
26 *
27 * Copyright (c) 1994
28 * Hewlett-Packard Company
29 *
30 * Permission to use, copy, modify, distribute and sell this software
31 * and its documentation for any purpose is hereby granted without fee,
32 * provided that the above copyright notice appear in all copies and
33 * that both that copyright notice and this permission notice appear
34 * in supporting documentation. Hewlett-Packard Company makes no
35 * representations about the suitability of this software for any
36 * purpose. It is provided "as is" without express or implied warranty.
37 *
38 *
39 * Copyright (c) 1996
40 * Silicon Graphics Computer Systems, Inc.
41 *
42 * Permission to use, copy, modify, distribute and sell this software
43 * and its documentation for any purpose is hereby granted without fee,
44 * provided that the above copyright notice appear in all copies and
45 * that both that copyright notice and this permission notice appear
46 * in supporting documentation. Silicon Graphics makes no
47 * representations about the suitability of this software for any
48 * purpose. It is provided "as is" without express or implied warranty.
49 */
50
51/** @file bits/stl_vector.h
52 * This is an internal header file, included by other library headers.
53 * Do not attempt to use it directly. @headername{vector}
54 */
55
56#ifndef _STL_VECTOR_H1
57#define _STL_VECTOR_H1 1
58
59#include <bits/stl_iterator_base_funcs.h>
60#include <bits/functexcept.h>
61#include <bits/concept_check.h>
62#if __cplusplus201402L >= 201103L
63#include <initializer_list>
64#endif
65#if __cplusplus201402L > 201703L
66# include <compare>
67#endif
68
69#include <debug/assertions.h>
70
71#if _GLIBCXX_SANITIZE_STD_ALLOCATOR && _GLIBCXX_SANITIZE_VECTOR
72extern "C" void
73__sanitizer_annotate_contiguous_container(const void*, const void*,
74 const void*, const void*);
75#endif
76
77namespace std _GLIBCXX_VISIBILITY(default)__attribute__ ((__visibility__ ("default")))
78{
79_GLIBCXX_BEGIN_NAMESPACE_VERSION
80_GLIBCXX_BEGIN_NAMESPACE_CONTAINER
81
82 /// See bits/stl_deque.h's _Deque_base for an explanation.
83 template<typename _Tp, typename _Alloc>
84 struct _Vector_base
85 {
86 typedef typename __gnu_cxx::__alloc_traits<_Alloc>::template
87 rebind<_Tp>::other _Tp_alloc_type;
88 typedef typename __gnu_cxx::__alloc_traits<_Tp_alloc_type>::pointer
89 pointer;
90
91 struct _Vector_impl_data
92 {
93 pointer _M_start;
94 pointer _M_finish;
95 pointer _M_end_of_storage;
96
97 _Vector_impl_data() _GLIBCXX_NOEXCEPTnoexcept
98 : _M_start(), _M_finish(), _M_end_of_storage()
99 { }
100
101#if __cplusplus201402L >= 201103L
102 _Vector_impl_data(_Vector_impl_data&& __x) noexcept
103 : _M_start(__x._M_start), _M_finish(__x._M_finish),
104 _M_end_of_storage(__x._M_end_of_storage)
105 { __x._M_start = __x._M_finish = __x._M_end_of_storage = pointer(); }
106#endif
107
108 void
109 _M_copy_data(_Vector_impl_data const& __x) _GLIBCXX_NOEXCEPTnoexcept
110 {
111 _M_start = __x._M_start;
112 _M_finish = __x._M_finish;
113 _M_end_of_storage = __x._M_end_of_storage;
114 }
115
116 void
117 _M_swap_data(_Vector_impl_data& __x) _GLIBCXX_NOEXCEPTnoexcept
118 {
119 // Do not use std::swap(_M_start, __x._M_start), etc as it loses
120 // information used by TBAA.
121 _Vector_impl_data __tmp;
122 __tmp._M_copy_data(*this);
123 _M_copy_data(__x);
124 __x._M_copy_data(__tmp);
125 }
126 };
127
128 struct _Vector_impl
129 : public _Tp_alloc_type, public _Vector_impl_data
130 {
131 _Vector_impl() _GLIBCXX_NOEXCEPT_IF(noexcept(is_nothrow_default_constructible<_Tp_alloc_type>
::value)
132 is_nothrow_default_constructible<_Tp_alloc_type>::value)noexcept(is_nothrow_default_constructible<_Tp_alloc_type>
::value)
133 : _Tp_alloc_type()
134 { }
135
136 _Vector_impl(_Tp_alloc_type const& __a) _GLIBCXX_NOEXCEPTnoexcept
137 : _Tp_alloc_type(__a)
138 { }
139
140#if __cplusplus201402L >= 201103L
141 // Not defaulted, to enforce noexcept(true) even when
142 // !is_nothrow_move_constructible<_Tp_alloc_type>.
143 _Vector_impl(_Vector_impl&& __x) noexcept
144 : _Tp_alloc_type(std::move(__x)), _Vector_impl_data(std::move(__x))
145 { }
146
147 _Vector_impl(_Tp_alloc_type&& __a) noexcept
148 : _Tp_alloc_type(std::move(__a))
149 { }
150
151 _Vector_impl(_Tp_alloc_type&& __a, _Vector_impl&& __rv) noexcept
152 : _Tp_alloc_type(std::move(__a)), _Vector_impl_data(std::move(__rv))
153 { }
154#endif
155
156#if _GLIBCXX_SANITIZE_STD_ALLOCATOR && _GLIBCXX_SANITIZE_VECTOR
157 template<typename = _Tp_alloc_type>
158 struct _Asan
159 {
160 typedef typename __gnu_cxx::__alloc_traits<_Tp_alloc_type>
161 ::size_type size_type;
162
163 static void _S_shrink(_Vector_impl&, size_type) { }
164 static void _S_on_dealloc(_Vector_impl&) { }
165
166 typedef _Vector_impl& _Reinit;
167
168 struct _Grow
169 {
170 _Grow(_Vector_impl&, size_type) { }
171 void _M_grew(size_type) { }
172 };
173 };
174
175 // Enable ASan annotations for memory obtained from std::allocator.
176 template<typename _Up>
177 struct _Asan<allocator<_Up> >
178 {
179 typedef typename __gnu_cxx::__alloc_traits<_Tp_alloc_type>
180 ::size_type size_type;
181
182 // Adjust ASan annotation for [_M_start, _M_end_of_storage) to
183 // mark end of valid region as __curr instead of __prev.
184 static void
185 _S_adjust(_Vector_impl& __impl, pointer __prev, pointer __curr)
186 {
187 __sanitizer_annotate_contiguous_container(__impl._M_start,
188 __impl._M_end_of_storage, __prev, __curr);
189 }
190
191 static void
192 _S_grow(_Vector_impl& __impl, size_type __n)
193 { _S_adjust(__impl, __impl._M_finish, __impl._M_finish + __n); }
194
195 static void
196 _S_shrink(_Vector_impl& __impl, size_type __n)
197 { _S_adjust(__impl, __impl._M_finish + __n, __impl._M_finish); }
198
199 static void
200 _S_on_dealloc(_Vector_impl& __impl)
201 {
202 if (__impl._M_start)
203 _S_adjust(__impl, __impl._M_finish, __impl._M_end_of_storage);
204 }
205
206 // Used on reallocation to tell ASan unused capacity is invalid.
207 struct _Reinit
208 {
209 explicit _Reinit(_Vector_impl& __impl) : _M_impl(__impl)
210 {
211 // Mark unused capacity as valid again before deallocating it.
212 _S_on_dealloc(_M_impl);
213 }
214
215 ~_Reinit()
216 {
217 // Mark unused capacity as invalid after reallocation.
218 if (_M_impl._M_start)
219 _S_adjust(_M_impl, _M_impl._M_end_of_storage,
220 _M_impl._M_finish);
221 }
222
223 _Vector_impl& _M_impl;
224
225#if __cplusplus201402L >= 201103L
226 _Reinit(const _Reinit&) = delete;
227 _Reinit& operator=(const _Reinit&) = delete;
228#endif
229 };
230
231 // Tell ASan when unused capacity is initialized to be valid.
232 struct _Grow
233 {
234 _Grow(_Vector_impl& __impl, size_type __n)
235 : _M_impl(__impl), _M_n(__n)
236 { _S_grow(_M_impl, __n); }
237
238 ~_Grow() { if (_M_n) _S_shrink(_M_impl, _M_n); }
239
240 void _M_grew(size_type __n) { _M_n -= __n; }
241
242#if __cplusplus201402L >= 201103L
243 _Grow(const _Grow&) = delete;
244 _Grow& operator=(const _Grow&) = delete;
245#endif
246 private:
247 _Vector_impl& _M_impl;
248 size_type _M_n;
249 };
250 };
251
252#define _GLIBCXX_ASAN_ANNOTATE_REINIT \
253 typename _Base::_Vector_impl::template _Asan<>::_Reinit const \
254 __attribute__((__unused__)) __reinit_guard(this->_M_impl)
255#define _GLIBCXX_ASAN_ANNOTATE_GROW(n) \
256 typename _Base::_Vector_impl::template _Asan<>::_Grow \
257 __attribute__((__unused__)) __grow_guard(this->_M_impl, (n))
258#define _GLIBCXX_ASAN_ANNOTATE_GREW(n) __grow_guard._M_grew(n)
259#define _GLIBCXX_ASAN_ANNOTATE_SHRINK(n) \
260 _Base::_Vector_impl::template _Asan<>::_S_shrink(this->_M_impl, n)
261#define _GLIBCXX_ASAN_ANNOTATE_BEFORE_DEALLOC \
262 _Base::_Vector_impl::template _Asan<>::_S_on_dealloc(this->_M_impl)
263#else // ! (_GLIBCXX_SANITIZE_STD_ALLOCATOR && _GLIBCXX_SANITIZE_VECTOR)
264#define _GLIBCXX_ASAN_ANNOTATE_REINIT
265#define _GLIBCXX_ASAN_ANNOTATE_GROW(n)
266#define _GLIBCXX_ASAN_ANNOTATE_GREW(n)
267#define _GLIBCXX_ASAN_ANNOTATE_SHRINK(n)
268#define _GLIBCXX_ASAN_ANNOTATE_BEFORE_DEALLOC
269#endif // _GLIBCXX_SANITIZE_STD_ALLOCATOR && _GLIBCXX_SANITIZE_VECTOR
270 };
271
272 public:
273 typedef _Alloc allocator_type;
274
275 _Tp_alloc_type&
276 _M_get_Tp_allocator() _GLIBCXX_NOEXCEPTnoexcept
277 { return this->_M_impl; }
278
279 const _Tp_alloc_type&
280 _M_get_Tp_allocator() const _GLIBCXX_NOEXCEPTnoexcept
281 { return this->_M_impl; }
282
283 allocator_type
284 get_allocator() const _GLIBCXX_NOEXCEPTnoexcept
285 { return allocator_type(_M_get_Tp_allocator()); }
286
287#if __cplusplus201402L >= 201103L
288 _Vector_base() = default;
289#else
290 _Vector_base() { }
291#endif
292
293 _Vector_base(const allocator_type& __a) _GLIBCXX_NOEXCEPTnoexcept
294 : _M_impl(__a) { }
295
296 // Kept for ABI compatibility.
297#if !_GLIBCXX_INLINE_VERSION0
298 _Vector_base(size_t __n)
299 : _M_impl()
300 { _M_create_storage(__n); }
301#endif
302
303 _Vector_base(size_t __n, const allocator_type& __a)
304 : _M_impl(__a)
305 { _M_create_storage(__n); }
306
307#if __cplusplus201402L >= 201103L
308 _Vector_base(_Vector_base&&) = default;
309
310 // Kept for ABI compatibility.
311# if !_GLIBCXX_INLINE_VERSION0
312 _Vector_base(_Tp_alloc_type&& __a) noexcept
313 : _M_impl(std::move(__a)) { }
314
315 _Vector_base(_Vector_base&& __x, const allocator_type& __a)
316 : _M_impl(__a)
317 {
318 if (__x.get_allocator() == __a)
319 this->_M_impl._M_swap_data(__x._M_impl);
320 else
321 {
322 size_t __n = __x._M_impl._M_finish - __x._M_impl._M_start;
323 _M_create_storage(__n);
324 }
325 }
326# endif
327
328 _Vector_base(const allocator_type& __a, _Vector_base&& __x)
329 : _M_impl(_Tp_alloc_type(__a), std::move(__x._M_impl))
330 { }
331#endif
332
333 ~_Vector_base() _GLIBCXX_NOEXCEPTnoexcept
334 {
335 _M_deallocate(_M_impl._M_start,
336 _M_impl._M_end_of_storage - _M_impl._M_start);
337 }
338
339 public:
340 _Vector_impl _M_impl;
341
342 pointer
343 _M_allocate(size_t __n)
344 {
345 typedef __gnu_cxx::__alloc_traits<_Tp_alloc_type> _Tr;
346 return __n != 0 ? _Tr::allocate(_M_impl, __n) : pointer();
347 }
348
349 void
350 _M_deallocate(pointer __p, size_t __n)
351 {
352 typedef __gnu_cxx::__alloc_traits<_Tp_alloc_type> _Tr;
353 if (__p)
354 _Tr::deallocate(_M_impl, __p, __n);
355 }
356
357 protected:
358 void
359 _M_create_storage(size_t __n)
360 {
361 this->_M_impl._M_start = this->_M_allocate(__n);
362 this->_M_impl._M_finish = this->_M_impl._M_start;
363 this->_M_impl._M_end_of_storage = this->_M_impl._M_start + __n;
364 }
365 };
366
367 /**
368 * @brief A standard container which offers fixed time access to
369 * individual elements in any order.
370 *
371 * @ingroup sequences
372 *
373 * @tparam _Tp Type of element.
374 * @tparam _Alloc Allocator type, defaults to allocator<_Tp>.
375 *
376 * Meets the requirements of a <a href="tables.html#65">container</a>, a
377 * <a href="tables.html#66">reversible container</a>, and a
378 * <a href="tables.html#67">sequence</a>, including the
379 * <a href="tables.html#68">optional sequence requirements</a> with the
380 * %exception of @c push_front and @c pop_front.
381 *
382 * In some terminology a %vector can be described as a dynamic
383 * C-style array, it offers fast and efficient access to individual
384 * elements in any order and saves the user from worrying about
385 * memory and size allocation. Subscripting ( @c [] ) access is
386 * also provided as with C-style arrays.
387 */
388 template<typename _Tp, typename _Alloc = std::allocator<_Tp> >
389 class vector : protected _Vector_base<_Tp, _Alloc>
390 {
391#ifdef _GLIBCXX_CONCEPT_CHECKS
392 // Concept requirements.
393 typedef typename _Alloc::value_type _Alloc_value_type;
394# if __cplusplus201402L < 201103L
395 __glibcxx_class_requires(_Tp, _SGIAssignableConcept)
396# endif
397 __glibcxx_class_requires2(_Tp, _Alloc_value_type, _SameTypeConcept)
398#endif
399
400#if __cplusplus201402L >= 201103L
401 static_assert(is_same<typename remove_cv<_Tp>::type, _Tp>::value,
402 "std::vector must have a non-const, non-volatile value_type");
403# if __cplusplus201402L > 201703L || defined __STRICT_ANSI__1
404 static_assert(is_same<typename _Alloc::value_type, _Tp>::value,
405 "std::vector must have the same value_type as its allocator");
406# endif
407#endif
408
409 typedef _Vector_base<_Tp, _Alloc> _Base;
410 typedef typename _Base::_Tp_alloc_type _Tp_alloc_type;
411 typedef __gnu_cxx::__alloc_traits<_Tp_alloc_type> _Alloc_traits;
412
413 public:
414 typedef _Tp value_type;
415 typedef typename _Base::pointer pointer;
416 typedef typename _Alloc_traits::const_pointer const_pointer;
417 typedef typename _Alloc_traits::reference reference;
418 typedef typename _Alloc_traits::const_reference const_reference;
419 typedef __gnu_cxx::__normal_iterator<pointer, vector> iterator;
420 typedef __gnu_cxx::__normal_iterator<const_pointer, vector>
421 const_iterator;
422 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
423 typedef std::reverse_iterator<iterator> reverse_iterator;
424 typedef size_t size_type;
425 typedef ptrdiff_t difference_type;
426 typedef _Alloc allocator_type;
427
428 private:
429#if __cplusplus201402L >= 201103L
430 static constexpr bool
431 _S_nothrow_relocate(true_type)
432 {
433 return noexcept(std::__relocate_a(std::declval<pointer>(),
434 std::declval<pointer>(),
435 std::declval<pointer>(),
436 std::declval<_Tp_alloc_type&>()));
437 }
438
439 static constexpr bool
440 _S_nothrow_relocate(false_type)
441 { return false; }
442
443 static constexpr bool
444 _S_use_relocate()
445 {
446 // Instantiating std::__relocate_a might cause an error outside the
447 // immediate context (in __relocate_object_a's noexcept-specifier),
448 // so only do it if we know the type can be move-inserted into *this.
449 return _S_nothrow_relocate(__is_move_insertable<_Tp_alloc_type>{});
450 }
451
452 static pointer
453 _S_do_relocate(pointer __first, pointer __last, pointer __result,
454 _Tp_alloc_type& __alloc, true_type) noexcept
455 {
456 return std::__relocate_a(__first, __last, __result, __alloc);
457 }
458
459 static pointer
460 _S_do_relocate(pointer, pointer, pointer __result,
461 _Tp_alloc_type&, false_type) noexcept
462 { return __result; }
463
464 static pointer
465 _S_relocate(pointer __first, pointer __last, pointer __result,
466 _Tp_alloc_type& __alloc) noexcept
467 {
468 using __do_it = __bool_constant<_S_use_relocate()>;
469 return _S_do_relocate(__first, __last, __result, __alloc, __do_it{});
470 }
471#endif // C++11
472
473 protected:
474 using _Base::_M_allocate;
475 using _Base::_M_deallocate;
476 using _Base::_M_impl;
477 using _Base::_M_get_Tp_allocator;
478
479 public:
480 // [23.2.4.1] construct/copy/destroy
481 // (assign() and get_allocator() are also listed in this section)
482
483 /**
484 * @brief Creates a %vector with no elements.
485 */
486#if __cplusplus201402L >= 201103L
487 vector() = default;
488#else
489 vector() { }
490#endif
491
492 /**
493 * @brief Creates a %vector with no elements.
494 * @param __a An allocator object.
495 */
496 explicit
497 vector(const allocator_type& __a) _GLIBCXX_NOEXCEPTnoexcept
498 : _Base(__a) { }
499
500#if __cplusplus201402L >= 201103L
501 /**
502 * @brief Creates a %vector with default constructed elements.
503 * @param __n The number of elements to initially create.
504 * @param __a An allocator.
505 *
506 * This constructor fills the %vector with @a __n default
507 * constructed elements.
508 */
509 explicit
510 vector(size_type __n, const allocator_type& __a = allocator_type())
511 : _Base(_S_check_init_len(__n, __a), __a)
512 { _M_default_initialize(__n); }
513
514 /**
515 * @brief Creates a %vector with copies of an exemplar element.
516 * @param __n The number of elements to initially create.
517 * @param __value An element to copy.
518 * @param __a An allocator.
519 *
520 * This constructor fills the %vector with @a __n copies of @a __value.
521 */
522 vector(size_type __n, const value_type& __value,
523 const allocator_type& __a = allocator_type())
524 : _Base(_S_check_init_len(__n, __a), __a)
525 { _M_fill_initialize(__n, __value); }
526#else
527 /**
528 * @brief Creates a %vector with copies of an exemplar element.
529 * @param __n The number of elements to initially create.
530 * @param __value An element to copy.
531 * @param __a An allocator.
532 *
533 * This constructor fills the %vector with @a __n copies of @a __value.
534 */
535 explicit
536 vector(size_type __n, const value_type& __value = value_type(),
537 const allocator_type& __a = allocator_type())
538 : _Base(_S_check_init_len(__n, __a), __a)
539 { _M_fill_initialize(__n, __value); }
540#endif
541
542 /**
543 * @brief %Vector copy constructor.
544 * @param __x A %vector of identical element and allocator types.
545 *
546 * All the elements of @a __x are copied, but any unused capacity in
547 * @a __x will not be copied
548 * (i.e. capacity() == size() in the new %vector).
549 *
550 * The newly-created %vector uses a copy of the allocator object used
551 * by @a __x (unless the allocator traits dictate a different object).
552 */
553 vector(const vector& __x)
554 : _Base(__x.size(),
555 _Alloc_traits::_S_select_on_copy(__x._M_get_Tp_allocator()))
556 {
557 this->_M_impl._M_finish =
558 std::__uninitialized_copy_a(__x.begin(), __x.end(),
559 this->_M_impl._M_start,
560 _M_get_Tp_allocator());
561 }
562
563#if __cplusplus201402L >= 201103L
564 /**
565 * @brief %Vector move constructor.
566 *
567 * The newly-created %vector contains the exact contents of the
568 * moved instance.
569 * The contents of the moved instance are a valid, but unspecified
570 * %vector.
571 */
572 vector(vector&&) noexcept = default;
573
574 /// Copy constructor with alternative allocator
575 vector(const vector& __x, const allocator_type& __a)
576 : _Base(__x.size(), __a)
577 {
578 this->_M_impl._M_finish =
579 std::__uninitialized_copy_a(__x.begin(), __x.end(),
580 this->_M_impl._M_start,
581 _M_get_Tp_allocator());
582 }
583
584 private:
585 vector(vector&& __rv, const allocator_type& __m, true_type) noexcept
586 : _Base(__m, std::move(__rv))
587 { }
588
589 vector(vector&& __rv, const allocator_type& __m, false_type)
590 : _Base(__m)
591 {
592 if (__rv.get_allocator() == __m)
593 this->_M_impl._M_swap_data(__rv._M_impl);
594 else if (!__rv.empty())
595 {
596 this->_M_create_storage(__rv.size());
597 this->_M_impl._M_finish =
598 std::__uninitialized_move_a(__rv.begin(), __rv.end(),
599 this->_M_impl._M_start,
600 _M_get_Tp_allocator());
601 __rv.clear();
602 }
603 }
604
605 public:
606 /// Move constructor with alternative allocator
607 vector(vector&& __rv, const allocator_type& __m)
608 noexcept( noexcept(
609 vector(std::declval<vector&&>(), std::declval<const allocator_type&>(),
610 std::declval<typename _Alloc_traits::is_always_equal>())) )
611 : vector(std::move(__rv), __m, typename _Alloc_traits::is_always_equal{})
612 { }
613
614 /**
615 * @brief Builds a %vector from an initializer list.
616 * @param __l An initializer_list.
617 * @param __a An allocator.
618 *
619 * Create a %vector consisting of copies of the elements in the
620 * initializer_list @a __l.
621 *
622 * This will call the element type's copy constructor N times
623 * (where N is @a __l.size()) and do no memory reallocation.
624 */
625 vector(initializer_list<value_type> __l,
626 const allocator_type& __a = allocator_type())
627 : _Base(__a)
628 {
629 _M_range_initialize(__l.begin(), __l.end(),
630 random_access_iterator_tag());
631 }
632#endif
633
634 /**
635 * @brief Builds a %vector from a range.
636 * @param __first An input iterator.
637 * @param __last An input iterator.
638 * @param __a An allocator.
639 *
640 * Create a %vector consisting of copies of the elements from
641 * [first,last).
642 *
643 * If the iterators are forward, bidirectional, or
644 * random-access, then this will call the elements' copy
645 * constructor N times (where N is distance(first,last)) and do
646 * no memory reallocation. But if only input iterators are
647 * used, then this will do at most 2N calls to the copy
648 * constructor, and logN memory reallocations.
649 */
650#if __cplusplus201402L >= 201103L
651 template<typename _InputIterator,
652 typename = std::_RequireInputIter<_InputIterator>>
653 vector(_InputIterator __first, _InputIterator __last,
654 const allocator_type& __a = allocator_type())
655 : _Base(__a)
656 {
657 _M_range_initialize(__first, __last,
658 std::__iterator_category(__first));
659 }
660#else
661 template<typename _InputIterator>
662 vector(_InputIterator __first, _InputIterator __last,
663 const allocator_type& __a = allocator_type())
664 : _Base(__a)
665 {
666 // Check whether it's an integral type. If so, it's not an iterator.
667 typedef typename std::__is_integer<_InputIterator>::__type _Integral;
668 _M_initialize_dispatch(__first, __last, _Integral());
669 }
670#endif
671
672 /**
673 * The dtor only erases the elements, and note that if the
674 * elements themselves are pointers, the pointed-to memory is
675 * not touched in any way. Managing the pointer is the user's
676 * responsibility.
677 */
678 ~vector() _GLIBCXX_NOEXCEPTnoexcept
679 {
680 std::_Destroy(this->_M_impl._M_start, this->_M_impl._M_finish,
681 _M_get_Tp_allocator());
682 _GLIBCXX_ASAN_ANNOTATE_BEFORE_DEALLOC;
683 }
684
685 /**
686 * @brief %Vector assignment operator.
687 * @param __x A %vector of identical element and allocator types.
688 *
689 * All the elements of @a __x are copied, but any unused capacity in
690 * @a __x will not be copied.
691 *
692 * Whether the allocator is copied depends on the allocator traits.
693 */
694 vector&
695 operator=(const vector& __x);
696
697#if __cplusplus201402L >= 201103L
698 /**
699 * @brief %Vector move assignment operator.
700 * @param __x A %vector of identical element and allocator types.
701 *
702 * The contents of @a __x are moved into this %vector (without copying,
703 * if the allocators permit it).
704 * Afterwards @a __x is a valid, but unspecified %vector.
705 *
706 * Whether the allocator is moved depends on the allocator traits.
707 */
708 vector&
709 operator=(vector&& __x) noexcept(_Alloc_traits::_S_nothrow_move())
710 {
711 constexpr bool __move_storage =
712 _Alloc_traits::_S_propagate_on_move_assign()
713 || _Alloc_traits::_S_always_equal();
714 _M_move_assign(std::move(__x), __bool_constant<__move_storage>());
715 return *this;
716 }
717
718 /**
719 * @brief %Vector list assignment operator.
720 * @param __l An initializer_list.
721 *
722 * This function fills a %vector with copies of the elements in the
723 * initializer list @a __l.
724 *
725 * Note that the assignment completely changes the %vector and
726 * that the resulting %vector's size is the same as the number
727 * of elements assigned.
728 */
729 vector&
730 operator=(initializer_list<value_type> __l)
731 {
732 this->_M_assign_aux(__l.begin(), __l.end(),
733 random_access_iterator_tag());
734 return *this;
735 }
736#endif
737
738 /**
739 * @brief Assigns a given value to a %vector.
740 * @param __n Number of elements to be assigned.
741 * @param __val Value to be assigned.
742 *
743 * This function fills a %vector with @a __n copies of the given
744 * value. Note that the assignment completely changes the
745 * %vector and that the resulting %vector's size is the same as
746 * the number of elements assigned.
747 */
748 void
749 assign(size_type __n, const value_type& __val)
750 { _M_fill_assign(__n, __val); }
751
752 /**
753 * @brief Assigns a range to a %vector.
754 * @param __first An input iterator.
755 * @param __last An input iterator.
756 *
757 * This function fills a %vector with copies of the elements in the
758 * range [__first,__last).
759 *
760 * Note that the assignment completely changes the %vector and
761 * that the resulting %vector's size is the same as the number
762 * of elements assigned.
763 */
764#if __cplusplus201402L >= 201103L
765 template<typename _InputIterator,
766 typename = std::_RequireInputIter<_InputIterator>>
767 void
768 assign(_InputIterator __first, _InputIterator __last)
769 { _M_assign_dispatch(__first, __last, __false_type()); }
770#else
771 template<typename _InputIterator>
772 void
773 assign(_InputIterator __first, _InputIterator __last)
774 {
775 // Check whether it's an integral type. If so, it's not an iterator.
776 typedef typename std::__is_integer<_InputIterator>::__type _Integral;
777 _M_assign_dispatch(__first, __last, _Integral());
778 }
779#endif
780
781#if __cplusplus201402L >= 201103L
782 /**
783 * @brief Assigns an initializer list to a %vector.
784 * @param __l An initializer_list.
785 *
786 * This function fills a %vector with copies of the elements in the
787 * initializer list @a __l.
788 *
789 * Note that the assignment completely changes the %vector and
790 * that the resulting %vector's size is the same as the number
791 * of elements assigned.
792 */
793 void
794 assign(initializer_list<value_type> __l)
795 {
796 this->_M_assign_aux(__l.begin(), __l.end(),
797 random_access_iterator_tag());
798 }
799#endif
800
801 /// Get a copy of the memory allocation object.
802 using _Base::get_allocator;
803
804 // iterators
805 /**
806 * Returns a read/write iterator that points to the first
807 * element in the %vector. Iteration is done in ordinary
808 * element order.
809 */
810 iterator
811 begin() _GLIBCXX_NOEXCEPTnoexcept
812 { return iterator(this->_M_impl._M_start); }
813
814 /**
815 * Returns a read-only (constant) iterator that points to the
816 * first element in the %vector. Iteration is done in ordinary
817 * element order.
818 */
819 const_iterator
820 begin() const _GLIBCXX_NOEXCEPTnoexcept
821 { return const_iterator(this->_M_impl._M_start); }
822
823 /**
824 * Returns a read/write iterator that points one past the last
825 * element in the %vector. Iteration is done in ordinary
826 * element order.
827 */
828 iterator
829 end() _GLIBCXX_NOEXCEPTnoexcept
830 { return iterator(this->_M_impl._M_finish); }
831
832 /**
833 * Returns a read-only (constant) iterator that points one past
834 * the last element in the %vector. Iteration is done in
835 * ordinary element order.
836 */
837 const_iterator
838 end() const _GLIBCXX_NOEXCEPTnoexcept
839 { return const_iterator(this->_M_impl._M_finish); }
840
841 /**
842 * Returns a read/write reverse iterator that points to the
843 * last element in the %vector. Iteration is done in reverse
844 * element order.
845 */
846 reverse_iterator
847 rbegin() _GLIBCXX_NOEXCEPTnoexcept
848 { return reverse_iterator(end()); }
849
850 /**
851 * Returns a read-only (constant) reverse iterator that points
852 * to the last element in the %vector. Iteration is done in
853 * reverse element order.
854 */
855 const_reverse_iterator
856 rbegin() const _GLIBCXX_NOEXCEPTnoexcept
857 { return const_reverse_iterator(end()); }
858
859 /**
860 * Returns a read/write reverse iterator that points to one
861 * before the first element in the %vector. Iteration is done
862 * in reverse element order.
863 */
864 reverse_iterator
865 rend() _GLIBCXX_NOEXCEPTnoexcept
866 { return reverse_iterator(begin()); }
867
868 /**
869 * Returns a read-only (constant) reverse iterator that points
870 * to one before the first element in the %vector. Iteration
871 * is done in reverse element order.
872 */
873 const_reverse_iterator
874 rend() const _GLIBCXX_NOEXCEPTnoexcept
875 { return const_reverse_iterator(begin()); }
876
877#if __cplusplus201402L >= 201103L
878 /**
879 * Returns a read-only (constant) iterator that points to the
880 * first element in the %vector. Iteration is done in ordinary
881 * element order.
882 */
883 const_iterator
884 cbegin() const noexcept
885 { return const_iterator(this->_M_impl._M_start); }
886
887 /**
888 * Returns a read-only (constant) iterator that points one past
889 * the last element in the %vector. Iteration is done in
890 * ordinary element order.
891 */
892 const_iterator
893 cend() const noexcept
894 { return const_iterator(this->_M_impl._M_finish); }
895
896 /**
897 * Returns a read-only (constant) reverse iterator that points
898 * to the last element in the %vector. Iteration is done in
899 * reverse element order.
900 */
901 const_reverse_iterator
902 crbegin() const noexcept
903 { return const_reverse_iterator(end()); }
904
905 /**
906 * Returns a read-only (constant) reverse iterator that points
907 * to one before the first element in the %vector. Iteration
908 * is done in reverse element order.
909 */
910 const_reverse_iterator
911 crend() const noexcept
912 { return const_reverse_iterator(begin()); }
913#endif
914
915 // [23.2.4.2] capacity
916 /** Returns the number of elements in the %vector. */
917 size_type
918 size() const _GLIBCXX_NOEXCEPTnoexcept
919 { return size_type(this->_M_impl._M_finish - this->_M_impl._M_start); }
920
921 /** Returns the size() of the largest possible %vector. */
922 size_type
923 max_size() const _GLIBCXX_NOEXCEPTnoexcept
924 { return _S_max_size(_M_get_Tp_allocator()); }
925
926#if __cplusplus201402L >= 201103L
927 /**
928 * @brief Resizes the %vector to the specified number of elements.
929 * @param __new_size Number of elements the %vector should contain.
930 *
931 * This function will %resize the %vector to the specified
932 * number of elements. If the number is smaller than the
933 * %vector's current size the %vector is truncated, otherwise
934 * default constructed elements are appended.
935 */
936 void
937 resize(size_type __new_size)
938 {
939 if (__new_size > size())
940 _M_default_append(__new_size - size());
941 else if (__new_size < size())
942 _M_erase_at_end(this->_M_impl._M_start + __new_size);
943 }
944
945 /**
946 * @brief Resizes the %vector to the specified number of elements.
947 * @param __new_size Number of elements the %vector should contain.
948 * @param __x Data with which new elements should be populated.
949 *
950 * This function will %resize the %vector to the specified
951 * number of elements. If the number is smaller than the
952 * %vector's current size the %vector is truncated, otherwise
953 * the %vector is extended and new elements are populated with
954 * given data.
955 */
956 void
957 resize(size_type __new_size, const value_type& __x)
958 {
959 if (__new_size > size())
960 _M_fill_insert(end(), __new_size - size(), __x);
961 else if (__new_size < size())
962 _M_erase_at_end(this->_M_impl._M_start + __new_size);
963 }
964#else
965 /**
966 * @brief Resizes the %vector to the specified number of elements.
967 * @param __new_size Number of elements the %vector should contain.
968 * @param __x Data with which new elements should be populated.
969 *
970 * This function will %resize the %vector to the specified
971 * number of elements. If the number is smaller than the
972 * %vector's current size the %vector is truncated, otherwise
973 * the %vector is extended and new elements are populated with
974 * given data.
975 */
976 void
977 resize(size_type __new_size, value_type __x = value_type())
978 {
979 if (__new_size > size())
980 _M_fill_insert(end(), __new_size - size(), __x);
981 else if (__new_size < size())
982 _M_erase_at_end(this->_M_impl._M_start + __new_size);
983 }
984#endif
985
986#if __cplusplus201402L >= 201103L
987 /** A non-binding request to reduce capacity() to size(). */
988 void
989 shrink_to_fit()
990 { _M_shrink_to_fit(); }
991#endif
992
993 /**
994 * Returns the total number of elements that the %vector can
995 * hold before needing to allocate more memory.
996 */
997 size_type
998 capacity() const _GLIBCXX_NOEXCEPTnoexcept
999 { return size_type(this->_M_impl._M_end_of_storage
1000 - this->_M_impl._M_start); }
1001
1002 /**
1003 * Returns true if the %vector is empty. (Thus begin() would
1004 * equal end().)
1005 */
1006 _GLIBCXX_NODISCARD bool
1007 empty() const _GLIBCXX_NOEXCEPTnoexcept
1008 { return begin() == end(); }
1009
1010 /**
1011 * @brief Attempt to preallocate enough memory for specified number of
1012 * elements.
1013 * @param __n Number of elements required.
1014 * @throw std::length_error If @a n exceeds @c max_size().
1015 *
1016 * This function attempts to reserve enough memory for the
1017 * %vector to hold the specified number of elements. If the
1018 * number requested is more than max_size(), length_error is
1019 * thrown.
1020 *
1021 * The advantage of this function is that if optimal code is a
1022 * necessity and the user can determine the number of elements
1023 * that will be required, the user can reserve the memory in
1024 * %advance, and thus prevent a possible reallocation of memory
1025 * and copying of %vector data.
1026 */
1027 void
1028 reserve(size_type __n);
1029
1030 // element access
1031 /**
1032 * @brief Subscript access to the data contained in the %vector.
1033 * @param __n The index of the element for which data should be
1034 * accessed.
1035 * @return Read/write reference to data.
1036 *
1037 * This operator allows for easy, array-style, data access.
1038 * Note that data access with this operator is unchecked and
1039 * out_of_range lookups are not defined. (For checked lookups
1040 * see at().)
1041 */
1042 reference
1043 operator[](size_type __n) _GLIBCXX_NOEXCEPTnoexcept
1044 {
1045 __glibcxx_requires_subscript(__n);
1046 return *(this->_M_impl._M_start + __n);
1047 }
1048
1049 /**
1050 * @brief Subscript access to the data contained in the %vector.
1051 * @param __n The index of the element for which data should be
1052 * accessed.
1053 * @return Read-only (constant) reference to data.
1054 *
1055 * This operator allows for easy, array-style, data access.
1056 * Note that data access with this operator is unchecked and
1057 * out_of_range lookups are not defined. (For checked lookups
1058 * see at().)
1059 */
1060 const_reference
1061 operator[](size_type __n) const _GLIBCXX_NOEXCEPTnoexcept
1062 {
1063 __glibcxx_requires_subscript(__n);
1064 return *(this->_M_impl._M_start + __n);
1065 }
1066
1067 protected:
1068 /// Safety check used only from at().
1069 void
1070 _M_range_check(size_type __n) const
1071 {
1072 if (__n >= this->size())
1073 __throw_out_of_range_fmt(__N("vector::_M_range_check: __n "("vector::_M_range_check: __n " "(which is %zu) >= this->size() "
"(which is %zu)")
1074 "(which is %zu) >= this->size() "("vector::_M_range_check: __n " "(which is %zu) >= this->size() "
"(which is %zu)")
1075 "(which is %zu)")("vector::_M_range_check: __n " "(which is %zu) >= this->size() "
"(which is %zu)")
,
1076 __n, this->size());
1077 }
1078
1079 public:
1080 /**
1081 * @brief Provides access to the data contained in the %vector.
1082 * @param __n The index of the element for which data should be
1083 * accessed.
1084 * @return Read/write reference to data.
1085 * @throw std::out_of_range If @a __n is an invalid index.
1086 *
1087 * This function provides for safer data access. The parameter
1088 * is first checked that it is in the range of the vector. The
1089 * function throws out_of_range if the check fails.
1090 */
1091 reference
1092 at(size_type __n)
1093 {
1094 _M_range_check(__n);
1095 return (*this)[__n];
1096 }
1097
1098 /**
1099 * @brief Provides access to the data contained in the %vector.
1100 * @param __n The index of the element for which data should be
1101 * accessed.
1102 * @return Read-only (constant) reference to data.
1103 * @throw std::out_of_range If @a __n is an invalid index.
1104 *
1105 * This function provides for safer data access. The parameter
1106 * is first checked that it is in the range of the vector. The
1107 * function throws out_of_range if the check fails.
1108 */
1109 const_reference
1110 at(size_type __n) const
1111 {
1112 _M_range_check(__n);
1113 return (*this)[__n];
1114 }
1115
1116 /**
1117 * Returns a read/write reference to the data at the first
1118 * element of the %vector.
1119 */
1120 reference
1121 front() _GLIBCXX_NOEXCEPTnoexcept
1122 {
1123 __glibcxx_requires_nonempty();
1124 return *begin();
1125 }
1126
1127 /**
1128 * Returns a read-only (constant) reference to the data at the first
1129 * element of the %vector.
1130 */
1131 const_reference
1132 front() const _GLIBCXX_NOEXCEPTnoexcept
1133 {
1134 __glibcxx_requires_nonempty();
1135 return *begin();
1136 }
1137
1138 /**
1139 * Returns a read/write reference to the data at the last
1140 * element of the %vector.
1141 */
1142 reference
1143 back() _GLIBCXX_NOEXCEPTnoexcept
1144 {
1145 __glibcxx_requires_nonempty();
1146 return *(end() - 1);
1147 }
1148
1149 /**
1150 * Returns a read-only (constant) reference to the data at the
1151 * last element of the %vector.
1152 */
1153 const_reference
1154 back() const _GLIBCXX_NOEXCEPTnoexcept
1155 {
1156 __glibcxx_requires_nonempty();
1157 return *(end() - 1);
1158 }
1159
1160 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1161 // DR 464. Suggestion for new member functions in standard containers.
1162 // data access
1163 /**
1164 * Returns a pointer such that [data(), data() + size()) is a valid
1165 * range. For a non-empty %vector, data() == &front().
1166 */
1167 _Tp*
1168 data() _GLIBCXX_NOEXCEPTnoexcept
1169 { return _M_data_ptr(this->_M_impl._M_start); }
1170
1171 const _Tp*
1172 data() const _GLIBCXX_NOEXCEPTnoexcept
1173 { return _M_data_ptr(this->_M_impl._M_start); }
1174
1175 // [23.2.4.3] modifiers
1176 /**
1177 * @brief Add data to the end of the %vector.
1178 * @param __x Data to be added.
1179 *
1180 * This is a typical stack operation. The function creates an
1181 * element at the end of the %vector and assigns the given data
1182 * to it. Due to the nature of a %vector this operation can be
1183 * done in constant time if the %vector has preallocated space
1184 * available.
1185 */
1186 void
1187 push_back(const value_type& __x)
1188 {
1189 if (this->_M_impl._M_finish != this->_M_impl._M_end_of_storage)
1190 {
1191 _GLIBCXX_ASAN_ANNOTATE_GROW(1);
1192 _Alloc_traits::construct(this->_M_impl, this->_M_impl._M_finish,
1193 __x);
1194 ++this->_M_impl._M_finish;
1195 _GLIBCXX_ASAN_ANNOTATE_GREW(1);
1196 }
1197 else
1198 _M_realloc_insert(end(), __x);
1199 }
1200
1201#if __cplusplus201402L >= 201103L
1202 void
1203 push_back(value_type&& __x)
1204 { emplace_back(std::move(__x)); }
1205
1206 template<typename... _Args>
1207#if __cplusplus201402L > 201402L
1208 reference
1209#else
1210 void
1211#endif
1212 emplace_back(_Args&&... __args);
1213#endif
1214
1215 /**
1216 * @brief Removes last element.
1217 *
1218 * This is a typical stack operation. It shrinks the %vector by one.
1219 *
1220 * Note that no data is returned, and if the last element's
1221 * data is needed, it should be retrieved before pop_back() is
1222 * called.
1223 */
1224 void
1225 pop_back() _GLIBCXX_NOEXCEPTnoexcept
1226 {
1227 __glibcxx_requires_nonempty();
1228 --this->_M_impl._M_finish;
1229 _Alloc_traits::destroy(this->_M_impl, this->_M_impl._M_finish);
1230 _GLIBCXX_ASAN_ANNOTATE_SHRINK(1);
1231 }
1232
1233#if __cplusplus201402L >= 201103L
1234 /**
1235 * @brief Inserts an object in %vector before specified iterator.
1236 * @param __position A const_iterator into the %vector.
1237 * @param __args Arguments.
1238 * @return An iterator that points to the inserted data.
1239 *
1240 * This function will insert an object of type T constructed
1241 * with T(std::forward<Args>(args)...) before the specified location.
1242 * Note that this kind of operation could be expensive for a %vector
1243 * and if it is frequently used the user should consider using
1244 * std::list.
1245 */
1246 template<typename... _Args>
1247 iterator
1248 emplace(const_iterator __position, _Args&&... __args)
1249 { return _M_emplace_aux(__position, std::forward<_Args>(__args)...); }
1250
1251 /**
1252 * @brief Inserts given value into %vector before specified iterator.
1253 * @param __position A const_iterator into the %vector.
1254 * @param __x Data to be inserted.
1255 * @return An iterator that points to the inserted data.
1256 *
1257 * This function will insert a copy of the given value before
1258 * the specified location. Note that this kind of operation
1259 * could be expensive for a %vector and if it is frequently
1260 * used the user should consider using std::list.
1261 */
1262 iterator
1263 insert(const_iterator __position, const value_type& __x);
1264#else
1265 /**
1266 * @brief Inserts given value into %vector before specified iterator.
1267 * @param __position An iterator into the %vector.
1268 * @param __x Data to be inserted.
1269 * @return An iterator that points to the inserted data.
1270 *
1271 * This function will insert a copy of the given value before
1272 * the specified location. Note that this kind of operation
1273 * could be expensive for a %vector and if it is frequently
1274 * used the user should consider using std::list.
1275 */
1276 iterator
1277 insert(iterator __position, const value_type& __x);
1278#endif
1279
1280#if __cplusplus201402L >= 201103L
1281 /**
1282 * @brief Inserts given rvalue into %vector before specified iterator.
1283 * @param __position A const_iterator into the %vector.
1284 * @param __x Data to be inserted.
1285 * @return An iterator that points to the inserted data.
1286 *
1287 * This function will insert a copy of the given rvalue before
1288 * the specified location. Note that this kind of operation
1289 * could be expensive for a %vector and if it is frequently
1290 * used the user should consider using std::list.
1291 */
1292 iterator
1293 insert(const_iterator __position, value_type&& __x)
1294 { return _M_insert_rval(__position, std::move(__x)); }
1295
1296 /**
1297 * @brief Inserts an initializer_list into the %vector.
1298 * @param __position An iterator into the %vector.
1299 * @param __l An initializer_list.
1300 *
1301 * This function will insert copies of the data in the
1302 * initializer_list @a l into the %vector before the location
1303 * specified by @a position.
1304 *
1305 * Note that this kind of operation could be expensive for a
1306 * %vector and if it is frequently used the user should
1307 * consider using std::list.
1308 */
1309 iterator
1310 insert(const_iterator __position, initializer_list<value_type> __l)
1311 {
1312 auto __offset = __position - cbegin();
1313 _M_range_insert(begin() + __offset, __l.begin(), __l.end(),
1314 std::random_access_iterator_tag());
1315 return begin() + __offset;
1316 }
1317#endif
1318
1319#if __cplusplus201402L >= 201103L
1320 /**
1321 * @brief Inserts a number of copies of given data into the %vector.
1322 * @param __position A const_iterator into the %vector.
1323 * @param __n Number of elements to be inserted.
1324 * @param __x Data to be inserted.
1325 * @return An iterator that points to the inserted data.
1326 *
1327 * This function will insert a specified number of copies of
1328 * the given data before the location specified by @a position.
1329 *
1330 * Note that this kind of operation could be expensive for a
1331 * %vector and if it is frequently used the user should
1332 * consider using std::list.
1333 */
1334 iterator
1335 insert(const_iterator __position, size_type __n, const value_type& __x)
1336 {
1337 difference_type __offset = __position - cbegin();
1338 _M_fill_insert(begin() + __offset, __n, __x);
1339 return begin() + __offset;
1340 }
1341#else
1342 /**
1343 * @brief Inserts a number of copies of given data into the %vector.
1344 * @param __position An iterator into the %vector.
1345 * @param __n Number of elements to be inserted.
1346 * @param __x Data to be inserted.
1347 *
1348 * This function will insert a specified number of copies of
1349 * the given data before the location specified by @a position.
1350 *
1351 * Note that this kind of operation could be expensive for a
1352 * %vector and if it is frequently used the user should
1353 * consider using std::list.
1354 */
1355 void
1356 insert(iterator __position, size_type __n, const value_type& __x)
1357 { _M_fill_insert(__position, __n, __x); }
1358#endif
1359
1360#if __cplusplus201402L >= 201103L
1361 /**
1362 * @brief Inserts a range into the %vector.
1363 * @param __position A const_iterator into the %vector.
1364 * @param __first An input iterator.
1365 * @param __last An input iterator.
1366 * @return An iterator that points to the inserted data.
1367 *
1368 * This function will insert copies of the data in the range
1369 * [__first,__last) into the %vector before the location specified
1370 * by @a pos.
1371 *
1372 * Note that this kind of operation could be expensive for a
1373 * %vector and if it is frequently used the user should
1374 * consider using std::list.
1375 */
1376 template<typename _InputIterator,
1377 typename = std::_RequireInputIter<_InputIterator>>
1378 iterator
1379 insert(const_iterator __position, _InputIterator __first,
1380 _InputIterator __last)
1381 {
1382 difference_type __offset = __position - cbegin();
1383 _M_insert_dispatch(begin() + __offset,
1384 __first, __last, __false_type());
1385 return begin() + __offset;
1386 }
1387#else
1388 /**
1389 * @brief Inserts a range into the %vector.
1390 * @param __position An iterator into the %vector.
1391 * @param __first An input iterator.
1392 * @param __last An input iterator.
1393 *
1394 * This function will insert copies of the data in the range
1395 * [__first,__last) into the %vector before the location specified
1396 * by @a pos.
1397 *
1398 * Note that this kind of operation could be expensive for a
1399 * %vector and if it is frequently used the user should
1400 * consider using std::list.
1401 */
1402 template<typename _InputIterator>
1403 void
1404 insert(iterator __position, _InputIterator __first,
1405 _InputIterator __last)
1406 {
1407 // Check whether it's an integral type. If so, it's not an iterator.
1408 typedef typename std::__is_integer<_InputIterator>::__type _Integral;
1409 _M_insert_dispatch(__position, __first, __last, _Integral());
1410 }
1411#endif
1412
1413 /**
1414 * @brief Remove element at given position.
1415 * @param __position Iterator pointing to element to be erased.
1416 * @return An iterator pointing to the next element (or end()).
1417 *
1418 * This function will erase the element at the given position and thus
1419 * shorten the %vector by one.
1420 *
1421 * Note This operation could be expensive and if it is
1422 * frequently used the user should consider using std::list.
1423 * The user is also cautioned that this function only erases
1424 * the element, and that if the element is itself a pointer,
1425 * the pointed-to memory is not touched in any way. Managing
1426 * the pointer is the user's responsibility.
1427 */
1428 iterator
1429#if __cplusplus201402L >= 201103L
1430 erase(const_iterator __position)
1431 { return _M_erase(begin() + (__position - cbegin())); }
1432#else
1433 erase(iterator __position)
1434 { return _M_erase(__position); }
1435#endif
1436
1437 /**
1438 * @brief Remove a range of elements.
1439 * @param __first Iterator pointing to the first element to be erased.
1440 * @param __last Iterator pointing to one past the last element to be
1441 * erased.
1442 * @return An iterator pointing to the element pointed to by @a __last
1443 * prior to erasing (or end()).
1444 *
1445 * This function will erase the elements in the range
1446 * [__first,__last) and shorten the %vector accordingly.
1447 *
1448 * Note This operation could be expensive and if it is
1449 * frequently used the user should consider using std::list.
1450 * The user is also cautioned that this function only erases
1451 * the elements, and that if the elements themselves are
1452 * pointers, the pointed-to memory is not touched in any way.
1453 * Managing the pointer is the user's responsibility.
1454 */
1455 iterator
1456#if __cplusplus201402L >= 201103L
1457 erase(const_iterator __first, const_iterator __last)
1458 {
1459 const auto __beg = begin();
1460 const auto __cbeg = cbegin();
1461 return _M_erase(__beg + (__first - __cbeg), __beg + (__last - __cbeg));
1462 }
1463#else
1464 erase(iterator __first, iterator __last)
1465 { return _M_erase(__first, __last); }
1466#endif
1467
1468 /**
1469 * @brief Swaps data with another %vector.
1470 * @param __x A %vector of the same element and allocator types.
1471 *
1472 * This exchanges the elements between two vectors in constant time.
1473 * (Three pointers, so it should be quite fast.)
1474 * Note that the global std::swap() function is specialized such that
1475 * std::swap(v1,v2) will feed to this function.
1476 *
1477 * Whether the allocators are swapped depends on the allocator traits.
1478 */
1479 void
1480 swap(vector& __x) _GLIBCXX_NOEXCEPTnoexcept
1481 {
1482#if __cplusplus201402L >= 201103L
1483 __glibcxx_assert(_Alloc_traits::propagate_on_container_swap::value
1484 || _M_get_Tp_allocator() == __x._M_get_Tp_allocator());
1485#endif
1486 this->_M_impl._M_swap_data(__x._M_impl);
1487 _Alloc_traits::_S_on_swap(_M_get_Tp_allocator(),
1488 __x._M_get_Tp_allocator());
1489 }
1490
1491 /**
1492 * Erases all the elements. Note that this function only erases the
1493 * elements, and that if the elements themselves are pointers, the
1494 * pointed-to memory is not touched in any way. Managing the pointer is
1495 * the user's responsibility.
1496 */
1497 void
1498 clear() _GLIBCXX_NOEXCEPTnoexcept
1499 { _M_erase_at_end(this->_M_impl._M_start); }
1500
1501 protected:
1502 /**
1503 * Memory expansion handler. Uses the member allocation function to
1504 * obtain @a n bytes of memory, and then copies [first,last) into it.
1505 */
1506 template<typename _ForwardIterator>
1507 pointer
1508 _M_allocate_and_copy(size_type __n,
1509 _ForwardIterator __first, _ForwardIterator __last)
1510 {
1511 pointer __result = this->_M_allocate(__n);
1512 __tryif (true)
1513 {
1514 std::__uninitialized_copy_a(__first, __last, __result,
1515 _M_get_Tp_allocator());
1516 return __result;
1517 }
1518 __catch(...)if (false)
1519 {
1520 _M_deallocate(__result, __n);
1521 __throw_exception_again;
1522 }
1523 }
1524
1525
1526 // Internal constructor functions follow.
1527
1528 // Called by the range constructor to implement [23.1.1]/9
1529
1530#if __cplusplus201402L < 201103L
1531 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1532 // 438. Ambiguity in the "do the right thing" clause
1533 template<typename _Integer>
1534 void
1535 _M_initialize_dispatch(_Integer __n, _Integer __value, __true_type)
1536 {
1537 this->_M_impl._M_start = _M_allocate(_S_check_init_len(
1538 static_cast<size_type>(__n), _M_get_Tp_allocator()));
1539 this->_M_impl._M_end_of_storage =
1540 this->_M_impl._M_start + static_cast<size_type>(__n);
1541 _M_fill_initialize(static_cast<size_type>(__n), __value);
1542 }
1543
1544 // Called by the range constructor to implement [23.1.1]/9
1545 template<typename _InputIterator>
1546 void
1547 _M_initialize_dispatch(_InputIterator __first, _InputIterator __last,
1548 __false_type)
1549 {
1550 _M_range_initialize(__first, __last,
1551 std::__iterator_category(__first));
1552 }
1553#endif
1554
1555 // Called by the second initialize_dispatch above
1556 template<typename _InputIterator>
1557 void
1558 _M_range_initialize(_InputIterator __first, _InputIterator __last,
1559 std::input_iterator_tag)
1560 {
1561 __tryif (true) {
1562 for (; __first != __last; ++__first)
1563#if __cplusplus201402L >= 201103L
1564 emplace_back(*__first);
1565#else
1566 push_back(*__first);
1567#endif
1568 } __catch(...)if (false) {
1569 clear();
1570 __throw_exception_again;
1571 }
1572 }
1573
1574 // Called by the second initialize_dispatch above
1575 template<typename _ForwardIterator>
1576 void
1577 _M_range_initialize(_ForwardIterator __first, _ForwardIterator __last,
1578 std::forward_iterator_tag)
1579 {
1580 const size_type __n = std::distance(__first, __last);
1581 this->_M_impl._M_start
1582 = this->_M_allocate(_S_check_init_len(__n, _M_get_Tp_allocator()));
1583 this->_M_impl._M_end_of_storage = this->_M_impl._M_start + __n;
1584 this->_M_impl._M_finish =
1585 std::__uninitialized_copy_a(__first, __last,
1586 this->_M_impl._M_start,
1587 _M_get_Tp_allocator());
1588 }
1589
1590 // Called by the first initialize_dispatch above and by the
1591 // vector(n,value,a) constructor.
1592 void
1593 _M_fill_initialize(size_type __n, const value_type& __value)
1594 {
1595 this->_M_impl._M_finish =
1596 std::__uninitialized_fill_n_a(this->_M_impl._M_start, __n, __value,
1597 _M_get_Tp_allocator());
1598 }
1599
1600#if __cplusplus201402L >= 201103L
1601 // Called by the vector(n) constructor.
1602 void
1603 _M_default_initialize(size_type __n)
1604 {
1605 this->_M_impl._M_finish =
1606 std::__uninitialized_default_n_a(this->_M_impl._M_start, __n,
1607 _M_get_Tp_allocator());
1608 }
1609#endif
1610
1611 // Internal assign functions follow. The *_aux functions do the actual
1612 // assignment work for the range versions.
1613
1614 // Called by the range assign to implement [23.1.1]/9
1615
1616 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1617 // 438. Ambiguity in the "do the right thing" clause
1618 template<typename _Integer>
1619 void
1620 _M_assign_dispatch(_Integer __n, _Integer __val, __true_type)
1621 { _M_fill_assign(__n, __val); }
1622
1623 // Called by the range assign to implement [23.1.1]/9
1624 template<typename _InputIterator>
1625 void
1626 _M_assign_dispatch(_InputIterator __first, _InputIterator __last,
1627 __false_type)
1628 { _M_assign_aux(__first, __last, std::__iterator_category(__first)); }
1629
1630 // Called by the second assign_dispatch above
1631 template<typename _InputIterator>
1632 void
1633 _M_assign_aux(_InputIterator __first, _InputIterator __last,
1634 std::input_iterator_tag);
1635
1636 // Called by the second assign_dispatch above
1637 template<typename _ForwardIterator>
1638 void
1639 _M_assign_aux(_ForwardIterator __first, _ForwardIterator __last,
1640 std::forward_iterator_tag);
1641
1642 // Called by assign(n,t), and the range assign when it turns out
1643 // to be the same thing.
1644 void
1645 _M_fill_assign(size_type __n, const value_type& __val);
1646
1647 // Internal insert functions follow.
1648
1649 // Called by the range insert to implement [23.1.1]/9
1650
1651 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1652 // 438. Ambiguity in the "do the right thing" clause
1653 template<typename _Integer>
1654 void
1655 _M_insert_dispatch(iterator __pos, _Integer __n, _Integer __val,
1656 __true_type)
1657 { _M_fill_insert(__pos, __n, __val); }
1658
1659 // Called by the range insert to implement [23.1.1]/9
1660 template<typename _InputIterator>
1661 void
1662 _M_insert_dispatch(iterator __pos, _InputIterator __first,
1663 _InputIterator __last, __false_type)
1664 {
1665 _M_range_insert(__pos, __first, __last,
1666 std::__iterator_category(__first));
1667 }
1668
1669 // Called by the second insert_dispatch above
1670 template<typename _InputIterator>
1671 void
1672 _M_range_insert(iterator __pos, _InputIterator __first,
1673 _InputIterator __last, std::input_iterator_tag);
1674
1675 // Called by the second insert_dispatch above
1676 template<typename _ForwardIterator>
1677 void
1678 _M_range_insert(iterator __pos, _ForwardIterator __first,
1679 _ForwardIterator __last, std::forward_iterator_tag);
1680
1681 // Called by insert(p,n,x), and the range insert when it turns out to be
1682 // the same thing.
1683 void
1684 _M_fill_insert(iterator __pos, size_type __n, const value_type& __x);
1685
1686#if __cplusplus201402L >= 201103L
1687 // Called by resize(n).
1688 void
1689 _M_default_append(size_type __n);
1690
1691 bool
1692 _M_shrink_to_fit();
1693#endif
1694
1695#if __cplusplus201402L < 201103L
1696 // Called by insert(p,x)
1697 void
1698 _M_insert_aux(iterator __position, const value_type& __x);
1699
1700 void
1701 _M_realloc_insert(iterator __position, const value_type& __x);
1702#else
1703 // A value_type object constructed with _Alloc_traits::construct()
1704 // and destroyed with _Alloc_traits::destroy().
1705 struct _Temporary_value
1706 {
1707 template<typename... _Args>
1708 explicit
1709 _Temporary_value(vector* __vec, _Args&&... __args) : _M_this(__vec)
1710 {
1711 _Alloc_traits::construct(_M_this->_M_impl, _M_ptr(),
1712 std::forward<_Args>(__args)...);
1713 }
1714
1715 ~_Temporary_value()
1716 { _Alloc_traits::destroy(_M_this->_M_impl, _M_ptr()); }
1717
1718 value_type&
1719 _M_val() { return *_M_ptr(); }
1720
1721 private:
1722 _Tp*
1723 _M_ptr() { return reinterpret_cast<_Tp*>(&__buf); }
1724
1725 vector* _M_this;
1726 typename aligned_storage<sizeof(_Tp), alignof(_Tp)>::type __buf;
1727 };
1728
1729 // Called by insert(p,x) and other functions when insertion needs to
1730 // reallocate or move existing elements. _Arg is either _Tp& or _Tp.
1731 template<typename _Arg>
1732 void
1733 _M_insert_aux(iterator __position, _Arg&& __arg);
1734
1735 template<typename... _Args>
1736 void
1737 _M_realloc_insert(iterator __position, _Args&&... __args);
1738
1739 // Either move-construct at the end, or forward to _M_insert_aux.
1740 iterator
1741 _M_insert_rval(const_iterator __position, value_type&& __v);
1742
1743 // Try to emplace at the end, otherwise forward to _M_insert_aux.
1744 template<typename... _Args>
1745 iterator
1746 _M_emplace_aux(const_iterator __position, _Args&&... __args);
1747
1748 // Emplacing an rvalue of the correct type can use _M_insert_rval.
1749 iterator
1750 _M_emplace_aux(const_iterator __position, value_type&& __v)
1751 { return _M_insert_rval(__position, std::move(__v)); }
1752#endif
1753
1754 // Called by _M_fill_insert, _M_insert_aux etc.
1755 size_type
1756 _M_check_len(size_type __n, const char* __s) const
1757 {
1758 if (max_size() - size() < __n)
1759 __throw_length_error(__N(__s)(__s));
1760
1761 const size_type __len = size() + (std::max)(size(), __n);
1762 return (__len < size() || __len > max_size()) ? max_size() : __len;
1763 }
1764
1765 // Called by constructors to check initial size.
1766 static size_type
1767 _S_check_init_len(size_type __n, const allocator_type& __a)
1768 {
1769 if (__n > _S_max_size(_Tp_alloc_type(__a)))
1770 __throw_length_error(
1771 __N("cannot create std::vector larger than max_size()")("cannot create std::vector larger than max_size()"));
1772 return __n;
1773 }
1774
1775 static size_type
1776 _S_max_size(const _Tp_alloc_type& __a) _GLIBCXX_NOEXCEPTnoexcept
1777 {
1778 // std::distance(begin(), end()) cannot be greater than PTRDIFF_MAX,
1779 // and realistically we can't store more than PTRDIFF_MAX/sizeof(T)
1780 // (even if std::allocator_traits::max_size says we can).
1781 const size_t __diffmax
1782 = __gnu_cxx::__numeric_traits<ptrdiff_t>::__max / sizeof(_Tp);
1783 const size_t __allocmax = _Alloc_traits::max_size(__a);
1784 return (std::min)(__diffmax, __allocmax);
1785 }
1786
1787 // Internal erase functions follow.
1788
1789 // Called by erase(q1,q2), clear(), resize(), _M_fill_assign,
1790 // _M_assign_aux.
1791 void
1792 _M_erase_at_end(pointer __pos) _GLIBCXX_NOEXCEPTnoexcept
1793 {
1794 if (size_type __n = this->_M_impl._M_finish - __pos)
1795 {
1796 std::_Destroy(__pos, this->_M_impl._M_finish,
1797 _M_get_Tp_allocator());
1798 this->_M_impl._M_finish = __pos;
1799 _GLIBCXX_ASAN_ANNOTATE_SHRINK(__n);
1800 }
1801 }
1802
1803 iterator
1804 _M_erase(iterator __position);
1805
1806 iterator
1807 _M_erase(iterator __first, iterator __last);
1808
1809#if __cplusplus201402L >= 201103L
1810 private:
1811 // Constant-time move assignment when source object's memory can be
1812 // moved, either because the source's allocator will move too
1813 // or because the allocators are equal.
1814 void
1815 _M_move_assign(vector&& __x, true_type) noexcept
1816 {
1817 vector __tmp(get_allocator());
1818 this->_M_impl._M_swap_data(__x._M_impl);
1819 __tmp._M_impl._M_swap_data(__x._M_impl);
1820 std::__alloc_on_move(_M_get_Tp_allocator(), __x._M_get_Tp_allocator());
1821 }
1822
1823 // Do move assignment when it might not be possible to move source
1824 // object's memory, resulting in a linear-time operation.
1825 void
1826 _M_move_assign(vector&& __x, false_type)
1827 {
1828 if (__x._M_get_Tp_allocator() == this->_M_get_Tp_allocator())
1829 _M_move_assign(std::move(__x), true_type());
1830 else
1831 {
1832 // The rvalue's allocator cannot be moved and is not equal,
1833 // so we need to individually move each element.
1834 this->_M_assign_aux(std::make_move_iterator(__x.begin()),
1835 std::make_move_iterator(__x.end()),
1836 std::random_access_iterator_tag());
1837 __x.clear();
1838 }
1839 }
1840#endif
1841
1842 template<typename _Up>
1843 _Up*
1844 _M_data_ptr(_Up* __ptr) const _GLIBCXX_NOEXCEPTnoexcept
1845 { return __ptr; }
1846
1847#if __cplusplus201402L >= 201103L
1848 template<typename _Ptr>
1849 typename std::pointer_traits<_Ptr>::element_type*
1850 _M_data_ptr(_Ptr __ptr) const
1851 { return empty() ? nullptr : std::__to_address(__ptr); }
1852#else
1853 template<typename _Up>
1854 _Up*
1855 _M_data_ptr(_Up* __ptr) _GLIBCXX_NOEXCEPTnoexcept
1856 { return __ptr; }
1857
1858 template<typename _Ptr>
1859 value_type*
1860 _M_data_ptr(_Ptr __ptr)
1861 { return empty() ? (value_type*)0 : __ptr.operator->(); }
1862
1863 template<typename _Ptr>
1864 const value_type*
1865 _M_data_ptr(_Ptr __ptr) const
1866 { return empty() ? (const value_type*)0 : __ptr.operator->(); }
1867#endif
1868 };
1869
1870#if __cpp_deduction_guides >= 201606
1871 template<typename _InputIterator, typename _ValT
1872 = typename iterator_traits<_InputIterator>::value_type,
1873 typename _Allocator = allocator<_ValT>,
1874 typename = _RequireInputIter<_InputIterator>,
1875 typename = _RequireAllocator<_Allocator>>
1876 vector(_InputIterator, _InputIterator, _Allocator = _Allocator())
1877 -> vector<_ValT, _Allocator>;
1878#endif
1879
1880 /**
1881 * @brief Vector equality comparison.
1882 * @param __x A %vector.
1883 * @param __y A %vector of the same type as @a __x.
1884 * @return True iff the size and elements of the vectors are equal.
1885 *
1886 * This is an equivalence relation. It is linear in the size of the
1887 * vectors. Vectors are considered equivalent if their sizes are equal,
1888 * and if corresponding elements compare equal.
1889 */
1890 template<typename _Tp, typename _Alloc>
1891 inline bool
1892 operator==(const vector<_Tp, _Alloc>& __x, const vector<_Tp, _Alloc>& __y)
1893 { return (__x.size() == __y.size()
1894 && std::equal(__x.begin(), __x.end(), __y.begin())); }
1895
1896#if __cpp_lib_three_way_comparison
1897 /**
1898 * @brief Vector ordering relation.
1899 * @param __x A `vector`.
1900 * @param __y A `vector` of the same type as `__x`.
1901 * @return A value indicating whether `__x` is less than, equal to,
1902 * greater than, or incomparable with `__y`.
1903 *
1904 * See `std::lexicographical_compare_three_way()` for how the determination
1905 * is made. This operator is used to synthesize relational operators like
1906 * `<` and `>=` etc.
1907 */
1908 template<typename _Tp, typename _Alloc>
1909 inline __detail::__synth3way_t<_Tp>
1910 operator<=>(const vector<_Tp, _Alloc>& __x, const vector<_Tp, _Alloc>& __y)
1911 {
1912 return std::lexicographical_compare_three_way(__x.begin(), __x.end(),
1913 __y.begin(), __y.end(),
1914 __detail::__synth3way);
1915 }
1916#else
1917 /**
1918 * @brief Vector ordering relation.
1919 * @param __x A %vector.
1920 * @param __y A %vector of the same type as @a __x.
1921 * @return True iff @a __x is lexicographically less than @a __y.
1922 *
1923 * This is a total ordering relation. It is linear in the size of the
1924 * vectors. The elements must be comparable with @c <.
1925 *
1926 * See std::lexicographical_compare() for how the determination is made.
1927 */
1928 template<typename _Tp, typename _Alloc>
1929 inline bool
1930 operator<(const vector<_Tp, _Alloc>& __x, const vector<_Tp, _Alloc>& __y)
1931 { return std::lexicographical_compare(__x.begin(), __x.end(),
1932 __y.begin(), __y.end()); }
1933
1934 /// Based on operator==
1935 template<typename _Tp, typename _Alloc>
1936 inline bool
1937 operator!=(const vector<_Tp, _Alloc>& __x, const vector<_Tp, _Alloc>& __y)
1938 { return !(__x == __y); }
1939
1940 /// Based on operator<
1941 template<typename _Tp, typename _Alloc>
1942 inline bool
1943 operator>(const vector<_Tp, _Alloc>& __x, const vector<_Tp, _Alloc>& __y)
1944 { return __y < __x; }
1945
1946 /// Based on operator<
1947 template<typename _Tp, typename _Alloc>
1948 inline bool
1949 operator<=(const vector<_Tp, _Alloc>& __x, const vector<_Tp, _Alloc>& __y)
1950 { return !(__y < __x); }
1951
1952 /// Based on operator<
1953 template<typename _Tp, typename _Alloc>
1954 inline bool
1955 operator>=(const vector<_Tp, _Alloc>& __x, const vector<_Tp, _Alloc>& __y)
1956 { return !(__x < __y); }
1957#endif // three-way comparison
1958
1959 /// See std::vector::swap().
1960 template<typename _Tp, typename _Alloc>
1961 inline void
1962 swap(vector<_Tp, _Alloc>& __x, vector<_Tp, _Alloc>& __y)
1963 _GLIBCXX_NOEXCEPT_IF(noexcept(__x.swap(__y)))noexcept(noexcept(__x.swap(__y)))
1964 { __x.swap(__y); }
1965
1966_GLIBCXX_END_NAMESPACE_CONTAINER
1967
1968#if __cplusplus201402L >= 201703L
1969 namespace __detail::__variant
1970 {
1971 template<typename> struct _Never_valueless_alt; // see <variant>
1972
1973 // Provide the strong exception-safety guarantee when emplacing a
1974 // vector into a variant, but only if move assignment cannot throw.
1975 template<typename _Tp, typename _Alloc>
1976 struct _Never_valueless_alt<_GLIBCXX_STD_Cstd::vector<_Tp, _Alloc>>
1977 : std::is_nothrow_move_assignable<_GLIBCXX_STD_Cstd::vector<_Tp, _Alloc>>
1978 { };
1979 } // namespace __detail::__variant
1980#endif // C++17
1981
1982_GLIBCXX_END_NAMESPACE_VERSION
1983} // namespace std
1984
1985#endif /* _STL_VECTOR_H */

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/MC/MCRegisterInfo.h

1//===- MC/MCRegisterInfo.h - Target Register Description --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes an abstract interface used to get information about a
10// target machines register file. This information is used for a variety of
11// purposed, especially register allocation.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_MC_MCREGISTERINFO_H
16#define LLVM_MC_MCREGISTERINFO_H
17
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/iterator.h"
20#include "llvm/ADT/iterator_range.h"
21#include "llvm/MC/LaneBitmask.h"
22#include "llvm/MC/MCRegister.h"
23#include <cassert>
24#include <cstdint>
25#include <iterator>
26#include <utility>
27
28namespace llvm {
29
30/// MCRegisterClass - Base class of TargetRegisterClass.
31class MCRegisterClass {
32public:
33 using iterator = const MCPhysReg*;
34 using const_iterator = const MCPhysReg*;
35
36 const iterator RegsBegin;
37 const uint8_t *const RegSet;
38 const uint32_t NameIdx;
39 const uint16_t RegsSize;
40 const uint16_t RegSetSize;
41 const uint16_t ID;
42 const uint16_t RegSizeInBits;
43 const int8_t CopyCost;
44 const bool Allocatable;
45
46 /// getID() - Return the register class ID number.
47 ///
48 unsigned getID() const { return ID; }
49
50 /// begin/end - Return all of the registers in this class.
51 ///
52 iterator begin() const { return RegsBegin; }
53 iterator end() const { return RegsBegin + RegsSize; }
54
55 /// getNumRegs - Return the number of registers in this class.
56 ///
57 unsigned getNumRegs() const { return RegsSize; }
58
59 /// getRegister - Return the specified register in the class.
60 ///
61 unsigned getRegister(unsigned i) const {
62 assert(i < getNumRegs() && "Register number out of range!")(static_cast<void> (0));
63 return RegsBegin[i];
64 }
65
66 /// contains - Return true if the specified register is included in this
67 /// register class. This does not include virtual registers.
68 bool contains(MCRegister Reg) const {
69 unsigned RegNo = unsigned(Reg);
70 unsigned InByte = RegNo % 8;
71 unsigned Byte = RegNo / 8;
72 if (Byte >= RegSetSize)
73 return false;
74 return (RegSet[Byte] & (1 << InByte)) != 0;
75 }
76
77 /// contains - Return true if both registers are in this class.
78 bool contains(MCRegister Reg1, MCRegister Reg2) const {
79 return contains(Reg1) && contains(Reg2);
80 }
81
82 /// Return the size of the physical register in bits if we are able to
83 /// determine it. This always returns zero for registers of targets that use
84 /// HW modes, as we need more information to determine the size of registers
85 /// in such cases. Use TargetRegisterInfo to cover them.
86 unsigned getSizeInBits() const { return RegSizeInBits; }
87
88 /// getCopyCost - Return the cost of copying a value between two registers in
89 /// this class. A negative number means the register class is very expensive
90 /// to copy e.g. status flag register classes.
91 int getCopyCost() const { return CopyCost; }
92
93 /// isAllocatable - Return true if this register class may be used to create
94 /// virtual registers.
95 bool isAllocatable() const { return Allocatable; }
96};
97
/// MCRegisterDesc - This record contains information about a particular
/// register.  The SubRegs field is a zero terminated array of registers that
/// are sub-registers of the specific register, e.g. AL, AH are sub-registers
/// of AX. The SuperRegs field is a zero terminated array of registers that are
/// super-registers of the specific register, e.g. RAX, EAX, are
/// super-registers of AX.
///
struct MCRegisterDesc {
  uint32_t Name;      // Printable name for the reg (for debugging)
  uint32_t SubRegs;   // Sub-register set, described above
  uint32_t SuperRegs; // Super-register set, described above

  // Offset into MCRI::SubRegIndices of a list of sub-register indices for each
  // sub-register in SubRegs.
  uint32_t SubRegIndices;

  // RegUnits - Points to the list of register units. The low 4 bits holds the
  // Scale, the high bits hold an offset into DiffLists. See MCRegUnitIterator.
  uint32_t RegUnits;

  /// Index into list with lane mask sequences. The sequence contains a lanemask
  /// for every register unit.
  uint16_t RegUnitLaneMasks;
};
122
123/// MCRegisterInfo base class - We assume that the target defines a static
124/// array of MCRegisterDesc objects that represent all of the machine
125/// registers that the target has. As such, we simply have to track a pointer
126/// to this array so that we can turn register number into a register
127/// descriptor.
128///
129/// Note this class is designed to be a base class of TargetRegisterInfo, which
130/// is the interface used by codegen. However, specific targets *should never*
131/// specialize this class. MCRegisterInfo should only contain getters to access
132/// TableGen generated physical register data. It must not be extended with
133/// virtual methods.
134///
135class MCRegisterInfo {
136public:
137 using regclass_iterator = const MCRegisterClass *;
138
139 /// DwarfLLVMRegPair - Emitted by tablegen so Dwarf<->LLVM reg mappings can be
140 /// performed with a binary search.
141 struct DwarfLLVMRegPair {
142 unsigned FromReg;
143 unsigned ToReg;
144
145 bool operator<(DwarfLLVMRegPair RHS) const { return FromReg < RHS.FromReg; }
146 };
147
148 /// SubRegCoveredBits - Emitted by tablegen: bit range covered by a subreg
149 /// index, -1 in any being invalid.
150 struct SubRegCoveredBits {
151 uint16_t Offset;
152 uint16_t Size;
153 };
154
155private:
156 const MCRegisterDesc *Desc; // Pointer to the descriptor array
157 unsigned NumRegs; // Number of entries in the array
158 MCRegister RAReg; // Return address register
159 MCRegister PCReg; // Program counter register
160 const MCRegisterClass *Classes; // Pointer to the regclass array
161 unsigned NumClasses; // Number of entries in the array
162 unsigned NumRegUnits; // Number of regunits.
163 const MCPhysReg (*RegUnitRoots)[2]; // Pointer to regunit root table.
164 const MCPhysReg *DiffLists; // Pointer to the difflists array
165 const LaneBitmask *RegUnitMaskSequences; // Pointer to lane mask sequences
166 // for register units.
167 const char *RegStrings; // Pointer to the string table.
168 const char *RegClassStrings; // Pointer to the class strings.
169 const uint16_t *SubRegIndices; // Pointer to the subreg lookup
170 // array.
171 const SubRegCoveredBits *SubRegIdxRanges; // Pointer to the subreg covered
172 // bit ranges array.
173 unsigned NumSubRegIndices; // Number of subreg indices.
174 const uint16_t *RegEncodingTable; // Pointer to array of register
175 // encodings.
176
177 unsigned L2DwarfRegsSize;
178 unsigned EHL2DwarfRegsSize;
179 unsigned Dwarf2LRegsSize;
180 unsigned EHDwarf2LRegsSize;
181 const DwarfLLVMRegPair *L2DwarfRegs; // LLVM to Dwarf regs mapping
182 const DwarfLLVMRegPair *EHL2DwarfRegs; // LLVM to Dwarf regs mapping EH
183 const DwarfLLVMRegPair *Dwarf2LRegs; // Dwarf to LLVM regs mapping
184 const DwarfLLVMRegPair *EHDwarf2LRegs; // Dwarf to LLVM regs mapping EH
185 DenseMap<MCRegister, int> L2SEHRegs; // LLVM to SEH regs mapping
186 DenseMap<MCRegister, int> L2CVRegs; // LLVM to CV regs mapping
187
188public:
189 // Forward declaration to become a friend class of DiffListIterator.
190 template <class SubT> class mc_difflist_iterator;
191
192 /// DiffListIterator - Base iterator class that can traverse the
193 /// differentially encoded register and regunit lists in DiffLists.
194 /// Don't use this class directly, use one of the specialized sub-classes
195 /// defined below.
196 class DiffListIterator {
197 uint16_t Val = 0;
198 const MCPhysReg *List = nullptr;
199
200 protected:
201 /// Create an invalid iterator. Call init() to point to something useful.
202 DiffListIterator() = default;
203
204 /// init - Point the iterator to InitVal, decoding subsequent values from
205 /// DiffList. The iterator will initially point to InitVal, sub-classes are
206 /// responsible for skipping the seed value if it is not part of the list.
207 void init(MCPhysReg InitVal, const MCPhysReg *DiffList) {
208 Val = InitVal;
209 List = DiffList;
210 }
211
212 /// advance - Move to the next list position, return the applied
213 /// differential. This function does not detect the end of the list, that
214 /// is the caller's responsibility (by checking for a 0 return value).
215 MCRegister advance() {
216 assert(isValid() && "Cannot move off the end of the list.")(static_cast<void> (0));
217 MCPhysReg D = *List++;
40
Dereference of null pointer
218 Val += D;
219 return D;
220 }
221
222 public:
223 /// isValid - returns true if this iterator is not yet at the end.
224 bool isValid() const { return List; }
225
226 /// Dereference the iterator to get the value at the current position.
227 MCRegister operator*() const { return Val; }
228
229 /// Pre-increment to move to the next position.
230 void operator++() {
231 // The end of the list is encoded as a 0 differential.
232 if (!advance())
39
Calling 'DiffListIterator::advance'
233 List = nullptr;
234 }
235
236 template <class SubT> friend class MCRegisterInfo::mc_difflist_iterator;
237 };
238
239 /// Forward iterator using DiffListIterator.
240 template <class SubT>
241 class mc_difflist_iterator
242 : public iterator_facade_base<mc_difflist_iterator<SubT>,
243 std::forward_iterator_tag, MCPhysReg> {
244 MCRegisterInfo::DiffListIterator Iter;
245 /// Current value as MCPhysReg, so we can return a reference to it.
246 MCPhysReg Val;
247
248 protected:
249 mc_difflist_iterator(MCRegisterInfo::DiffListIterator Iter) : Iter(Iter) {}
250
251 // Allow conversion between instantiations where valid.
252 mc_difflist_iterator(MCRegister Reg, const MCPhysReg *DiffList) {
253 Iter.init(Reg, DiffList);
254 Val = *Iter;
255 }
256
257 public:
258 // Allow default construction to build variables, but this doesn't build
259 // a useful iterator.
260 mc_difflist_iterator() = default;
261
262 /// Return an iterator past the last element.
263 static SubT end() {
264 SubT End;
265 End.Iter.List = nullptr;
266 return End;
267 }
268
269 bool operator==(const mc_difflist_iterator &Arg) const {
270 return Iter.List == Arg.Iter.List;
271 }
272
273 const MCPhysReg &operator*() const { return Val; }
274
275 using mc_difflist_iterator::iterator_facade_base::operator++;
276 void operator++() {
277 assert(Iter.List && "Cannot increment the end iterator!")(static_cast<void> (0));
278 ++Iter;
279 Val = *Iter;
280 }
281 };
282
283 /// Forward iterator over all sub-registers.
284 /// TODO: Replace remaining uses of MCSubRegIterator.
285 class mc_subreg_iterator : public mc_difflist_iterator<mc_subreg_iterator> {
286 public:
287 mc_subreg_iterator(MCRegisterInfo::DiffListIterator Iter)
288 : mc_difflist_iterator(Iter) {}
289 mc_subreg_iterator() = default;
290 mc_subreg_iterator(MCRegister Reg, const MCRegisterInfo *MCRI)
291 : mc_difflist_iterator(Reg, MCRI->DiffLists + MCRI->get(Reg).SubRegs) {}
292 };
293
294 /// Forward iterator over all super-registers.
295 /// TODO: Replace remaining uses of MCSuperRegIterator.
296 class mc_superreg_iterator
297 : public mc_difflist_iterator<mc_superreg_iterator> {
298 public:
299 mc_superreg_iterator(MCRegisterInfo::DiffListIterator Iter)
300 : mc_difflist_iterator(Iter) {}
301 mc_superreg_iterator() = default;
302 mc_superreg_iterator(MCRegister Reg, const MCRegisterInfo *MCRI)
303 : mc_difflist_iterator(Reg,
304 MCRI->DiffLists + MCRI->get(Reg).SuperRegs) {}
305 };
306
307 /// Return an iterator range over all sub-registers of \p Reg, excluding \p
308 /// Reg.
309 iterator_range<mc_subreg_iterator> subregs(MCRegister Reg) const {
310 return make_range(std::next(mc_subreg_iterator(Reg, this)),
311 mc_subreg_iterator::end());
312 }
313
314 /// Return an iterator range over all sub-registers of \p Reg, including \p
315 /// Reg.
316 iterator_range<mc_subreg_iterator> subregs_inclusive(MCRegister Reg) const {
317 return make_range({Reg, this}, mc_subreg_iterator::end());
318 }
319
320 /// Return an iterator range over all super-registers of \p Reg, excluding \p
321 /// Reg.
322 iterator_range<mc_superreg_iterator> superregs(MCRegister Reg) const {
323 return make_range(std::next(mc_superreg_iterator(Reg, this)),
324 mc_superreg_iterator::end());
325 }
326
327 /// Return an iterator range over all super-registers of \p Reg, including \p
328 /// Reg.
329 iterator_range<mc_superreg_iterator>
330 superregs_inclusive(MCRegister Reg) const {
331 return make_range({Reg, this}, mc_superreg_iterator::end());
332 }
333
334 /// Return an iterator range over all sub- and super-registers of \p Reg,
335 /// including \p Reg.
336 detail::concat_range<const MCPhysReg, iterator_range<mc_subreg_iterator>,
337 iterator_range<mc_superreg_iterator>>
338 sub_and_superregs_inclusive(MCRegister Reg) const {
339 return concat<const MCPhysReg>(subregs_inclusive(Reg), superregs(Reg));
340 }
341
342 // These iterators are allowed to sub-class DiffListIterator and access
343 // internal list pointers.
344 friend class MCSubRegIterator;
345 friend class MCSubRegIndexIterator;
346 friend class MCSuperRegIterator;
347 friend class MCRegUnitIterator;
348 friend class MCRegUnitMaskIterator;
349 friend class MCRegUnitRootIterator;
350
351 /// Initialize MCRegisterInfo, called by TableGen
352 /// auto-generated routines. *DO NOT USE*.
353 void InitMCRegisterInfo(const MCRegisterDesc *D, unsigned NR, unsigned RA,
354 unsigned PC,
355 const MCRegisterClass *C, unsigned NC,
356 const MCPhysReg (*RURoots)[2],
357 unsigned NRU,
358 const MCPhysReg *DL,
359 const LaneBitmask *RUMS,
360 const char *Strings,
361 const char *ClassStrings,
362 const uint16_t *SubIndices,
363 unsigned NumIndices,
364 const SubRegCoveredBits *SubIdxRanges,
365 const uint16_t *RET) {
366 Desc = D;
367 NumRegs = NR;
368 RAReg = RA;
369 PCReg = PC;
370 Classes = C;
371 DiffLists = DL;
372 RegUnitMaskSequences = RUMS;
373 RegStrings = Strings;
374 RegClassStrings = ClassStrings;
375 NumClasses = NC;
376 RegUnitRoots = RURoots;
377 NumRegUnits = NRU;
378 SubRegIndices = SubIndices;
379 NumSubRegIndices = NumIndices;
380 SubRegIdxRanges = SubIdxRanges;
381 RegEncodingTable = RET;
382
383 // Initialize DWARF register mapping variables
384 EHL2DwarfRegs = nullptr;
385 EHL2DwarfRegsSize = 0;
386 L2DwarfRegs = nullptr;
387 L2DwarfRegsSize = 0;
388 EHDwarf2LRegs = nullptr;
389 EHDwarf2LRegsSize = 0;
390 Dwarf2LRegs = nullptr;
391 Dwarf2LRegsSize = 0;
392 }
393
394 /// Used to initialize LLVM register to Dwarf
395 /// register number mapping. Called by TableGen auto-generated routines.
396 /// *DO NOT USE*.
397 void mapLLVMRegsToDwarfRegs(const DwarfLLVMRegPair *Map, unsigned Size,
398 bool isEH) {
399 if (isEH) {
400 EHL2DwarfRegs = Map;
401 EHL2DwarfRegsSize = Size;
402 } else {
403 L2DwarfRegs = Map;
404 L2DwarfRegsSize = Size;
405 }
406 }
407
408 /// Used to initialize Dwarf register to LLVM
409 /// register number mapping. Called by TableGen auto-generated routines.
410 /// *DO NOT USE*.
411 void mapDwarfRegsToLLVMRegs(const DwarfLLVMRegPair *Map, unsigned Size,
412 bool isEH) {
413 if (isEH) {
414 EHDwarf2LRegs = Map;
415 EHDwarf2LRegsSize = Size;
416 } else {
417 Dwarf2LRegs = Map;
418 Dwarf2LRegsSize = Size;
419 }
420 }
421
422 /// mapLLVMRegToSEHReg - Used to initialize LLVM register to SEH register
423 /// number mapping. By default the SEH register number is just the same
424 /// as the LLVM register number.
425 /// FIXME: TableGen these numbers. Currently this requires target specific
426 /// initialization code.
427 void mapLLVMRegToSEHReg(MCRegister LLVMReg, int SEHReg) {
428 L2SEHRegs[LLVMReg] = SEHReg;
429 }
430
431 void mapLLVMRegToCVReg(MCRegister LLVMReg, int CVReg) {
432 L2CVRegs[LLVMReg] = CVReg;
433 }
434
435 /// This method should return the register where the return
436 /// address can be found.
437 MCRegister getRARegister() const {
438 return RAReg;
439 }
440
441 /// Return the register which is the program counter.
442 MCRegister getProgramCounter() const {
443 return PCReg;
444 }
445
446 const MCRegisterDesc &operator[](MCRegister RegNo) const {
447 assert(RegNo < NumRegs &&(static_cast<void> (0))
448 "Attempting to access record for invalid register number!")(static_cast<void> (0));
449 return Desc[RegNo];
450 }
451
452 /// Provide a get method, equivalent to [], but more useful with a
453 /// pointer to this object.
454 const MCRegisterDesc &get(MCRegister RegNo) const {
455 return operator[](RegNo);
456 }
457
458 /// Returns the physical register number of sub-register "Index"
459 /// for physical register RegNo. Return zero if the sub-register does not
460 /// exist.
461 MCRegister getSubReg(MCRegister Reg, unsigned Idx) const;
462
463 /// Return a super-register of the specified register
464 /// Reg so its sub-register of index SubIdx is Reg.
465 MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
466 const MCRegisterClass *RC) const;
467
468 /// For a given register pair, return the sub-register index
469 /// if the second register is a sub-register of the first. Return zero
470 /// otherwise.
471 unsigned getSubRegIndex(MCRegister RegNo, MCRegister SubRegNo) const;
472
473 /// Get the size of the bit range covered by a sub-register index.
474 /// If the index isn't continuous, return the sum of the sizes of its parts.
475 /// If the index is used to access subregisters of different sizes, return -1.
476 unsigned getSubRegIdxSize(unsigned Idx) const;
477
478 /// Get the offset of the bit range covered by a sub-register index.
479 /// If an Offset doesn't make sense (the index isn't continuous, or is used to
480 /// access sub-registers at different offsets), return -1.
481 unsigned getSubRegIdxOffset(unsigned Idx) const;
482
483 /// Return the human-readable symbolic target-specific name for the
484 /// specified physical register.
485 const char *getName(MCRegister RegNo) const {
486 return RegStrings + get(RegNo).Name;
487 }
488
489 /// Return the number of registers this target has (useful for
490 /// sizing arrays holding per register information)
491 unsigned getNumRegs() const {
492 return NumRegs;
493 }
494
495 /// Return the number of sub-register indices
496 /// understood by the target. Index 0 is reserved for the no-op sub-register,
497 /// while 1 to getNumSubRegIndices() - 1 represent real sub-registers.
498 unsigned getNumSubRegIndices() const {
499 return NumSubRegIndices;
500 }
501
502 /// Return the number of (native) register units in the
503 /// target. Register units are numbered from 0 to getNumRegUnits() - 1. They
504 /// can be accessed through MCRegUnitIterator defined below.
505 unsigned getNumRegUnits() const {
506 return NumRegUnits;
507 }
508
509 /// Map a target register to an equivalent dwarf register
510 /// number. Returns -1 if there is no equivalent value. The second
511 /// parameter allows targets to use different numberings for EH info and
512 /// debugging info.
513 int getDwarfRegNum(MCRegister RegNum, bool isEH) const;
514
515 /// Map a dwarf register back to a target register. Returns None is there is
516 /// no mapping.
517 Optional<unsigned> getLLVMRegNum(unsigned RegNum, bool isEH) const;
518
519 /// Map a target EH register number to an equivalent DWARF register
520 /// number.
521 int getDwarfRegNumFromDwarfEHRegNum(unsigned RegNum) const;
522
523 /// Map a target register to an equivalent SEH register
524 /// number. Returns LLVM register number if there is no equivalent value.
525 int getSEHRegNum(MCRegister RegNum) const;
526
527 /// Map a target register to an equivalent CodeView register
528 /// number.
529 int getCodeViewRegNum(MCRegister RegNum) const;
530
531 regclass_iterator regclass_begin() const { return Classes; }
532 regclass_iterator regclass_end() const { return Classes+NumClasses; }
533 iterator_range<regclass_iterator> regclasses() const {
534 return make_range(regclass_begin(), regclass_end());
535 }
536
537 unsigned getNumRegClasses() const {
538 return (unsigned)(regclass_end()-regclass_begin());
539 }
540
541 /// Returns the register class associated with the enumeration
542 /// value. See class MCOperandInfo.
543 const MCRegisterClass& getRegClass(unsigned i) const {
544 assert(i < getNumRegClasses() && "Register Class ID out of range")(static_cast<void> (0));
545 return Classes[i];
546 }
547
548 const char *getRegClassName(const MCRegisterClass *Class) const {
549 return RegClassStrings + Class->NameIdx;
550 }
551
552 /// Returns the encoding for RegNo
553 uint16_t getEncodingValue(MCRegister RegNo) const {
554 assert(RegNo < NumRegs &&(static_cast<void> (0))
555 "Attempting to get encoding for invalid register number!")(static_cast<void> (0));
556 return RegEncodingTable[RegNo];
557 }
558
559 /// Returns true if RegB is a sub-register of RegA.
560 bool isSubRegister(MCRegister RegA, MCRegister RegB) const {
561 return isSuperRegister(RegB, RegA);
562 }
563
564 /// Returns true if RegB is a super-register of RegA.
565 bool isSuperRegister(MCRegister RegA, MCRegister RegB) const;
566
567 /// Returns true if RegB is a sub-register of RegA or if RegB == RegA.
568 bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const {
569 return isSuperRegisterEq(RegB, RegA);
570 }
571
572 /// Returns true if RegB is a super-register of RegA or if
573 /// RegB == RegA.
574 bool isSuperRegisterEq(MCRegister RegA, MCRegister RegB) const {
575 return RegA == RegB || isSuperRegister(RegA, RegB);
576 }
577
578 /// Returns true if RegB is a super-register or sub-register of RegA
579 /// or if RegB == RegA.
580 bool isSuperOrSubRegisterEq(MCRegister RegA, MCRegister RegB) const {
581 return isSubRegisterEq(RegA, RegB) || isSuperRegister(RegA, RegB);
582 }
583};
584
585//===----------------------------------------------------------------------===//
586// Register List Iterators
587//===----------------------------------------------------------------------===//
588
589// MCRegisterInfo provides lists of super-registers, sub-registers, and
590// aliasing registers. Use these iterator classes to traverse the lists.
591
592/// MCSubRegIterator enumerates all sub-registers of Reg.
593/// If IncludeSelf is set, Reg itself is included in the list.
594class MCSubRegIterator : public MCRegisterInfo::DiffListIterator {
595public:
596 MCSubRegIterator(MCRegister Reg, const MCRegisterInfo *MCRI,
597 bool IncludeSelf = false) {
598 init(Reg, MCRI->DiffLists + MCRI->get(Reg).SubRegs);
599 // Initially, the iterator points to Reg itself.
600 if (!IncludeSelf)
601 ++*this;
602 }
603};
604
605/// Iterator that enumerates the sub-registers of a Reg and the associated
606/// sub-register indices.
607class MCSubRegIndexIterator {
608 MCSubRegIterator SRIter;
609 const uint16_t *SRIndex;
610
611public:
612 /// Constructs an iterator that traverses subregisters and their
613 /// associated subregister indices.
614 MCSubRegIndexIterator(MCRegister Reg, const MCRegisterInfo *MCRI)
615 : SRIter(Reg, MCRI) {
616 SRIndex = MCRI->SubRegIndices + MCRI->get(Reg).SubRegIndices;
617 }
618
619 /// Returns current sub-register.
620 MCRegister getSubReg() const {
621 return *SRIter;
622 }
623
624 /// Returns sub-register index of the current sub-register.
625 unsigned getSubRegIndex() const {
626 return *SRIndex;
627 }
628
629 /// Returns true if this iterator is not yet at the end.
630 bool isValid() const { return SRIter.isValid(); }
631
632 /// Moves to the next position.
633 void operator++() {
634 ++SRIter;
635 ++SRIndex;
636 }
637};
638
639/// MCSuperRegIterator enumerates all super-registers of Reg.
640/// If IncludeSelf is set, Reg itself is included in the list.
641class MCSuperRegIterator : public MCRegisterInfo::DiffListIterator {
642public:
643 MCSuperRegIterator() = default;
644
645 MCSuperRegIterator(MCRegister Reg, const MCRegisterInfo *MCRI,
646 bool IncludeSelf = false) {
647 init(Reg, MCRI->DiffLists + MCRI->get(Reg).SuperRegs);
648 // Initially, the iterator points to Reg itself.
649 if (!IncludeSelf
36.1
'IncludeSelf' is false
36.1
'IncludeSelf' is false
36.1
'IncludeSelf' is false
)
37
Taking true branch
650 ++*this;
38
Calling 'DiffListIterator::operator++'
651 }
652};
653
654// Definition for isSuperRegister. Put it down here since it needs the
655// iterator defined above in addition to the MCRegisterInfo class itself.
656inline bool MCRegisterInfo::isSuperRegister(MCRegister RegA, MCRegister RegB) const{
657 for (MCSuperRegIterator I(RegA, this); I.isValid(); ++I)
658 if (*I == RegB)
659 return true;
660 return false;
661}
662
663//===----------------------------------------------------------------------===//
664// Register Units
665//===----------------------------------------------------------------------===//
666
667// Register units are used to compute register aliasing. Every register has at
668// least one register unit, but it can have more. Two registers overlap if and
669// only if they have a common register unit.
670//
671// A target with a complicated sub-register structure will typically have many
672// fewer register units than actual registers. MCRI::getNumRegUnits() returns
673// the number of register units in the target.
674
675// MCRegUnitIterator enumerates a list of register units for Reg. The list is
676// in ascending numerical order.
677class MCRegUnitIterator : public MCRegisterInfo::DiffListIterator {
678public:
679 /// MCRegUnitIterator - Create an iterator that traverses the register units
680 /// in Reg.
681 MCRegUnitIterator() = default;
682
683 MCRegUnitIterator(MCRegister Reg, const MCRegisterInfo *MCRI) {
684 assert(Reg && "Null register has no regunits")(static_cast<void> (0));
685 assert(MCRegister::isPhysicalRegister(Reg.id()))(static_cast<void> (0));
686 // Decode the RegUnits MCRegisterDesc field.
687 unsigned RU = MCRI->get(Reg).RegUnits;
688 unsigned Scale = RU & 15;
689 unsigned Offset = RU >> 4;
690
691 // Initialize the iterator to Reg * Scale, and the List pointer to
692 // DiffLists + Offset.
693 init(Reg * Scale, MCRI->DiffLists + Offset);
694
695 // That may not be a valid unit, we need to advance by one to get the real
696 // unit number. The first differential can be 0 which would normally
697 // terminate the list, but since we know every register has at least one
698 // unit, we can allow a 0 differential here.
699 advance();
700 }
701};
702
703/// MCRegUnitMaskIterator enumerates a list of register units and their
704/// associated lane masks for Reg. The register units are in ascending
705/// numerical order.
706class MCRegUnitMaskIterator {
707 MCRegUnitIterator RUIter;
708 const LaneBitmask *MaskListIter;
709
710public:
711 MCRegUnitMaskIterator() = default;
712
713 /// Constructs an iterator that traverses the register units and their
714 /// associated LaneMasks in Reg.
715 MCRegUnitMaskIterator(MCRegister Reg, const MCRegisterInfo *MCRI)
716 : RUIter(Reg, MCRI) {
717 uint16_t Idx = MCRI->get(Reg).RegUnitLaneMasks;
718 MaskListIter = &MCRI->RegUnitMaskSequences[Idx];
719 }
720
721 /// Returns a (RegUnit, LaneMask) pair.
722 std::pair<unsigned,LaneBitmask> operator*() const {
723 return std::make_pair(*RUIter, *MaskListIter);
724 }
725
726 /// Returns true if this iterator is not yet at the end.
727 bool isValid() const { return RUIter.isValid(); }
728
729 /// Moves to the next position.
730 void operator++() {
731 ++MaskListIter;
732 ++RUIter;
733 }
734};
735
736// Each register unit has one or two root registers. The complete set of
737// registers containing a register unit is the union of the roots and their
738// super-registers. All registers aliasing Unit can be visited like this:
739//
740// for (MCRegUnitRootIterator RI(Unit, MCRI); RI.isValid(); ++RI) {
741// for (MCSuperRegIterator SI(*RI, MCRI, true); SI.isValid(); ++SI)
742// visit(*SI);
743// }
744
745/// MCRegUnitRootIterator enumerates the root registers of a register unit.
746class MCRegUnitRootIterator {
747 uint16_t Reg0 = 0;
748 uint16_t Reg1 = 0;
749
750public:
751 MCRegUnitRootIterator() = default;
752
753 MCRegUnitRootIterator(unsigned RegUnit, const MCRegisterInfo *MCRI) {
754 assert(RegUnit < MCRI->getNumRegUnits() && "Invalid register unit")(static_cast<void> (0));
755 Reg0 = MCRI->RegUnitRoots[RegUnit][0];
756 Reg1 = MCRI->RegUnitRoots[RegUnit][1];
757 }
758
759 /// Dereference to get the current root register.
760 unsigned operator*() const {
761 return Reg0;
762 }
763
764 /// Check if the iterator is at the end of the list.
765 bool isValid() const {
766 return Reg0;
767 }
768
769 /// Preincrement to move to the next root register.
770 void operator++() {
771 assert(isValid() && "Cannot move off the end of the list.")(static_cast<void> (0));
772 Reg0 = Reg1;
773 Reg1 = 0;
774 }
775};
776
777/// MCRegAliasIterator enumerates all registers aliasing Reg. If IncludeSelf is
778/// set, Reg itself is included in the list. This iterator does not guarantee
779/// any ordering or that entries are unique.
780class MCRegAliasIterator {
781private:
782 MCRegister Reg;
783 const MCRegisterInfo *MCRI;
784 bool IncludeSelf;
785
786 MCRegUnitIterator RI;
787 MCRegUnitRootIterator RRI;
788 MCSuperRegIterator SI;
789
790public:
791 MCRegAliasIterator(MCRegister Reg, const MCRegisterInfo *MCRI,
792 bool IncludeSelf)
793 : Reg(Reg), MCRI(MCRI), IncludeSelf(IncludeSelf) {
794 // Initialize the iterators.
795 for (RI = MCRegUnitIterator(Reg, MCRI); RI.isValid(); ++RI) {
796 for (RRI = MCRegUnitRootIterator(*RI, MCRI); RRI.isValid(); ++RRI) {
797 for (SI = MCSuperRegIterator(*RRI, MCRI, true); SI.isValid(); ++SI) {
798 if (!(!IncludeSelf && Reg == *SI))
799 return;
800 }
801 }
802 }
803 }
804
805 bool isValid() const { return RI.isValid(); }
806
807 MCRegister operator*() const {
808 assert(SI.isValid() && "Cannot dereference an invalid iterator.")(static_cast<void> (0));
809 return *SI;
810 }
811
812 void advance() {
813 // Assuming SI is valid.
814 ++SI;
815 if (SI.isValid()) return;
816
817 ++RRI;
818 if (RRI.isValid()) {
819 SI = MCSuperRegIterator(*RRI, MCRI, true);
820 return;
821 }
822
823 ++RI;
824 if (RI.isValid()) {
825 RRI = MCRegUnitRootIterator(*RI, MCRI);
826 SI = MCSuperRegIterator(*RRI, MCRI, true);
827 }
828 }
829
830 void operator++() {
831 assert(isValid() && "Cannot move off the end of the list.")(static_cast<void> (0));
832 do advance();
833 while (!IncludeSelf && isValid() && *SI == Reg);
834 }
835};
836
837} // end namespace llvm
838
839#endif // LLVM_MC_MCREGISTERINFO_H