Bug Summary

File: llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
Warning: line 786, column 22
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AggressiveAntiDepBreaker.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include 
-internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp

1//===- AggressiveAntiDepBreaker.cpp - Anti-dep breaker --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the AggressiveAntiDepBreaker class, which
10// implements register anti-dependence breaking during post-RA
11// scheduling. It attempts to break all anti-dependencies within a
12// block.
13//
14//===----------------------------------------------------------------------===//
15
16#include "AggressiveAntiDepBreaker.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/SmallSet.h"
19#include "llvm/ADT/iterator_range.h"
20#include "llvm/CodeGen/MachineBasicBlock.h"
21#include "llvm/CodeGen/MachineFrameInfo.h"
22#include "llvm/CodeGen/MachineFunction.h"
23#include "llvm/CodeGen/MachineInstr.h"
24#include "llvm/CodeGen/MachineOperand.h"
25#include "llvm/CodeGen/MachineRegisterInfo.h"
26#include "llvm/CodeGen/RegisterClassInfo.h"
27#include "llvm/CodeGen/ScheduleDAG.h"
28#include "llvm/CodeGen/TargetInstrInfo.h"
29#include "llvm/CodeGen/TargetRegisterInfo.h"
30#include "llvm/MC/MCInstrDesc.h"
31#include "llvm/MC/MCRegisterInfo.h"
32#include "llvm/Support/CommandLine.h"
33#include "llvm/Support/Debug.h"
34#include "llvm/Support/MachineValueType.h"
35#include "llvm/Support/raw_ostream.h"
36#include <cassert>
37#include <utility>
38
39using namespace llvm;
40
41#define DEBUG_TYPE"post-RA-sched" "post-RA-sched"
42
43// If DebugDiv > 0 then only break antidep with (ID % DebugDiv) == DebugMod
44static cl::opt<int>
45DebugDiv("agg-antidep-debugdiv",
46 cl::desc("Debug control for aggressive anti-dep breaker"),
47 cl::init(0), cl::Hidden);
48
49static cl::opt<int>
50DebugMod("agg-antidep-debugmod",
51 cl::desc("Debug control for aggressive anti-dep breaker"),
52 cl::init(0), cl::Hidden);
53
54AggressiveAntiDepState::AggressiveAntiDepState(const unsigned TargetRegs,
55 MachineBasicBlock *BB)
56 : NumTargetRegs(TargetRegs), GroupNodes(TargetRegs, 0),
57 GroupNodeIndices(TargetRegs, 0), KillIndices(TargetRegs, 0),
58 DefIndices(TargetRegs, 0) {
59 const unsigned BBSize = BB->size();
60 for (unsigned i = 0; i < NumTargetRegs; ++i) {
61 // Initialize all registers to be in their own group. Initially we
62 // assign the register to the same-indexed GroupNode.
63 GroupNodeIndices[i] = i;
64 // Initialize the indices to indicate that no registers are live.
65 KillIndices[i] = ~0u;
66 DefIndices[i] = BBSize;
67 }
68}
69
70unsigned AggressiveAntiDepState::GetGroup(unsigned Reg) {
71 unsigned Node = GroupNodeIndices[Reg];
72 while (GroupNodes[Node] != Node)
73 Node = GroupNodes[Node];
74
75 return Node;
76}
77
78void AggressiveAntiDepState::GetGroupRegs(
79 unsigned Group,
80 std::vector<unsigned> &Regs,
81 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference> *RegRefs)
82{
83 for (unsigned Reg = 0; Reg != NumTargetRegs; ++Reg) {
84 if ((GetGroup(Reg) == Group) && (RegRefs->count(Reg) > 0))
85 Regs.push_back(Reg);
86 }
87}
88
89unsigned AggressiveAntiDepState::UnionGroups(unsigned Reg1, unsigned Reg2) {
90 assert(GroupNodes[0] == 0 && "GroupNode 0 not parent!")(static_cast<void> (0));
91 assert(GroupNodeIndices[0] == 0 && "Reg 0 not in Group 0!")(static_cast<void> (0));
92
93 // find group for each register
94 unsigned Group1 = GetGroup(Reg1);
95 unsigned Group2 = GetGroup(Reg2);
96
97 // if either group is 0, then that must become the parent
98 unsigned Parent = (Group1 == 0) ? Group1 : Group2;
99 unsigned Other = (Parent == Group1) ? Group2 : Group1;
100 GroupNodes.at(Other) = Parent;
101 return Parent;
102}
103
104unsigned AggressiveAntiDepState::LeaveGroup(unsigned Reg) {
105 // Create a new GroupNode for Reg. Reg's existing GroupNode must
106 // stay as is because there could be other GroupNodes referring to
107 // it.
108 unsigned idx = GroupNodes.size();
109 GroupNodes.push_back(idx);
110 GroupNodeIndices[Reg] = idx;
111 return idx;
112}
113
114bool AggressiveAntiDepState::IsLive(unsigned Reg) {
115 // KillIndex must be defined and DefIndex not defined for a register
116 // to be live.
117 return((KillIndices[Reg] != ~0u) && (DefIndices[Reg] == ~0u));
118}
119
120AggressiveAntiDepBreaker::AggressiveAntiDepBreaker(
121 MachineFunction &MFi, const RegisterClassInfo &RCI,
122 TargetSubtargetInfo::RegClassVector &CriticalPathRCs)
123 : AntiDepBreaker(), MF(MFi), MRI(MF.getRegInfo()),
124 TII(MF.getSubtarget().getInstrInfo()),
125 TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RCI) {
126 /* Collect a bitset of all registers that are only broken if they
127 are on the critical path. */
128 for (unsigned i = 0, e = CriticalPathRCs.size(); i < e; ++i) {
129 BitVector CPSet = TRI->getAllocatableSet(MF, CriticalPathRCs[i]);
130 if (CriticalPathSet.none())
131 CriticalPathSet = CPSet;
132 else
133 CriticalPathSet |= CPSet;
134 }
135
136 LLVM_DEBUG(dbgs() << "AntiDep Critical-Path Registers:")do { } while (false);
137 LLVM_DEBUG(for (unsigned rdo { } while (false)
138 : CriticalPathSet.set_bits()) dbgs()do { } while (false)
139 << " " << printReg(r, TRI))do { } while (false);
140 LLVM_DEBUG(dbgs() << '\n')do { } while (false);
141}
142
143AggressiveAntiDepBreaker::~AggressiveAntiDepBreaker() {
144 delete State;
145}
146
147void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
148 assert(!State)(static_cast<void> (0));
149 State = new AggressiveAntiDepState(TRI->getNumRegs(), BB);
150
151 bool IsReturnBlock = BB->isReturnBlock();
152 std::vector<unsigned> &KillIndices = State->GetKillIndices();
153 std::vector<unsigned> &DefIndices = State->GetDefIndices();
154
155 // Examine the live-in regs of all successors.
156 for (MachineBasicBlock *Succ : BB->successors())
157 for (const auto &LI : Succ->liveins()) {
158 for (MCRegAliasIterator AI(LI.PhysReg, TRI, true); AI.isValid(); ++AI) {
159 unsigned Reg = *AI;
160 State->UnionGroups(Reg, 0);
161 KillIndices[Reg] = BB->size();
162 DefIndices[Reg] = ~0u;
163 }
164 }
165
166 // Mark live-out callee-saved registers. In a return block this is
167 // all callee-saved registers. In non-return this is any
168 // callee-saved register that is not saved in the prolog.
169 const MachineFrameInfo &MFI = MF.getFrameInfo();
170 BitVector Pristine = MFI.getPristineRegs(MF);
171 for (const MCPhysReg *I = MF.getRegInfo().getCalleeSavedRegs(); *I;
172 ++I) {
173 unsigned Reg = *I;
174 if (!IsReturnBlock && !Pristine.test(Reg))
175 continue;
176 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
177 unsigned AliasReg = *AI;
178 State->UnionGroups(AliasReg, 0);
179 KillIndices[AliasReg] = BB->size();
180 DefIndices[AliasReg] = ~0u;
181 }
182 }
183}
184
185void AggressiveAntiDepBreaker::FinishBlock() {
186 delete State;
187 State = nullptr;
188}
189
190void AggressiveAntiDepBreaker::Observe(MachineInstr &MI, unsigned Count,
191 unsigned InsertPosIndex) {
192 assert(Count < InsertPosIndex && "Instruction index out of expected range!")(static_cast<void> (0));
193
194 std::set<unsigned> PassthruRegs;
195 GetPassthruRegs(MI, PassthruRegs);
196 PrescanInstruction(MI, Count, PassthruRegs);
197 ScanInstruction(MI, Count);
198
199 LLVM_DEBUG(dbgs() << "Observe: ")do { } while (false);
200 LLVM_DEBUG(MI.dump())do { } while (false);
201 LLVM_DEBUG(dbgs() << "\tRegs:")do { } while (false);
202
203 std::vector<unsigned> &DefIndices = State->GetDefIndices();
204 for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
205 // If Reg is current live, then mark that it can't be renamed as
206 // we don't know the extent of its live-range anymore (now that it
207 // has been scheduled). If it is not live but was defined in the
208 // previous schedule region, then set its def index to the most
209 // conservative location (i.e. the beginning of the previous
210 // schedule region).
211 if (State->IsLive(Reg)) {
212 LLVM_DEBUG(if (State->GetGroup(Reg) != 0) dbgs()do { } while (false)
213 << " " << printReg(Reg, TRI) << "=g" << State->GetGroup(Reg)do { } while (false)
214 << "->g0(region live-out)")do { } while (false);
215 State->UnionGroups(Reg, 0);
216 } else if ((DefIndices[Reg] < InsertPosIndex)
217 && (DefIndices[Reg] >= Count)) {
218 DefIndices[Reg] = Count;
219 }
220 }
221 LLVM_DEBUG(dbgs() << '\n')do { } while (false);
222}
223
224bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr &MI,
225 MachineOperand &MO) {
226 if (!MO.isReg() || !MO.isImplicit())
227 return false;
228
229 Register Reg = MO.getReg();
230 if (Reg == 0)
231 return false;
232
233 MachineOperand *Op = nullptr;
234 if (MO.isDef())
235 Op = MI.findRegisterUseOperand(Reg, true);
236 else
237 Op = MI.findRegisterDefOperand(Reg);
238
239 return(Op && Op->isImplicit());
240}
241
242void AggressiveAntiDepBreaker::GetPassthruRegs(
243 MachineInstr &MI, std::set<unsigned> &PassthruRegs) {
244 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
245 MachineOperand &MO = MI.getOperand(i);
246 if (!MO.isReg()) continue;
247 if ((MO.isDef() && MI.isRegTiedToUseOperand(i)) ||
248 IsImplicitDefUse(MI, MO)) {
249 const Register Reg = MO.getReg();
250 for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
251 SubRegs.isValid(); ++SubRegs)
252 PassthruRegs.insert(*SubRegs);
253 }
254 }
255}
256
257/// AntiDepEdges - Return in Edges the anti- and output- dependencies
258/// in SU that we want to consider for breaking.
259static void AntiDepEdges(const SUnit *SU, std::vector<const SDep *> &Edges) {
260 SmallSet<unsigned, 4> RegSet;
261 for (const SDep &Pred : SU->Preds) {
262 if ((Pred.getKind() == SDep::Anti) || (Pred.getKind() == SDep::Output)) {
263 if (RegSet.insert(Pred.getReg()).second)
264 Edges.push_back(&Pred);
265 }
266 }
267}
268
269/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
270/// critical path.
271static const SUnit *CriticalPathStep(const SUnit *SU) {
272 const SDep *Next = nullptr;
273 unsigned NextDepth = 0;
274 // Find the predecessor edge with the greatest depth.
275 if (SU) {
276 for (const SDep &Pred : SU->Preds) {
277 const SUnit *PredSU = Pred.getSUnit();
278 unsigned PredLatency = Pred.getLatency();
279 unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
280 // In the case of a latency tie, prefer an anti-dependency edge over
281 // other types of edges.
282 if (NextDepth < PredTotalLatency ||
283 (NextDepth == PredTotalLatency && Pred.getKind() == SDep::Anti)) {
284 NextDepth = PredTotalLatency;
285 Next = &Pred;
286 }
287 }
288 }
289
290 return (Next) ? Next->getSUnit() : nullptr;
291}
292
293void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
294 const char *tag,
295 const char *header,
296 const char *footer) {
297 std::vector<unsigned> &KillIndices = State->GetKillIndices();
298 std::vector<unsigned> &DefIndices = State->GetDefIndices();
299 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
300 RegRefs = State->GetRegRefs();
301
302 // FIXME: We must leave subregisters of live super registers as live, so that
303 // we don't clear out the register tracking information for subregisters of
304 // super registers we're still tracking (and with which we're unioning
305 // subregister definitions).
306 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
307 if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI)) {
308 LLVM_DEBUG(if (!header && footer) dbgs() << footer)do { } while (false);
309 return;
310 }
311
312 if (!State->IsLive(Reg)) {
313 KillIndices[Reg] = KillIdx;
314 DefIndices[Reg] = ~0u;
315 RegRefs.erase(Reg);
316 State->LeaveGroup(Reg);
317 LLVM_DEBUG(if (header) {do { } while (false)
318 dbgs() << header << printReg(Reg, TRI);do { } while (false)
319 header = nullptr;do { } while (false)
320 })do { } while (false);
321 LLVM_DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag)do { } while (false);
322 // Repeat for subregisters. Note that we only do this if the superregister
323 // was not live because otherwise, regardless whether we have an explicit
324 // use of the subregister, the subregister's contents are needed for the
325 // uses of the superregister.
326 for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
327 unsigned SubregReg = *SubRegs;
328 if (!State->IsLive(SubregReg)) {
329 KillIndices[SubregReg] = KillIdx;
330 DefIndices[SubregReg] = ~0u;
331 RegRefs.erase(SubregReg);
332 State->LeaveGroup(SubregReg);
333 LLVM_DEBUG(if (header) {do { } while (false)
334 dbgs() << header << printReg(Reg, TRI);do { } while (false)
335 header = nullptr;do { } while (false)
336 })do { } while (false);
337 LLVM_DEBUG(dbgs() << " " << printReg(SubregReg, TRI) << "->g"do { } while (false)
338 << State->GetGroup(SubregReg) << tag)do { } while (false);
339 }
340 }
341 }
342
343 LLVM_DEBUG(if (!header && footer) dbgs() << footer)do { } while (false);
344}
345
346void AggressiveAntiDepBreaker::PrescanInstruction(
347 MachineInstr &MI, unsigned Count, std::set<unsigned> &PassthruRegs) {
348 std::vector<unsigned> &DefIndices = State->GetDefIndices();
349 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
350 RegRefs = State->GetRegRefs();
351
352 // Handle dead defs by simulating a last-use of the register just
353 // after the def. A dead def can occur because the def is truly
354 // dead, or because only a subregister is live at the def. If we
355 // don't do this the dead def will be incorrectly merged into the
356 // previous def.
357 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
358 MachineOperand &MO = MI.getOperand(i);
359 if (!MO.isReg() || !MO.isDef()) continue;
360 Register Reg = MO.getReg();
361 if (Reg == 0) continue;
362
363 HandleLastUse(Reg, Count + 1, "", "\tDead Def: ", "\n");
364 }
365
366 LLVM_DEBUG(dbgs() << "\tDef Groups:")do { } while (false);
367 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
368 MachineOperand &MO = MI.getOperand(i);
369 if (!MO.isReg() || !MO.isDef()) continue;
370 Register Reg = MO.getReg();
371 if (Reg == 0) continue;
372
373 LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g"do { } while (false)
374 << State->GetGroup(Reg))do { } while (false);
375
376 // If MI's defs have a special allocation requirement, don't allow
377 // any def registers to be changed. Also assume all registers
378 // defined in a call must not be changed (ABI). Inline assembly may
379 // reference either system calls or the register directly. Skip it until we
380 // can tell user specified registers from compiler-specified.
381 if (MI.isCall() || MI.hasExtraDefRegAllocReq() || TII->isPredicated(MI) ||
382 MI.isInlineAsm()) {
383 LLVM_DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)")do { } while (false);
384 State->UnionGroups(Reg, 0);
385 }
386
387 // Any aliased that are live at this point are completely or
388 // partially defined here, so group those aliases with Reg.
389 for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
390 unsigned AliasReg = *AI;
391 if (State->IsLive(AliasReg)) {
392 State->UnionGroups(Reg, AliasReg);
393 LLVM_DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << "(via "do { } while (false)
394 << printReg(AliasReg, TRI) << ")")do { } while (false);
395 }
396 }
397
398 // Note register reference...
399 const TargetRegisterClass *RC = nullptr;
400 if (i < MI.getDesc().getNumOperands())
401 RC = TII->getRegClass(MI.getDesc(), i, TRI, MF);
402 AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
403 RegRefs.insert(std::make_pair(Reg, RR));
404 }
405
406 LLVM_DEBUG(dbgs() << '\n')do { } while (false);
407
408 // Scan the register defs for this instruction and update
409 // live-ranges.
410 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
411 MachineOperand &MO = MI.getOperand(i);
412 if (!MO.isReg() || !MO.isDef()) continue;
413 Register Reg = MO.getReg();
414 if (Reg == 0) continue;
415 // Ignore KILLs and passthru registers for liveness...
416 if (MI.isKill() || (PassthruRegs.count(Reg) != 0))
417 continue;
418
419 // Update def for Reg and aliases.
420 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
421 // We need to be careful here not to define already-live super registers.
422 // If the super register is already live, then this definition is not
423 // a definition of the whole super register (just a partial insertion
424 // into it). Earlier subregister definitions (which we've not yet visited
425 // because we're iterating bottom-up) need to be linked to the same group
426 // as this definition.
427 if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI))
428 continue;
429
430 DefIndices[*AI] = Count;
431 }
432 }
433}
434
435void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
436 unsigned Count) {
437 LLVM_DEBUG(dbgs() << "\tUse Groups:")do { } while (false);
438 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
439 RegRefs = State->GetRegRefs();
440
441 // If MI's uses have special allocation requirement, don't allow
442 // any use registers to be changed. Also assume all registers
443 // used in a call must not be changed (ABI).
444 // Inline Assembly register uses also cannot be safely changed.
445 // FIXME: The issue with predicated instruction is more complex. We are being
446 // conservatively here because the kill markers cannot be trusted after
447 // if-conversion:
448 // %r6 = LDR %sp, %reg0, 92, 14, %reg0; mem:LD4[FixedStack14]
449 // ...
450 // STR %r0, killed %r6, %reg0, 0, 0, %cpsr; mem:ST4[%395]
451 // %r6 = LDR %sp, %reg0, 100, 0, %cpsr; mem:LD4[FixedStack12]
452 // STR %r0, killed %r6, %reg0, 0, 14, %reg0; mem:ST4[%396](align=8)
453 //
454 // The first R6 kill is not really a kill since it's killed by a predicated
455 // instruction which may not be executed. The second R6 def may or may not
456 // re-define R6 so it's not safe to change it since the last R6 use cannot be
457 // changed.
458 bool Special = MI.isCall() || MI.hasExtraSrcRegAllocReq() ||
459 TII->isPredicated(MI) || MI.isInlineAsm();
460
461 // Scan the register uses for this instruction and update
462 // live-ranges, groups and RegRefs.
463 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
464 MachineOperand &MO = MI.getOperand(i);
465 if (!MO.isReg() || !MO.isUse()) continue;
466 Register Reg = MO.getReg();
467 if (Reg == 0) continue;
468
469 LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g"do { } while (false)
470 << State->GetGroup(Reg))do { } while (false);
471
472 // It wasn't previously live but now it is, this is a kill. Forget
473 // the previous live-range information and start a new live-range
474 // for the register.
475 HandleLastUse(Reg, Count, "(last-use)");
476
477 if (Special) {
478 LLVM_DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)")do { } while (false);
479 State->UnionGroups(Reg, 0);
480 }
481
482 // Note register reference...
483 const TargetRegisterClass *RC = nullptr;
484 if (i < MI.getDesc().getNumOperands())
485 RC = TII->getRegClass(MI.getDesc(), i, TRI, MF);
486 AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
487 RegRefs.insert(std::make_pair(Reg, RR));
488 }
489
490 LLVM_DEBUG(dbgs() << '\n')do { } while (false);
491
492 // Form a group of all defs and uses of a KILL instruction to ensure
493 // that all registers are renamed as a group.
494 if (MI.isKill()) {
495 LLVM_DEBUG(dbgs() << "\tKill Group:")do { } while (false);
496
497 unsigned FirstReg = 0;
498 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
499 MachineOperand &MO = MI.getOperand(i);
500 if (!MO.isReg()) continue;
501 Register Reg = MO.getReg();
502 if (Reg == 0) continue;
503
504 if (FirstReg != 0) {
505 LLVM_DEBUG(dbgs() << "=" << printReg(Reg, TRI))do { } while (false);
506 State->UnionGroups(FirstReg, Reg);
507 } else {
508 LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI))do { } while (false);
509 FirstReg = Reg;
510 }
511 }
512
513 LLVM_DEBUG(dbgs() << "->g" << State->GetGroup(FirstReg) << '\n')do { } while (false);
514 }
515}
516
517BitVector AggressiveAntiDepBreaker::GetRenameRegisters(unsigned Reg) {
518 BitVector BV(TRI->getNumRegs(), false);
519 bool first = true;
520
521 // Check all references that need rewriting for Reg. For each, use
522 // the corresponding register class to narrow the set of registers
523 // that are appropriate for renaming.
524 for (const auto &Q : make_range(State->GetRegRefs().equal_range(Reg))) {
525 const TargetRegisterClass *RC = Q.second.RC;
526 if (!RC) continue;
527
528 BitVector RCBV = TRI->getAllocatableSet(MF, RC);
529 if (first) {
530 BV |= RCBV;
531 first = false;
532 } else {
533 BV &= RCBV;
534 }
535
536 LLVM_DEBUG(dbgs() << " " << TRI->getRegClassName(RC))do { } while (false);
537 }
538
539 return BV;
540}
541
542bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
543 unsigned AntiDepGroupIndex,
544 RenameOrderType& RenameOrder,
545 std::map<unsigned, unsigned> &RenameMap) {
546 std::vector<unsigned> &KillIndices = State->GetKillIndices();
547 std::vector<unsigned> &DefIndices = State->GetDefIndices();
548 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
549 RegRefs = State->GetRegRefs();
550
551 // Collect all referenced registers in the same group as
552 // AntiDepReg. These all need to be renamed together if we are to
553 // break the anti-dependence.
554 std::vector<unsigned> Regs;
555 State->GetGroupRegs(AntiDepGroupIndex, Regs, &RegRefs);
556 assert(!Regs.empty() && "Empty register group!")(static_cast<void> (0));
557 if (Regs.empty())
558 return false;
559
560 // Find the "superest" register in the group. At the same time,
561 // collect the BitVector of registers that can be used to rename
562 // each register.
563 LLVM_DEBUG(dbgs() << "\tRename Candidates for Group g" << AntiDepGroupIndexdo { } while (false)
564 << ":\n")do { } while (false);
565 std::map<unsigned, BitVector> RenameRegisterMap;
566 unsigned SuperReg = 0;
567 for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
568 unsigned Reg = Regs[i];
569 if ((SuperReg == 0) || TRI->isSuperRegister(SuperReg, Reg))
570 SuperReg = Reg;
571
572 // If Reg has any references, then collect possible rename regs
573 if (RegRefs.count(Reg) > 0) {
574 LLVM_DEBUG(dbgs() << "\t\t" << printReg(Reg, TRI) << ":")do { } while (false);
575
576 BitVector &BV = RenameRegisterMap[Reg];
577 assert(BV.empty())(static_cast<void> (0));
578 BV = GetRenameRegisters(Reg);
579
580 LLVM_DEBUG({do { } while (false)
581 dbgs() << " ::";do { } while (false)
582 for (unsigned r : BV.set_bits())do { } while (false)
583 dbgs() << " " << printReg(r, TRI);do { } while (false)
584 dbgs() << "\n";do { } while (false)
585 })do { } while (false);
586 }
587 }
588
589 // All group registers should be a subreg of SuperReg.
590 for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
591 unsigned Reg = Regs[i];
592 if (Reg == SuperReg) continue;
593 bool IsSub = TRI->isSubRegister(SuperReg, Reg);
594 // FIXME: remove this once PR18663 has been properly fixed. For now,
595 // return a conservative answer:
596 // assert(IsSub && "Expecting group subregister");
597 if (!IsSub)
598 return false;
599 }
600
601#ifndef NDEBUG1
602 // If DebugDiv > 0 then only rename (renamecnt % DebugDiv) == DebugMod
603 if (DebugDiv > 0) {
604 static int renamecnt = 0;
605 if (renamecnt++ % DebugDiv != DebugMod)
606 return false;
607
608 dbgs() << "*** Performing rename " << printReg(SuperReg, TRI)
609 << " for debug ***\n";
610 }
611#endif
612
613 // Check each possible rename register for SuperReg in round-robin
614 // order. If that register is available, and the corresponding
615 // registers are available for the other group subregisters, then we
616 // can use those registers to rename.
617
618 // FIXME: Using getMinimalPhysRegClass is very conservative. We should
619 // check every use of the register and find the largest register class
620 // that can be used in all of them.
621 const TargetRegisterClass *SuperRC =
622 TRI->getMinimalPhysRegClass(SuperReg, MVT::Other);
623
624 ArrayRef<MCPhysReg> Order = RegClassInfo.getOrder(SuperRC);
625 if (Order.empty()) {
626 LLVM_DEBUG(dbgs() << "\tEmpty Super Regclass!!\n")do { } while (false);
627 return false;
628 }
629
630 LLVM_DEBUG(dbgs() << "\tFind Registers:")do { } while (false);
631
632 RenameOrder.insert(RenameOrderType::value_type(SuperRC, Order.size()));
633
634 unsigned OrigR = RenameOrder[SuperRC];
635 unsigned EndR = ((OrigR == Order.size()) ? 0 : OrigR);
636 unsigned R = OrigR;
637 do {
638 if (R == 0) R = Order.size();
639 --R;
640 const unsigned NewSuperReg = Order[R];
641 // Don't consider non-allocatable registers
642 if (!MRI.isAllocatable(NewSuperReg)) continue;
643 // Don't replace a register with itself.
644 if (NewSuperReg == SuperReg) continue;
645
646 LLVM_DEBUG(dbgs() << " [" << printReg(NewSuperReg, TRI) << ':')do { } while (false);
647 RenameMap.clear();
648
649 // For each referenced group register (which must be a SuperReg or
650 // a subregister of SuperReg), find the corresponding subregister
651 // of NewSuperReg and make sure it is free to be renamed.
652 for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
653 unsigned Reg = Regs[i];
654 unsigned NewReg = 0;
655 if (Reg == SuperReg) {
656 NewReg = NewSuperReg;
657 } else {
658 unsigned NewSubRegIdx = TRI->getSubRegIndex(SuperReg, Reg);
659 if (NewSubRegIdx != 0)
660 NewReg = TRI->getSubReg(NewSuperReg, NewSubRegIdx);
661 }
662
663 LLVM_DEBUG(dbgs() << " " << printReg(NewReg, TRI))do { } while (false);
664
665 // Check if Reg can be renamed to NewReg.
666 if (!RenameRegisterMap[Reg].test(NewReg)) {
667 LLVM_DEBUG(dbgs() << "(no rename)")do { } while (false);
668 goto next_super_reg;
669 }
670
671 // If NewReg is dead and NewReg's most recent def is not before
672 // Regs's kill, it's safe to replace Reg with NewReg. We
673 // must also check all aliases of NewReg, because we can't define a
674 // register when any sub or super is already live.
675 if (State->IsLive(NewReg) || (KillIndices[Reg] > DefIndices[NewReg])) {
676 LLVM_DEBUG(dbgs() << "(live)")do { } while (false);
677 goto next_super_reg;
678 } else {
679 bool found = false;
680 for (MCRegAliasIterator AI(NewReg, TRI, false); AI.isValid(); ++AI) {
681 unsigned AliasReg = *AI;
682 if (State->IsLive(AliasReg) ||
683 (KillIndices[Reg] > DefIndices[AliasReg])) {
684 LLVM_DEBUG(dbgs()do { } while (false)
685 << "(alias " << printReg(AliasReg, TRI) << " live)")do { } while (false);
686 found = true;
687 break;
688 }
689 }
690 if (found)
691 goto next_super_reg;
692 }
693
694 // We cannot rename 'Reg' to 'NewReg' if one of the uses of 'Reg' also
695 // defines 'NewReg' via an early-clobber operand.
696 for (const auto &Q : make_range(RegRefs.equal_range(Reg))) {
697 MachineInstr *UseMI = Q.second.Operand->getParent();
698 int Idx = UseMI->findRegisterDefOperandIdx(NewReg, false, true, TRI);
699 if (Idx == -1)
700 continue;
701
702 if (UseMI->getOperand(Idx).isEarlyClobber()) {
703 LLVM_DEBUG(dbgs() << "(ec)")do { } while (false);
704 goto next_super_reg;
705 }
706 }
707
708 // Also, we cannot rename 'Reg' to 'NewReg' if the instruction defining
709 // 'Reg' is an early-clobber define and that instruction also uses
710 // 'NewReg'.
711 for (const auto &Q : make_range(RegRefs.equal_range(Reg))) {
712 if (!Q.second.Operand->isDef() || !Q.second.Operand->isEarlyClobber())
713 continue;
714
715 MachineInstr *DefMI = Q.second.Operand->getParent();
716 if (DefMI->readsRegister(NewReg, TRI)) {
717 LLVM_DEBUG(dbgs() << "(ec)")do { } while (false);
718 goto next_super_reg;
719 }
720 }
721
722 // Record that 'Reg' can be renamed to 'NewReg'.
723 RenameMap.insert(std::pair<unsigned, unsigned>(Reg, NewReg));
724 }
725
726 // If we fall-out here, then every register in the group can be
727 // renamed, as recorded in RenameMap.
728 RenameOrder.erase(SuperRC);
729 RenameOrder.insert(RenameOrderType::value_type(SuperRC, R));
730 LLVM_DEBUG(dbgs() << "]\n")do { } while (false);
731 return true;
732
733 next_super_reg:
734 LLVM_DEBUG(dbgs() << ']')do { } while (false);
735 } while (R != EndR);
736
737 LLVM_DEBUG(dbgs() << '\n')do { } while (false);
738
739 // No registers are free and available!
740 return false;
741}
742
743/// BreakAntiDependencies - Identifiy anti-dependencies within the
744/// ScheduleDAG and break them by renaming registers.
745unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
746 const std::vector<SUnit> &SUnits,
747 MachineBasicBlock::iterator Begin,
748 MachineBasicBlock::iterator End,
749 unsigned InsertPosIndex,
750 DbgValueVector &DbgValues) {
751 std::vector<unsigned> &KillIndices = State->GetKillIndices();
752 std::vector<unsigned> &DefIndices = State->GetDefIndices();
753 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
754 RegRefs = State->GetRegRefs();
755
756 // The code below assumes that there is at least one instruction,
757 // so just duck out immediately if the block is empty.
758 if (SUnits.empty()) return 0;
1
Assuming the condition is false
2
Taking false branch
759
760 // For each regclass the next register to use for renaming.
761 RenameOrderType RenameOrder;
762
763 // ...need a map from MI to SUnit.
764 std::map<MachineInstr *, const SUnit *> MISUnitMap;
765 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
3
Assuming 'i' is equal to 'e'
4
Loop condition is false. Execution continues on line 774
766 const SUnit *SU = &SUnits[i];
767 MISUnitMap.insert(std::pair<MachineInstr *, const SUnit *>(SU->getInstr(),
768 SU));
769 }
770
771 // Track progress along the critical path through the SUnit graph as
772 // we walk the instructions. This is needed for regclasses that only
773 // break critical-path anti-dependencies.
774 const SUnit *CriticalPathSU = nullptr;
5
'CriticalPathSU' initialized to a null pointer value
775 MachineInstr *CriticalPathMI = nullptr;
776 if (CriticalPathSet.any()) {
6
Calling 'BitVector::any'
18
Returning from 'BitVector::any'
19
Taking true branch
777 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
20
Assuming 'i' is equal to 'e'
21
Loop condition is false. Execution continues on line 785
778 const SUnit *SU = &SUnits[i];
779 if (!CriticalPathSU ||
780 ((SU->getDepth() + SU->Latency) >
781 (CriticalPathSU->getDepth() + CriticalPathSU->Latency))) {
782 CriticalPathSU = SU;
783 }
784 }
785 assert(CriticalPathSU && "Failed to find SUnit critical path")(static_cast<void> (0));
786 CriticalPathMI = CriticalPathSU->getInstr();
22
Called C++ object pointer is null
787 }
788
789#ifndef NDEBUG1
790 LLVM_DEBUG(dbgs() << "\n===== Aggressive anti-dependency breaking\n")do { } while (false);
791 LLVM_DEBUG(dbgs() << "Available regs:")do { } while (false);
792 for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
793 if (!State->IsLive(Reg))
794 LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI))do { } while (false);
795 }
796 LLVM_DEBUG(dbgs() << '\n')do { } while (false);
797#endif
798
799 BitVector RegAliases(TRI->getNumRegs());
800
801 // Attempt to break anti-dependence edges. Walk the instructions
802 // from the bottom up, tracking information about liveness as we go
803 // to help determine which registers are available.
804 unsigned Broken = 0;
805 unsigned Count = InsertPosIndex - 1;
806 for (MachineBasicBlock::iterator I = End, E = Begin;
807 I != E; --Count) {
808 MachineInstr &MI = *--I;
809
810 if (MI.isDebugInstr())
811 continue;
812
813 LLVM_DEBUG(dbgs() << "Anti: ")do { } while (false);
814 LLVM_DEBUG(MI.dump())do { } while (false);
815
816 std::set<unsigned> PassthruRegs;
817 GetPassthruRegs(MI, PassthruRegs);
818
819 // Process the defs in MI...
820 PrescanInstruction(MI, Count, PassthruRegs);
821
822 // The dependence edges that represent anti- and output-
823 // dependencies that are candidates for breaking.
824 std::vector<const SDep *> Edges;
825 const SUnit *PathSU = MISUnitMap[&MI];
826 AntiDepEdges(PathSU, Edges);
827
828 // If MI is not on the critical path, then we don't rename
829 // registers in the CriticalPathSet.
830 BitVector *ExcludeRegs = nullptr;
831 if (&MI == CriticalPathMI) {
832 CriticalPathSU = CriticalPathStep(CriticalPathSU);
833 CriticalPathMI = (CriticalPathSU) ? CriticalPathSU->getInstr() : nullptr;
834 } else if (CriticalPathSet.any()) {
835 ExcludeRegs = &CriticalPathSet;
836 }
837
838 // Ignore KILL instructions (they form a group in ScanInstruction
839 // but don't cause any anti-dependence breaking themselves)
840 if (!MI.isKill()) {
841 // Attempt to break each anti-dependency...
842 for (unsigned i = 0, e = Edges.size(); i != e; ++i) {
843 const SDep *Edge = Edges[i];
844 SUnit *NextSU = Edge->getSUnit();
845
846 if ((Edge->getKind() != SDep::Anti) &&
847 (Edge->getKind() != SDep::Output)) continue;
848
849 unsigned AntiDepReg = Edge->getReg();
850 LLVM_DEBUG(dbgs() << "\tAntidep reg: " << printReg(AntiDepReg, TRI))do { } while (false);
851 assert(AntiDepReg != 0 && "Anti-dependence on reg0?")(static_cast<void> (0));
852
853 if (!MRI.isAllocatable(AntiDepReg)) {
854 // Don't break anti-dependencies on non-allocatable registers.
855 LLVM_DEBUG(dbgs() << " (non-allocatable)\n")do { } while (false);
856 continue;
857 } else if (ExcludeRegs && ExcludeRegs->test(AntiDepReg)) {
858 // Don't break anti-dependencies for critical path registers
859 // if not on the critical path
860 LLVM_DEBUG(dbgs() << " (not critical-path)\n")do { } while (false);
861 continue;
862 } else if (PassthruRegs.count(AntiDepReg) != 0) {
863 // If the anti-dep register liveness "passes-thru", then
864 // don't try to change it. It will be changed along with
865 // the use if required to break an earlier antidep.
866 LLVM_DEBUG(dbgs() << " (passthru)\n")do { } while (false);
867 continue;
868 } else {
869 // No anti-dep breaking for implicit deps
870 MachineOperand *AntiDepOp = MI.findRegisterDefOperand(AntiDepReg);
871 assert(AntiDepOp && "Can't find index for defined register operand")(static_cast<void> (0));
872 if (!AntiDepOp || AntiDepOp->isImplicit()) {
873 LLVM_DEBUG(dbgs() << " (implicit)\n")do { } while (false);
874 continue;
875 }
876
877 // If the SUnit has other dependencies on the SUnit that
878 // it anti-depends on, don't bother breaking the
879 // anti-dependency since those edges would prevent such
880 // units from being scheduled past each other
881 // regardless.
882 //
883 // Also, if there are dependencies on other SUnits with the
884 // same register as the anti-dependency, don't attempt to
885 // break it.
886 for (const SDep &Pred : PathSU->Preds) {
887 if (Pred.getSUnit() == NextSU ? (Pred.getKind() != SDep::Anti ||
888 Pred.getReg() != AntiDepReg)
889 : (Pred.getKind() == SDep::Data &&
890 Pred.getReg() == AntiDepReg)) {
891 AntiDepReg = 0;
892 break;
893 }
894 }
895 for (const SDep &Pred : PathSU->Preds) {
896 if ((Pred.getSUnit() == NextSU) && (Pred.getKind() != SDep::Anti) &&
897 (Pred.getKind() != SDep::Output)) {
898 LLVM_DEBUG(dbgs() << " (real dependency)\n")do { } while (false);
899 AntiDepReg = 0;
900 break;
901 } else if ((Pred.getSUnit() != NextSU) &&
902 (Pred.getKind() == SDep::Data) &&
903 (Pred.getReg() == AntiDepReg)) {
904 LLVM_DEBUG(dbgs() << " (other dependency)\n")do { } while (false);
905 AntiDepReg = 0;
906 break;
907 }
908 }
909
910 if (AntiDepReg == 0) continue;
911
912 // If the definition of the anti-dependency register does not start
913 // a new live range, bail out. This can happen if the anti-dep
914 // register is a sub-register of another register whose live range
915 // spans over PathSU. In such case, PathSU defines only a part of
916 // the larger register.
917 RegAliases.reset();
918 for (MCRegAliasIterator AI(AntiDepReg, TRI, true); AI.isValid(); ++AI)
919 RegAliases.set(*AI);
920 for (SDep S : PathSU->Succs) {
921 SDep::Kind K = S.getKind();
922 if (K != SDep::Data && K != SDep::Output && K != SDep::Anti)
923 continue;
924 unsigned R = S.getReg();
925 if (!RegAliases[R])
926 continue;
927 if (R == AntiDepReg || TRI->isSubRegister(AntiDepReg, R))
928 continue;
929 AntiDepReg = 0;
930 break;
931 }
932
933 if (AntiDepReg == 0) continue;
934 }
935
936 assert(AntiDepReg != 0)(static_cast<void> (0));
937 if (AntiDepReg == 0) continue;
938
939 // Determine AntiDepReg's register group.
940 const unsigned GroupIndex = State->GetGroup(AntiDepReg);
941 if (GroupIndex == 0) {
942 LLVM_DEBUG(dbgs() << " (zero group)\n")do { } while (false);
943 continue;
944 }
945
946 LLVM_DEBUG(dbgs() << '\n')do { } while (false);
947
948 // Look for a suitable register to use to break the anti-dependence.
949 std::map<unsigned, unsigned> RenameMap;
950 if (FindSuitableFreeRegisters(GroupIndex, RenameOrder, RenameMap)) {
951 LLVM_DEBUG(dbgs() << "\tBreaking anti-dependence edge on "do { } while (false)
952 << printReg(AntiDepReg, TRI) << ":")do { } while (false);
953
954 // Handle each group register...
955 for (const auto &P : RenameMap) {
956 unsigned CurrReg = P.first;
957 unsigned NewReg = P.second;
958
959 LLVM_DEBUG(dbgs() << " " << printReg(CurrReg, TRI) << "->"do { } while (false)
960 << printReg(NewReg, TRI) << "("do { } while (false)
961 << RegRefs.count(CurrReg) << " refs)")do { } while (false);
962
963 // Update the references to the old register CurrReg to
964 // refer to the new register NewReg.
965 for (const auto &Q : make_range(RegRefs.equal_range(CurrReg))) {
966 Q.second.Operand->setReg(NewReg);
967 // If the SU for the instruction being updated has debug
968 // information related to the anti-dependency register, make
969 // sure to update that as well.
970 const SUnit *SU = MISUnitMap[Q.second.Operand->getParent()];
971 if (!SU) continue;
972 UpdateDbgValues(DbgValues, Q.second.Operand->getParent(),
973 AntiDepReg, NewReg);
974 }
975
976 // We just went back in time and modified history; the
977 // liveness information for CurrReg is now inconsistent. Set
978 // the state as if it were dead.
979 State->UnionGroups(NewReg, 0);
980 RegRefs.erase(NewReg);
981 DefIndices[NewReg] = DefIndices[CurrReg];
982 KillIndices[NewReg] = KillIndices[CurrReg];
983
984 State->UnionGroups(CurrReg, 0);
985 RegRefs.erase(CurrReg);
986 DefIndices[CurrReg] = KillIndices[CurrReg];
987 KillIndices[CurrReg] = ~0u;
988 assert(((KillIndices[CurrReg] == ~0u) !=(static_cast<void> (0))
989 (DefIndices[CurrReg] == ~0u)) &&(static_cast<void> (0))
990 "Kill and Def maps aren't consistent for AntiDepReg!")(static_cast<void> (0));
991 }
992
993 ++Broken;
994 LLVM_DEBUG(dbgs() << '\n')do { } while (false);
995 }
996 }
997 }
998
999 ScanInstruction(MI, Count);
1000 }
1001
1002 return Broken;
1003}
1004
1005AntiDepBreaker *llvm::createAggressiveAntiDepBreaker(
1006 MachineFunction &MFi, const RegisterClassInfo &RCI,
1007 TargetSubtargetInfo::RegClassVector &CriticalPathRCs) {
1008 return new AggressiveAntiDepBreaker(MFi, RCI, CriticalPathRCs);
1009}

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/BitVector.h

1//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the BitVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_BITVECTOR_H
14#define LLVM_ADT_BITVECTOR_H
15
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/iterator_range.h"
19#include "llvm/Support/MathExtras.h"
20#include <algorithm>
21#include <cassert>
22#include <climits>
23#include <cstdint>
24#include <cstdlib>
25#include <cstring>
26#include <utility>
27
28namespace llvm {
29
/// ForwardIterator over the set bits of a bit-vector-like container.
/// BitVectorT must provide find_first() and find_next(int); iterators are
/// invalidated when resize / reserve is called on the parent.
template <typename BitVectorT> class const_set_bits_iterator_impl {
  const BitVectorT &Parent; // Container being iterated; never owned.
  int Current = 0;          // Index of current set bit, or -1 at end.

  // Step to the next set bit; -1 (end) means there is none left.
  void advance() {
    assert(Current != -1 && "Trying to advance past end.");
    Current = Parent.find_next(Current);
  }

public:
  const_set_bits_iterator_impl(const BitVectorT &Parent, int Current)
      : Parent(Parent), Current(Current) {}
  // Begin-iterator: positioned at the first set bit (or end if none).
  explicit const_set_bits_iterator_impl(const BitVectorT &Parent)
      : const_set_bits_iterator_impl(Parent, Parent.find_first()) {}
  const_set_bits_iterator_impl(const const_set_bits_iterator_impl &) = default;

  // Post-increment: return the pre-step position.
  const_set_bits_iterator_impl operator++(int) {
    const_set_bits_iterator_impl Copy(*this);
    advance();
    return Copy;
  }

  // Pre-increment.
  const_set_bits_iterator_impl &operator++() {
    advance();
    return *this;
  }

  // Dereference yields the bit index of the current set bit.
  unsigned operator*() const { return Current; }

  bool operator==(const const_set_bits_iterator_impl &Other) const {
    assert(&Parent == &Other.Parent &&
           "Comparing iterators from different BitVectors");
    return Current == Other.Current;
  }

  bool operator!=(const const_set_bits_iterator_impl &Other) const {
    return !operator==(Other);
  }
};
73
74class BitVector {
75 typedef uintptr_t BitWord;
76
77 enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT8 };
78
79 static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
80 "Unsupported word size");
81
82 using Storage = SmallVector<BitWord>;
83
84 Storage Bits; // Actual bits.
85 unsigned Size; // Size of bitvector in bits.
86
87public:
88 using size_type = unsigned;
89
90 // Encapsulation of a single bit.
91 class reference {
92
93 BitWord *WordRef;
94 unsigned BitPos;
95
96 public:
97 reference(BitVector &b, unsigned Idx) {
98 WordRef = &b.Bits[Idx / BITWORD_SIZE];
99 BitPos = Idx % BITWORD_SIZE;
100 }
101
102 reference() = delete;
103 reference(const reference&) = default;
104
105 reference &operator=(reference t) {
106 *this = bool(t);
107 return *this;
108 }
109
110 reference& operator=(bool t) {
111 if (t)
112 *WordRef |= BitWord(1) << BitPos;
113 else
114 *WordRef &= ~(BitWord(1) << BitPos);
115 return *this;
116 }
117
118 operator bool() const {
119 return ((*WordRef) & (BitWord(1) << BitPos)) != 0;
120 }
121 };
122
123 typedef const_set_bits_iterator_impl<BitVector> const_set_bits_iterator;
124 typedef const_set_bits_iterator set_iterator;
125
126 const_set_bits_iterator set_bits_begin() const {
127 return const_set_bits_iterator(*this);
128 }
129 const_set_bits_iterator set_bits_end() const {
130 return const_set_bits_iterator(*this, -1);
131 }
132 iterator_range<const_set_bits_iterator> set_bits() const {
133 return make_range(set_bits_begin(), set_bits_end());
134 }
135
136 /// BitVector default ctor - Creates an empty bitvector.
137 BitVector() : Size(0) {}
138
139 /// BitVector ctor - Creates a bitvector of specified number of bits. All
140 /// bits are initialized to the specified value.
141 explicit BitVector(unsigned s, bool t = false)
142 : Bits(NumBitWords(s), 0 - (BitWord)t), Size(s) {
143 if (t)
144 clear_unused_bits();
145 }
146
147 /// empty - Tests whether there are no bits in this bitvector.
148 bool empty() const { return Size == 0; }
149
150 /// size - Returns the number of bits in this bitvector.
151 size_type size() const { return Size; }
152
153 /// count - Returns the number of bits which are set.
154 size_type count() const {
155 unsigned NumBits = 0;
156 for (auto Bit : Bits)
157 NumBits += countPopulation(Bit);
158 return NumBits;
159 }
160
161 /// any - Returns true if any bit is set.
162 bool any() const {
163 return any_of(Bits, [](BitWord Bit) { return Bit != 0; });
7
Calling 'any_of<const llvm::SmallVector<unsigned long, 6> &, (lambda at /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/BitVector.h:163:25)>'
16
Returning from 'any_of<const llvm::SmallVector<unsigned long, 6> &, (lambda at /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/BitVector.h:163:25)>'
17
Returning the value 1, which participates in a condition later
164 }
165
166 /// all - Returns true if all bits are set.
167 bool all() const {
168 for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
169 if (Bits[i] != ~BitWord(0))
170 return false;
171
172 // If bits remain check that they are ones. The unused bits are always zero.
173 if (unsigned Remainder = Size % BITWORD_SIZE)
174 return Bits[Size / BITWORD_SIZE] == (BitWord(1) << Remainder) - 1;
175
176 return true;
177 }
178
179 /// none - Returns true if none of the bits are set.
180 bool none() const {
181 return !any();
182 }
183
184 /// find_first_in - Returns the index of the first set / unset bit,
185 /// depending on \p Set, in the range [Begin, End).
186 /// Returns -1 if all bits in the range are unset / set.
187 int find_first_in(unsigned Begin, unsigned End, bool Set = true) const {
188 assert(Begin <= End && End <= Size)(static_cast<void> (0));
189 if (Begin == End)
190 return -1;
191
192 unsigned FirstWord = Begin / BITWORD_SIZE;
193 unsigned LastWord = (End - 1) / BITWORD_SIZE;
194
195 // Check subsequent words.
196 // The code below is based on search for the first _set_ bit. If
197 // we're searching for the first _unset_, we just take the
198 // complement of each word before we use it and apply
199 // the same method.
200 for (unsigned i = FirstWord; i <= LastWord; ++i) {
201 BitWord Copy = Bits[i];
202 if (!Set)
203 Copy = ~Copy;
204
205 if (i == FirstWord) {
206 unsigned FirstBit = Begin % BITWORD_SIZE;
207 Copy &= maskTrailingZeros<BitWord>(FirstBit);
208 }
209
210 if (i == LastWord) {
211 unsigned LastBit = (End - 1) % BITWORD_SIZE;
212 Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
213 }
214 if (Copy != 0)
215 return i * BITWORD_SIZE + countTrailingZeros(Copy);
216 }
217 return -1;
218 }
219
220 /// find_last_in - Returns the index of the last set bit in the range
221 /// [Begin, End). Returns -1 if all bits in the range are unset.
222 int find_last_in(unsigned Begin, unsigned End) const {
223 assert(Begin <= End && End <= Size)(static_cast<void> (0));
224 if (Begin == End)
225 return -1;
226
227 unsigned LastWord = (End - 1) / BITWORD_SIZE;
228 unsigned FirstWord = Begin / BITWORD_SIZE;
229
230 for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
231 unsigned CurrentWord = i - 1;
232
233 BitWord Copy = Bits[CurrentWord];
234 if (CurrentWord == LastWord) {
235 unsigned LastBit = (End - 1) % BITWORD_SIZE;
236 Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
237 }
238
239 if (CurrentWord == FirstWord) {
240 unsigned FirstBit = Begin % BITWORD_SIZE;
241 Copy &= maskTrailingZeros<BitWord>(FirstBit);
242 }
243
244 if (Copy != 0)
245 return (CurrentWord + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1;
246 }
247
248 return -1;
249 }
250
251 /// find_first_unset_in - Returns the index of the first unset bit in the
252 /// range [Begin, End). Returns -1 if all bits in the range are set.
253 int find_first_unset_in(unsigned Begin, unsigned End) const {
254 return find_first_in(Begin, End, /* Set = */ false);
255 }
256
257 /// find_last_unset_in - Returns the index of the last unset bit in the
258 /// range [Begin, End). Returns -1 if all bits in the range are set.
259 int find_last_unset_in(unsigned Begin, unsigned End) const {
260 assert(Begin <= End && End <= Size)(static_cast<void> (0));
261 if (Begin == End)
262 return -1;
263
264 unsigned LastWord = (End - 1) / BITWORD_SIZE;
265 unsigned FirstWord = Begin / BITWORD_SIZE;
266
267 for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
268 unsigned CurrentWord = i - 1;
269
270 BitWord Copy = Bits[CurrentWord];
271 if (CurrentWord == LastWord) {
272 unsigned LastBit = (End - 1) % BITWORD_SIZE;
273 Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
274 }
275
276 if (CurrentWord == FirstWord) {
277 unsigned FirstBit = Begin % BITWORD_SIZE;
278 Copy |= maskTrailingOnes<BitWord>(FirstBit);
279 }
280
281 if (Copy != ~BitWord(0)) {
282 unsigned Result =
283 (CurrentWord + 1) * BITWORD_SIZE - countLeadingOnes(Copy) - 1;
284 return Result < Size ? Result : -1;
285 }
286 }
287 return -1;
288 }
289
290 /// find_first - Returns the index of the first set bit, -1 if none
291 /// of the bits are set.
292 int find_first() const { return find_first_in(0, Size); }
293
294 /// find_last - Returns the index of the last set bit, -1 if none of the bits
295 /// are set.
296 int find_last() const { return find_last_in(0, Size); }
297
298 /// find_next - Returns the index of the next set bit following the
299 /// "Prev" bit. Returns -1 if the next set bit is not found.
300 int find_next(unsigned Prev) const { return find_first_in(Prev + 1, Size); }
301
302 /// find_prev - Returns the index of the first set bit that precedes the
303 /// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
304 int find_prev(unsigned PriorTo) const { return find_last_in(0, PriorTo); }
305
306 /// find_first_unset - Returns the index of the first unset bit, -1 if all
307 /// of the bits are set.
308 int find_first_unset() const { return find_first_unset_in(0, Size); }
309
310 /// find_next_unset - Returns the index of the next unset bit following the
311 /// "Prev" bit. Returns -1 if all remaining bits are set.
312 int find_next_unset(unsigned Prev) const {
313 return find_first_unset_in(Prev + 1, Size);
314 }
315
316 /// find_last_unset - Returns the index of the last unset bit, -1 if all of
317 /// the bits are set.
318 int find_last_unset() const { return find_last_unset_in(0, Size); }
319
320 /// find_prev_unset - Returns the index of the first unset bit that precedes
321 /// the bit at \p PriorTo. Returns -1 if all previous bits are set.
322 int find_prev_unset(unsigned PriorTo) {
323 return find_last_unset_in(0, PriorTo);
324 }
325
326 /// clear - Removes all bits from the bitvector.
327 void clear() {
328 Size = 0;
329 Bits.clear();
330 }
331
332 /// resize - Grow or shrink the bitvector.
333 void resize(unsigned N, bool t = false) {
334 set_unused_bits(t);
335 Size = N;
336 Bits.resize(NumBitWords(N), 0 - BitWord(t));
337 clear_unused_bits();
338 }
339
340 void reserve(unsigned N) { Bits.reserve(NumBitWords(N)); }
341
342 // Set, reset, flip
343 BitVector &set() {
344 init_words(true);
345 clear_unused_bits();
346 return *this;
347 }
348
349 BitVector &set(unsigned Idx) {
350 assert(Idx < Size && "access in bound")(static_cast<void> (0));
351 Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
352 return *this;
353 }
354
355 /// set - Efficiently set a range of bits in [I, E)
356 BitVector &set(unsigned I, unsigned E) {
357 assert(I <= E && "Attempted to set backwards range!")(static_cast<void> (0));
358 assert(E <= size() && "Attempted to set out-of-bounds range!")(static_cast<void> (0));
359
360 if (I == E) return *this;
361
362 if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
363 BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
364 BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
365 BitWord Mask = EMask - IMask;
366 Bits[I / BITWORD_SIZE] |= Mask;
367 return *this;
368 }
369
370 BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
371 Bits[I / BITWORD_SIZE] |= PrefixMask;
372 I = alignTo(I, BITWORD_SIZE);
373
374 for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
375 Bits[I / BITWORD_SIZE] = ~BitWord(0);
376
377 BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
378 if (I < E)
379 Bits[I / BITWORD_SIZE] |= PostfixMask;
380
381 return *this;
382 }
383
384 BitVector &reset() {
385 init_words(false);
386 return *this;
387 }
388
389 BitVector &reset(unsigned Idx) {
390 Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
391 return *this;
392 }
393
394 /// reset - Efficiently reset a range of bits in [I, E)
395 BitVector &reset(unsigned I, unsigned E) {
396 assert(I <= E && "Attempted to reset backwards range!")(static_cast<void> (0));
397 assert(E <= size() && "Attempted to reset out-of-bounds range!")(static_cast<void> (0));
398
399 if (I == E) return *this;
400
401 if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
402 BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
403 BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
404 BitWord Mask = EMask - IMask;
405 Bits[I / BITWORD_SIZE] &= ~Mask;
406 return *this;
407 }
408
409 BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
410 Bits[I / BITWORD_SIZE] &= ~PrefixMask;
411 I = alignTo(I, BITWORD_SIZE);
412
413 for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
414 Bits[I / BITWORD_SIZE] = BitWord(0);
415
416 BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
417 if (I < E)
418 Bits[I / BITWORD_SIZE] &= ~PostfixMask;
419
420 return *this;
421 }
422
423 BitVector &flip() {
424 for (auto &Bit : Bits)
425 Bit = ~Bit;
426 clear_unused_bits();
427 return *this;
428 }
429
430 BitVector &flip(unsigned Idx) {
431 Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
432 return *this;
433 }
434
435 // Indexing.
436 reference operator[](unsigned Idx) {
437 assert (Idx < Size && "Out-of-bounds Bit access.")(static_cast<void> (0));
438 return reference(*this, Idx);
439 }
440
441 bool operator[](unsigned Idx) const {
442 assert (Idx < Size && "Out-of-bounds Bit access.")(static_cast<void> (0));
443 BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
444 return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
445 }
446
447 bool test(unsigned Idx) const {
448 return (*this)[Idx];
449 }
450
451 // Push single bit to end of vector.
452 void push_back(bool Val) {
453 unsigned OldSize = Size;
454 unsigned NewSize = Size + 1;
455
456 // Resize, which will insert zeros.
457 // If we already fit then the unused bits will be already zero.
458 if (NewSize > getBitCapacity())
459 resize(NewSize, false);
460 else
461 Size = NewSize;
462
463 // If true, set single bit.
464 if (Val)
465 set(OldSize);
466 }
467
468 /// Test if any common bits are set.
469 bool anyCommon(const BitVector &RHS) const {
470 unsigned ThisWords = Bits.size();
471 unsigned RHSWords = RHS.Bits.size();
472 for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
473 if (Bits[i] & RHS.Bits[i])
474 return true;
475 return false;
476 }
477
478 // Comparison operators.
479 bool operator==(const BitVector &RHS) const {
480 if (size() != RHS.size())
481 return false;
482 unsigned NumWords = Bits.size();
483 return std::equal(Bits.begin(), Bits.begin() + NumWords, RHS.Bits.begin());
484 }
485
486 bool operator!=(const BitVector &RHS) const { return !(*this == RHS); }
487
488 /// Intersection, union, disjoint union.
489 BitVector &operator&=(const BitVector &RHS) {
490 unsigned ThisWords = Bits.size();
491 unsigned RHSWords = RHS.Bits.size();
492 unsigned i;
493 for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
494 Bits[i] &= RHS.Bits[i];
495
496 // Any bits that are just in this bitvector become zero, because they aren't
497 // in the RHS bit vector. Any words only in RHS are ignored because they
498 // are already zero in the LHS.
499 for (; i != ThisWords; ++i)
500 Bits[i] = 0;
501
502 return *this;
503 }
504
505 /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
506 BitVector &reset(const BitVector &RHS) {
507 unsigned ThisWords = Bits.size();
508 unsigned RHSWords = RHS.Bits.size();
509 for (unsigned i = 0; i != std::min(ThisWords, RHSWords); ++i)
510 Bits[i] &= ~RHS.Bits[i];
511 return *this;
512 }
513
514 /// test - Check if (This - RHS) is zero.
515 /// This is the same as reset(RHS) and any().
516 bool test(const BitVector &RHS) const {
517 unsigned ThisWords = Bits.size();
518 unsigned RHSWords = RHS.Bits.size();
519 unsigned i;
520 for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
521 if ((Bits[i] & ~RHS.Bits[i]) != 0)
522 return true;
523
524 for (; i != ThisWords ; ++i)
525 if (Bits[i] != 0)
526 return true;
527
528 return false;
529 }
530
531 template <class F, class... ArgTys>
532 static BitVector &apply(F &&f, BitVector &Out, BitVector const &Arg,
533 ArgTys const &...Args) {
534 assert(llvm::all_of((static_cast<void> (0))
535 std::initializer_list<unsigned>{Args.size()...},(static_cast<void> (0))
536 [&Arg](auto const &BV) { return Arg.size() == BV; }) &&(static_cast<void> (0))
537 "consistent sizes")(static_cast<void> (0));
538 Out.resize(Arg.size());
539 for (size_type I = 0, E = Arg.Bits.size(); I != E; ++I)
540 Out.Bits[I] = f(Arg.Bits[I], Args.Bits[I]...);
541 Out.clear_unused_bits();
542 return Out;
543 }
544
545 BitVector &operator|=(const BitVector &RHS) {
546 if (size() < RHS.size())
547 resize(RHS.size());
548 for (size_type I = 0, E = RHS.Bits.size(); I != E; ++I)
549 Bits[I] |= RHS.Bits[I];
550 return *this;
551 }
552
553 BitVector &operator^=(const BitVector &RHS) {
554 if (size() < RHS.size())
555 resize(RHS.size());
556 for (size_type I = 0, E = RHS.Bits.size(); I != E; ++I)
557 Bits[I] ^= RHS.Bits[I];
558 return *this;
559 }
560
561 BitVector &operator>>=(unsigned N) {
562 assert(N <= Size)(static_cast<void> (0));
563 if (LLVM_UNLIKELY(empty() || N == 0)__builtin_expect((bool)(empty() || N == 0), false))
564 return *this;
565
566 unsigned NumWords = Bits.size();
567 assert(NumWords >= 1)(static_cast<void> (0));
568
569 wordShr(N / BITWORD_SIZE);
570
571 unsigned BitDistance = N % BITWORD_SIZE;
572 if (BitDistance == 0)
573 return *this;
574
575 // When the shift size is not a multiple of the word size, then we have
576 // a tricky situation where each word in succession needs to extract some
577 // of the bits from the next word and or them into this word while
578 // shifting this word to make room for the new bits. This has to be done
579 // for every word in the array.
580
581 // Since we're shifting each word right, some bits will fall off the end
582 // of each word to the right, and empty space will be created on the left.
583 // The final word in the array will lose bits permanently, so starting at
584 // the beginning, work forwards shifting each word to the right, and
585 // OR'ing in the bits from the end of the next word to the beginning of
586 // the current word.
587
588 // Example:
589 // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting right
590 // by 4 bits.
591 // Step 1: Word[0] >>= 4 ; 0x0ABBCCDD
592 // Step 2: Word[0] |= 0x10000000 ; 0x1ABBCCDD
593 // Step 3: Word[1] >>= 4 ; 0x0EEFF001
594 // Step 4: Word[1] |= 0x50000000 ; 0x5EEFF001
595 // Step 5: Word[2] >>= 4 ; 0x02334455
596 // Result: { 0x1ABBCCDD, 0x5EEFF001, 0x02334455 }
597 const BitWord Mask = maskTrailingOnes<BitWord>(BitDistance);
598 const unsigned LSH = BITWORD_SIZE - BitDistance;
599
600 for (unsigned I = 0; I < NumWords - 1; ++I) {
601 Bits[I] >>= BitDistance;
602 Bits[I] |= (Bits[I + 1] & Mask) << LSH;
603 }
604
605 Bits[NumWords - 1] >>= BitDistance;
606
607 return *this;
608 }
609
610 BitVector &operator<<=(unsigned N) {
611 assert(N <= Size)(static_cast<void> (0));
612 if (LLVM_UNLIKELY(empty() || N == 0)__builtin_expect((bool)(empty() || N == 0), false))
613 return *this;
614
615 unsigned NumWords = Bits.size();
616 assert(NumWords >= 1)(static_cast<void> (0));
617
618 wordShl(N / BITWORD_SIZE);
619
620 unsigned BitDistance = N % BITWORD_SIZE;
621 if (BitDistance == 0)
622 return *this;
623
624 // When the shift size is not a multiple of the word size, then we have
625 // a tricky situation where each word in succession needs to extract some
626 // of the bits from the previous word and or them into this word while
627 // shifting this word to make room for the new bits. This has to be done
628 // for every word in the array. This is similar to the algorithm outlined
629 // in operator>>=, but backwards.
630
631 // Since we're shifting each word left, some bits will fall off the end
632 // of each word to the left, and empty space will be created on the right.
633 // The first word in the array will lose bits permanently, so starting at
634 // the end, work backwards shifting each word to the left, and OR'ing
635 // in the bits from the end of the next word to the beginning of the
636 // current word.
637
638 // Example:
639 // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting left
640 // by 4 bits.
641 // Step 1: Word[2] <<= 4 ; 0x23344550
642 // Step 2: Word[2] |= 0x0000000E ; 0x2334455E
643 // Step 3: Word[1] <<= 4 ; 0xEFF00110
644 // Step 4: Word[1] |= 0x0000000A ; 0xEFF0011A
645 // Step 5: Word[0] <<= 4 ; 0xABBCCDD0
646 // Result: { 0xABBCCDD0, 0xEFF0011A, 0x2334455E }
647 const BitWord Mask = maskLeadingOnes<BitWord>(BitDistance);
648 const unsigned RSH = BITWORD_SIZE - BitDistance;
649
650 for (int I = NumWords - 1; I > 0; --I) {
651 Bits[I] <<= BitDistance;
652 Bits[I] |= (Bits[I - 1] & Mask) >> RSH;
653 }
654 Bits[0] <<= BitDistance;
655 clear_unused_bits();
656
657 return *this;
658 }
659
660 void swap(BitVector &RHS) {
661 std::swap(Bits, RHS.Bits);
662 std::swap(Size, RHS.Size);
663 }
664
665 void invalid() {
666 assert(!Size && Bits.empty())(static_cast<void> (0));
667 Size = (unsigned)-1;
668 }
669 bool isInvalid() const { return Size == (unsigned)-1; }
670
671 ArrayRef<BitWord> getData() const { return {&Bits[0], Bits.size()}; }
672
673 //===--------------------------------------------------------------------===//
674 // Portable bit mask operations.
675 //===--------------------------------------------------------------------===//
676 //
677 // These methods all operate on arrays of uint32_t, each holding 32 bits. The
678 // fixed word size makes it easier to work with literal bit vector constants
679 // in portable code.
680 //
681 // The LSB in each word is the lowest numbered bit. The size of a portable
682 // bit mask is always a whole multiple of 32 bits. If no bit mask size is
683 // given, the bit mask is assumed to cover the entire BitVector.
684
685 /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
686 /// This computes "*this |= Mask".
687 void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
688 applyMask<true, false>(Mask, MaskWords);
689 }
690
691 /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
692 /// Don't resize. This computes "*this &= ~Mask".
693 void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
694 applyMask<false, false>(Mask, MaskWords);
695 }
696
697 /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
698 /// Don't resize. This computes "*this |= ~Mask".
699 void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
700 applyMask<true, true>(Mask, MaskWords);
701 }
702
703 /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
704 /// Don't resize. This computes "*this &= Mask".
705 void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
706 applyMask<false, true>(Mask, MaskWords);
707 }
708
709private:
710 /// Perform a logical left shift of \p Count words by moving everything
711 /// \p Count words to the right in memory.
712 ///
713 /// While confusing, words are stored from least significant at Bits[0] to
714 /// most significant at Bits[NumWords-1]. A logical shift left, however,
715 /// moves the current least significant bit to a higher logical index, and
716 /// fills the previous least significant bits with 0. Thus, we actually
717 /// need to move the bytes of the memory to the right, not to the left.
718 /// Example:
719 /// Words = [0xBBBBAAAA, 0xDDDDFFFF, 0x00000000, 0xDDDD0000]
720 /// represents a BitVector where 0xBBBBAAAA contain the least significant
721 /// bits. So if we want to shift the BitVector left by 2 words, we need
722 /// to turn this into 0x00000000 0x00000000 0xBBBBAAAA 0xDDDDFFFF by using a
723 /// memmove which moves right, not left.
724 void wordShl(uint32_t Count) {
725 if (Count == 0)
726 return;
727
728 uint32_t NumWords = Bits.size();
729
730 // Since we always move Word-sized chunks of data with src and dest both
731 // aligned to a word-boundary, we don't need to worry about endianness
732 // here.
733 std::copy(Bits.begin(), Bits.begin() + NumWords - Count,
734 Bits.begin() + Count);
735 std::fill(Bits.begin(), Bits.begin() + Count, 0);
736 clear_unused_bits();
737 }
738
739 /// Perform a logical right shift of \p Count words by moving those
740 /// words to the left in memory. See wordShl for more information.
741 ///
742 void wordShr(uint32_t Count) {
743 if (Count == 0)
744 return;
745
746 uint32_t NumWords = Bits.size();
747
748 std::copy(Bits.begin() + Count, Bits.begin() + NumWords, Bits.begin());
749 std::fill(Bits.begin() + NumWords - Count, Bits.begin() + NumWords, 0);
750 }
751
752 int next_unset_in_word(int WordIndex, BitWord Word) const {
753 unsigned Result = WordIndex * BITWORD_SIZE + countTrailingOnes(Word);
754 return Result < size() ? Result : -1;
755 }
756
757 unsigned NumBitWords(unsigned S) const {
758 return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
759 }
760
761 // Set the unused bits in the high words.
762 void set_unused_bits(bool t = true) {
763 // Then set any stray high bits of the last used word.
764 if (unsigned ExtraBits = Size % BITWORD_SIZE) {
765 BitWord ExtraBitMask = ~BitWord(0) << ExtraBits;
766 if (t)
767 Bits.back() |= ExtraBitMask;
768 else
769 Bits.back() &= ~ExtraBitMask;
770 }
771 }
772
773 // Clear the unused bits in the high words.
774 void clear_unused_bits() {
775 set_unused_bits(false);
776 }
777
778 void init_words(bool t) {
779 std::fill(Bits.begin(), Bits.end(), 0 - (BitWord)t);
780 }
781
782 template<bool AddBits, bool InvertMask>
783 void applyMask(const uint32_t *Mask, unsigned MaskWords) {
784 static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
785 MaskWords = std::min(MaskWords, (size() + 31) / 32);
786 const unsigned Scale = BITWORD_SIZE / 32;
787 unsigned i;
788 for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
789 BitWord BW = Bits[i];
790 // This inner loop should unroll completely when BITWORD_SIZE > 32.
791 for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
792 uint32_t M = *Mask++;
793 if (InvertMask) M = ~M;
794 if (AddBits) BW |= BitWord(M) << b;
795 else BW &= ~(BitWord(M) << b);
796 }
797 Bits[i] = BW;
798 }
799 for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
800 uint32_t M = *Mask++;
801 if (InvertMask) M = ~M;
802 if (AddBits) Bits[i] |= BitWord(M) << b;
803 else Bits[i] &= ~(BitWord(M) << b);
804 }
805 if (AddBits)
806 clear_unused_bits();
807 }
808
809public:
810 /// Return the size (in bytes) of the bit vector.
811 size_type getMemorySize() const { return Bits.size() * sizeof(BitWord); }
812 size_type getBitCapacity() const { return Bits.size() * BITWORD_SIZE; }
813};
814
815inline BitVector::size_type capacity_in_bytes(const BitVector &X) {
816 return X.getMemorySize();
817}
818
819template <> struct DenseMapInfo<BitVector> {
820 static inline BitVector getEmptyKey() { return {}; }
821 static inline BitVector getTombstoneKey() {
822 BitVector V;
823 V.invalid();
824 return V;
825 }
826 static unsigned getHashValue(const BitVector &V) {
827 return DenseMapInfo<std::pair<BitVector::size_type, ArrayRef<uintptr_t>>>::
828 getHashValue(std::make_pair(V.size(), V.getData()));
829 }
830 static bool isEqual(const BitVector &LHS, const BitVector &RHS) {
831 if (LHS.isInvalid() || RHS.isInvalid())
832 return LHS.isInvalid() == RHS.isInvalid();
833 return LHS == RHS;
834 }
835};
836} // end namespace llvm
837
838namespace std {
839 /// Implement std::swap in terms of BitVector swap.
840inline void swap(llvm::BitVector &LHS, llvm::BitVector &RHS) { LHS.swap(RHS); }
841} // end namespace std
842
843#endif // LLVM_ADT_BITVECTOR_H

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/STLExtras.h

1//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some templates that are useful if you are working with the
10// STL at all.
11//
12// No library is required when using these functions.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_ADT_STLEXTRAS_H
17#define LLVM_ADT_STLEXTRAS_H
18
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLForwardCompat.h"
21#include "llvm/ADT/iterator.h"
22#include "llvm/ADT/iterator_range.h"
23#include "llvm/Config/abi-breaking.h"
24#include "llvm/Support/ErrorHandling.h"
25#include <algorithm>
26#include <cassert>
27#include <cstddef>
28#include <cstdint>
29#include <cstdlib>
30#include <functional>
31#include <initializer_list>
32#include <iterator>
33#include <limits>
34#include <memory>
35#include <tuple>
36#include <type_traits>
37#include <utility>
38
39#ifdef EXPENSIVE_CHECKS
40#include <random> // for std::mt19937
41#endif
42
43namespace llvm {
44
// Only used by compiler if both template types are the same. Useful when
// using SFINAE to test for the existence of member functions.
template <typename T, T> struct SameType;

namespace detail {

// Iterator type produced by std::begin on an lvalue of RangeT.
template <typename RangeT>
using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));

// Element type of RangeT with reference qualification stripped.
template <typename RangeT>
using ValueOfRange = typename std::remove_reference<decltype(
    *std::begin(std::declval<RangeT &>()))>::type;

} // end namespace detail
59
60//===----------------------------------------------------------------------===//
61// Extra additions to <type_traits>
62//===----------------------------------------------------------------------===//
63
/// Metafunction yielding `const T *` for a type T.
template <typename T> struct make_const_ptr {
  using type =
      typename std::add_pointer<typename std::add_const<T>::type>::type;
};

/// Metafunction yielding `const T &` for a type T.
template <typename T> struct make_const_ref {
  using type = typename std::add_lvalue_reference<
      typename std::add_const<T>::type>::type;
};
73
namespace detail {
template <typename...> using void_t = void;
// Primary template: selected when Op<Args...> is ill-formed.
template <class, template <class...> class Op, class... Args> struct detector {
  using value_t = std::false_type;
};
// Partial specialization: selected when Op<Args...> is well-formed.
template <template <class...> class Op, class... Args>
struct detector<void_t<Op<Args...>>, Op, Args...> {
  using value_t = std::true_type;
};
} // end namespace detail
84
85/// Detects if a given trait holds for some set of arguments 'Args'.
86/// For example, the given trait could be used to detect if a given type
87/// has a copy assignment operator:
88/// template<class T>
89/// using has_copy_assign_t = decltype(std::declval<T&>()
90/// = std::declval<const T&>());
91/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
92template <template <class...> class Op, class... Args>
93using is_detected = typename detail::detector<void, Op, Args...>::value_t;
94
95namespace detail {
96template <typename Callable, typename... Args>
97using is_invocable =
98 decltype(std::declval<Callable &>()(std::declval<Args>()...));
99} // namespace detail
100
101/// Check if a Callable type can be invoked with the given set of arg types.
102template <typename Callable, typename... Args>
103using is_invocable = is_detected<detail::is_invocable, Callable, Args...>;
104
/// This class provides various trait information about a callable object.
///   * To access the number of arguments: Traits::num_args
///   * To access the type of an argument: Traits::arg_t<Index>
///   * To access the type of the result:  Traits::result_t
template <typename T, bool isClass = std::is_class<T>::value>
struct function_traits : public function_traits<decltype(&T::operator())> {};

/// Overload for const member function pointers (also reached for functors via
/// the primary template above).
template <typename ClassType, typename ReturnType, typename... Args>
struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
  /// The number of arguments to this function.
  enum { num_args = sizeof...(Args) };

  /// The result type of this function.
  using result_t = ReturnType;

  /// The type of an argument to this function.
  template <size_t Index>
  using arg_t = typename std::tuple_element<Index, std::tuple<Args...>>::type;
};
/// Overload for non-const member function pointers.
template <typename ClassType, typename ReturnType, typename... Args>
struct function_traits<ReturnType (ClassType::*)(Args...), false>
    : function_traits<ReturnType (ClassType::*)(Args...) const> {};
/// Overload for non-class function types.
template <typename ReturnType, typename... Args>
struct function_traits<ReturnType (*)(Args...), false> {
  /// The number of arguments to this function.
  enum { num_args = sizeof...(Args) };

  /// The result type of this function.
  using result_t = ReturnType;

  /// The type of an argument to this function.
  template <size_t i>
  using arg_t = typename std::tuple_element<i, std::tuple<Args...>>::type;
};
/// Overload for non-class function type references.
template <typename ReturnType, typename... Args>
struct function_traits<ReturnType (&)(Args...), false>
    : public function_traits<ReturnType (*)(Args...)> {};
146
147//===----------------------------------------------------------------------===//
148// Extra additions to <functional>
149//===----------------------------------------------------------------------===//
150
/// Functor that returns its argument unchanged, preserving const-ness.
template <class Ty> struct identity {
  using argument_type = Ty;

  Ty &operator()(Ty &self) const { return self; }
  const Ty &operator()(const Ty &self) const { return self; }
};
161
162/// An efficient, type-erasing, non-owning reference to a callable. This is
163/// intended for use as the type of a function parameter that is not used
164/// after the function in question returns.
165///
166/// This class does not own the callable, so it is not in general safe to store
167/// a function_ref.
168template<typename Fn> class function_ref;
169
170template<typename Ret, typename ...Params>
171class function_ref<Ret(Params...)> {
172 Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
173 intptr_t callable;
174
175 template<typename Callable>
176 static Ret callback_fn(intptr_t callable, Params ...params) {
177 return (*reinterpret_cast<Callable*>(callable))(
178 std::forward<Params>(params)...);
179 }
180
181public:
182 function_ref() = default;
183 function_ref(std::nullptr_t) {}
184
185 template <typename Callable>
186 function_ref(
187 Callable &&callable,
188 // This is not the copy-constructor.
189 std::enable_if_t<!std::is_same<remove_cvref_t<Callable>,
190 function_ref>::value> * = nullptr,
191 // Functor must be callable and return a suitable type.
192 std::enable_if_t<std::is_void<Ret>::value ||
193 std::is_convertible<decltype(std::declval<Callable>()(
194 std::declval<Params>()...)),
195 Ret>::value> * = nullptr)
196 : callback(callback_fn<typename std::remove_reference<Callable>::type>),
197 callable(reinterpret_cast<intptr_t>(&callable)) {}
198
199 Ret operator()(Params ...params) const {
200 return callback(callable, std::forward<Params>(params)...);
201 }
202
203 explicit operator bool() const { return callback; }
204};
205
206//===----------------------------------------------------------------------===//
207// Extra additions to <iterator>
208//===----------------------------------------------------------------------===//
209
210namespace adl_detail {
211
212using std::begin;
213
214template <typename ContainerTy>
215decltype(auto) adl_begin(ContainerTy &&container) {
216 return begin(std::forward<ContainerTy>(container));
217}
218
219using std::end;
220
221template <typename ContainerTy>
222decltype(auto) adl_end(ContainerTy &&container) {
223 return end(std::forward<ContainerTy>(container));
224}
225
226using std::swap;
227
228template <typename T>
229void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
230 std::declval<T>()))) {
231 swap(std::forward<T>(lhs), std::forward<T>(rhs));
232}
233
234} // end namespace adl_detail
235
236template <typename ContainerTy>
237decltype(auto) adl_begin(ContainerTy &&container) {
238 return adl_detail::adl_begin(std::forward<ContainerTy>(container));
239}
240
241template <typename ContainerTy>
242decltype(auto) adl_end(ContainerTy &&container) {
243 return adl_detail::adl_end(std::forward<ContainerTy>(container));
244}
245
246template <typename T>
247void adl_swap(T &&lhs, T &&rhs) noexcept(
248 noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
249 adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
250}
251
252/// Test whether \p RangeOrContainer is empty. Similar to C++17 std::empty.
253template <typename T>
254constexpr bool empty(const T &RangeOrContainer) {
255 return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer);
256}
257
258/// Returns true if the given container only contains a single element.
259template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
260 auto B = std::begin(C), E = std::end(C);
261 return B != E && std::next(B) == E;
262}
263
/// Return a range covering \p RangeOrContainer with the first N elements
/// excluded.
template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N = 1) {
  return make_range(std::next(adl_begin(RangeOrContainer), N),
                    adl_end(RangeOrContainer));
}
270
271// mapped_iterator - This is a simple iterator adapter that causes a function to
272// be applied whenever operator* is invoked on the iterator.
273
274template <typename ItTy, typename FuncTy,
275 typename FuncReturnTy =
276 decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
277class mapped_iterator
278 : public iterator_adaptor_base<
279 mapped_iterator<ItTy, FuncTy>, ItTy,
280 typename std::iterator_traits<ItTy>::iterator_category,
281 typename std::remove_reference<FuncReturnTy>::type> {
282public:
283 mapped_iterator(ItTy U, FuncTy F)
284 : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
285
286 ItTy getCurrent() { return this->I; }
287
288 FuncReturnTy operator*() const { return F(*this->I); }
289
290private:
291 FuncTy F;
292};
293
294// map_iterator - Provide a convenient way to create mapped_iterators, just like
295// make_pair is useful for creating pairs...
296template <class ItTy, class FuncTy>
297inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
298 return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
299}
300
301template <class ContainerTy, class FuncTy>
302auto map_range(ContainerTy &&C, FuncTy F) {
303 return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F));
304}
305
306/// Helper to determine if type T has a member called rbegin().
307template <typename Ty> class has_rbegin_impl {
308 using yes = char[1];
309 using no = char[2];
310
311 template <typename Inner>
312 static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
313
314 template <typename>
315 static no& test(...);
316
317public:
318 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
319};
320
321/// Metafunction to determine if T& or T has a member called rbegin().
322template <typename Ty>
323struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
324};
325
326// Returns an iterator_range over the given container which iterates in reverse.
327// Note that the container must have rbegin()/rend() methods for this to work.
328template <typename ContainerTy>
329auto reverse(ContainerTy &&C,
330 std::enable_if_t<has_rbegin<ContainerTy>::value> * = nullptr) {
331 return make_range(C.rbegin(), C.rend());
332}
333
334// Returns a std::reverse_iterator wrapped around the given iterator.
335template <typename IteratorTy>
336std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
337 return std::reverse_iterator<IteratorTy>(It);
338}
339
340// Returns an iterator_range over the given container which iterates in reverse.
341// Note that the container must have begin()/end() methods which return
342// bidirectional iterators for this to work.
343template <typename ContainerTy>
344auto reverse(ContainerTy &&C,
345 std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) {
346 return make_range(llvm::make_reverse_iterator(std::end(C)),
347 llvm::make_reverse_iterator(std::begin(C)));
348}
349
350/// An iterator adaptor that filters the elements of given inner iterators.
351///
352/// The predicate parameter should be a callable object that accepts the wrapped
353/// iterator's reference type and returns a bool. When incrementing or
354/// decrementing the iterator, it will call the predicate on each element and
355/// skip any where it returns false.
356///
357/// \code
358/// int A[] = { 1, 2, 3, 4 };
359/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
360/// // R contains { 1, 3 }.
361/// \endcode
362///
363/// Note: filter_iterator_base implements support for forward iteration.
364/// filter_iterator_impl exists to provide support for bidirectional iteration,
365/// conditional on whether the wrapped iterator supports it.
366template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
367class filter_iterator_base
368 : public iterator_adaptor_base<
369 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
370 WrappedIteratorT,
371 typename std::common_type<
372 IterTag, typename std::iterator_traits<
373 WrappedIteratorT>::iterator_category>::type> {
374 using BaseT = iterator_adaptor_base<
375 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
376 WrappedIteratorT,
377 typename std::common_type<
378 IterTag, typename std::iterator_traits<
379 WrappedIteratorT>::iterator_category>::type>;
380
381protected:
382 WrappedIteratorT End;
383 PredicateT Pred;
384
385 void findNextValid() {
386 while (this->I != End && !Pred(*this->I))
387 BaseT::operator++();
388 }
389
390 // Construct the iterator. The begin iterator needs to know where the end
391 // is, so that it can properly stop when it gets there. The end iterator only
392 // needs the predicate to support bidirectional iteration.
393 filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
394 PredicateT Pred)
395 : BaseT(Begin), End(End), Pred(Pred) {
396 findNextValid();
397 }
398
399public:
400 using BaseT::operator++;
401
402 filter_iterator_base &operator++() {
403 BaseT::operator++();
404 findNextValid();
405 return *this;
406 }
407};
408
409/// Specialization of filter_iterator_base for forward iteration only.
410template <typename WrappedIteratorT, typename PredicateT,
411 typename IterTag = std::forward_iterator_tag>
412class filter_iterator_impl
413 : public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
414 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>;
415
416public:
417 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
418 PredicateT Pred)
419 : BaseT(Begin, End, Pred) {}
420};
421
422/// Specialization of filter_iterator_base for bidirectional iteration.
423template <typename WrappedIteratorT, typename PredicateT>
424class filter_iterator_impl<WrappedIteratorT, PredicateT,
425 std::bidirectional_iterator_tag>
426 : public filter_iterator_base<WrappedIteratorT, PredicateT,
427 std::bidirectional_iterator_tag> {
428 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT,
429 std::bidirectional_iterator_tag>;
430 void findPrevValid() {
431 while (!this->Pred(*this->I))
432 BaseT::operator--();
433 }
434
435public:
436 using BaseT::operator--;
437
438 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
439 PredicateT Pred)
440 : BaseT(Begin, End, Pred) {}
441
442 filter_iterator_impl &operator--() {
443 BaseT::operator--();
444 findPrevValid();
445 return *this;
446 }
447};
448
449namespace detail {
450
451template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
452 using type = std::forward_iterator_tag;
453};
454
455template <> struct fwd_or_bidi_tag_impl<true> {
456 using type = std::bidirectional_iterator_tag;
457};
458
459/// Helper which sets its type member to forward_iterator_tag if the category
460/// of \p IterT does not derive from bidirectional_iterator_tag, and to
461/// bidirectional_iterator_tag otherwise.
462template <typename IterT> struct fwd_or_bidi_tag {
463 using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
464 std::bidirectional_iterator_tag,
465 typename std::iterator_traits<IterT>::iterator_category>::value>::type;
466};
467
468} // namespace detail
469
470/// Defines filter_iterator to a suitable specialization of
471/// filter_iterator_impl, based on the underlying iterator's category.
472template <typename WrappedIteratorT, typename PredicateT>
473using filter_iterator = filter_iterator_impl<
474 WrappedIteratorT, PredicateT,
475 typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;
476
477/// Convenience function that takes a range of elements and a predicate,
478/// and return a new filter_iterator range.
479///
480/// FIXME: Currently if RangeT && is a rvalue reference to a temporary, the
481/// lifetime of that temporary is not kept by the returned range object, and the
482/// temporary is going to be dropped on the floor after the make_iterator_range
483/// full expression that contains this function call.
484template <typename RangeT, typename PredicateT>
485iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
486make_filter_range(RangeT &&Range, PredicateT Pred) {
487 using FilterIteratorT =
488 filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
489 return make_range(
490 FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
491 std::end(std::forward<RangeT>(Range)), Pred),
492 FilterIteratorT(std::end(std::forward<RangeT>(Range)),
493 std::end(std::forward<RangeT>(Range)), Pred));
494}
495
496/// A pseudo-iterator adaptor that is designed to implement "early increment"
497/// style loops.
498///
499/// This is *not a normal iterator* and should almost never be used directly. It
500/// is intended primarily to be used with range based for loops and some range
501/// algorithms.
502///
503/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
504/// somewhere between them. The constraints of these iterators are:
505///
506/// - On construction or after being incremented, it is comparable and
507/// dereferencable. It is *not* incrementable.
508/// - After being dereferenced, it is neither comparable nor dereferencable, it
509/// is only incrementable.
510///
511/// This means you can only dereference the iterator once, and you can only
512/// increment it once between dereferences.
513template <typename WrappedIteratorT>
514class early_inc_iterator_impl
515 : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
516 WrappedIteratorT, std::input_iterator_tag> {
517 using BaseT =
518 iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
519 WrappedIteratorT, std::input_iterator_tag>;
520
521 using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;
522
523protected:
524#if LLVM_ENABLE_ABI_BREAKING_CHECKS0
525 bool IsEarlyIncremented = false;
526#endif
527
528public:
529 early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
530
531 using BaseT::operator*;
532 decltype(*std::declval<WrappedIteratorT>()) operator*() {
533#if LLVM_ENABLE_ABI_BREAKING_CHECKS0
534 assert(!IsEarlyIncremented && "Cannot dereference twice!")(static_cast<void> (0));
535 IsEarlyIncremented = true;
536#endif
537 return *(this->I)++;
538 }
539
540 using BaseT::operator++;
541 early_inc_iterator_impl &operator++() {
542#if LLVM_ENABLE_ABI_BREAKING_CHECKS0
543 assert(IsEarlyIncremented && "Cannot increment before dereferencing!")(static_cast<void> (0));
544 IsEarlyIncremented = false;
545#endif
546 return *this;
547 }
548
549 friend bool operator==(const early_inc_iterator_impl &LHS,
550 const early_inc_iterator_impl &RHS) {
551#if LLVM_ENABLE_ABI_BREAKING_CHECKS0
552 assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!")(static_cast<void> (0));
553#endif
554 return (const BaseT &)LHS == (const BaseT &)RHS;
555 }
556};
557
558/// Make a range that does early increment to allow mutation of the underlying
559/// range without disrupting iteration.
560///
561/// The underlying iterator will be incremented immediately after it is
562/// dereferenced, allowing deletion of the current node or insertion of nodes to
563/// not disrupt iteration provided they do not invalidate the *next* iterator --
564/// the current iterator can be invalidated.
565///
566/// This requires a very exact pattern of use that is only really suitable to
567/// range based for loops and other range algorithms that explicitly guarantee
568/// to dereference exactly once each element, and to increment exactly once each
569/// element.
570template <typename RangeT>
571iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
572make_early_inc_range(RangeT &&Range) {
573 using EarlyIncIteratorT =
574 early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
575 return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
576 EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
577}
578
// Forward declarations required by zip_shortest/zip_first/zip_longest below;
// the definitions appear later in this header.
template <typename R, typename UnaryPredicate>
bool all_of(R &&range, UnaryPredicate P);
template <typename R, typename UnaryPredicate>
bool any_of(R &&range, UnaryPredicate P);
584
585namespace detail {
586
587using std::declval;
588
589// We have to alias this since inlining the actual type at the usage site
590// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
591template<typename... Iters> struct ZipTupleType {
592 using type = std::tuple<decltype(*declval<Iters>())...>;
593};
594
595template <typename ZipType, typename... Iters>
596using zip_traits = iterator_facade_base<
597 ZipType, typename std::common_type<std::bidirectional_iterator_tag,
598 typename std::iterator_traits<
599 Iters>::iterator_category...>::type,
600 // ^ TODO: Implement random access methods.
601 typename ZipTupleType<Iters...>::type,
602 typename std::iterator_traits<typename std::tuple_element<
603 0, std::tuple<Iters...>>::type>::difference_type,
604 // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
605 // inner iterators have the same difference_type. It would fail if, for
606 // instance, the second field's difference_type were non-numeric while the
607 // first is.
608 typename ZipTupleType<Iters...>::type *,
609 typename ZipTupleType<Iters...>::type>;
610
611template <typename ZipType, typename... Iters>
612struct zip_common : public zip_traits<ZipType, Iters...> {
613 using Base = zip_traits<ZipType, Iters...>;
614 using value_type = typename Base::value_type;
615
616 std::tuple<Iters...> iterators;
617
618protected:
619 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
620 return value_type(*std::get<Ns>(iterators)...);
621 }
622
623 template <size_t... Ns>
624 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
625 return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
626 }
627
628 template <size_t... Ns>
629 decltype(iterators) tup_dec(std::index_sequence<Ns...>) const {
630 return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
631 }
632
633 template <size_t... Ns>
634 bool test_all_equals(const zip_common &other,
635 std::index_sequence<Ns...>) const {
636 return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) ==
637 std::get<Ns>(other.iterators)...},
638 identity<bool>{});
639 }
640
641public:
642 zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
643
644 value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
645
646 const value_type operator*() const {
647 return deref(std::index_sequence_for<Iters...>{});
648 }
649
650 ZipType &operator++() {
651 iterators = tup_inc(std::index_sequence_for<Iters...>{});
652 return *reinterpret_cast<ZipType *>(this);
653 }
654
655 ZipType &operator--() {
656 static_assert(Base::IsBidirectional,
657 "All inner iterators must be at least bidirectional.");
658 iterators = tup_dec(std::index_sequence_for<Iters...>{});
659 return *reinterpret_cast<ZipType *>(this);
660 }
661
662 /// Return true if all the iterator are matching `other`'s iterators.
663 bool all_equals(zip_common &other) {
664 return test_all_equals(other, std::index_sequence_for<Iters...>{});
665 }
666};
667
template <typename... Iters>
struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
  using Base = zip_common<zip_first<Iters...>, Iters...>;

  /// Equality is decided by the first sequence's iterator only, so the first
  /// iteratee is assumed to be the shortest.
  bool operator==(const zip_first<Iters...> &other) const {
    return std::get<0>(this->iterators) == std::get<0>(other.iterators);
  }

  zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
};
678
template <typename... Iters>
class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
  /// True iff *every* inner iterator differs from its counterpart in
  /// \p other. Negated by operator== below, so two zips compare equal as soon
  /// as any one sequence has caught up — i.e. iteration stops at the shortest
  /// range.
  template <size_t... Ns>
  bool test(const zip_shortest<Iters...> &other,
            std::index_sequence<Ns...>) const {
    return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
                                              std::get<Ns>(other.iterators)...},
                  identity<bool>{});
  }

public:
  using Base = zip_common<zip_shortest<Iters...>, Iters...>;

  zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}

  bool operator==(const zip_shortest<Iters...> &other) const {
    return !test(other, std::index_sequence_for<Iters...>{});
  }
};
698
template <template <typename...> class ItType, typename... Args> class zippy {
public:
  using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
  using iterator_category = typename iterator::iterator_category;
  using value_type = typename iterator::value_type;
  using difference_type = typename iterator::difference_type;
  using pointer = typename iterator::pointer;
  using reference = typename iterator::reference;

private:
  // Holds the zipped ranges (by value or reference depending on how Args
  // were deduced by zip()/zip_first()).
  std::tuple<Args...> ts;

  /// Build the begin iterator from each stored range's begin.
  template <size_t... Ns>
  iterator begin_impl(std::index_sequence<Ns...>) const {
    return iterator(std::begin(std::get<Ns>(ts))...);
  }
  /// Build the end iterator from each stored range's end.
  template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
    return iterator(std::end(std::get<Ns>(ts))...);
  }

public:
  zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}

  iterator begin() const {
    return begin_impl(std::index_sequence_for<Args...>{});
  }
  iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
};
727
728} // end namespace detail
729
/// zip iterator for two or more iteratable types. Uses zip_shortest
/// semantics: iteration stops once any zipped range is exhausted.
template <typename T, typename U, typename... Args>
detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
                                                       Args &&... args) {
  return detail::zippy<detail::zip_shortest, T, U, Args...>(
      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
}
737
/// zip iterator that, for the sake of efficiency, assumes the first iteratee to
/// be the shortest. Only the first range's iterator is compared to detect the
/// end (see zip_first::operator==), so the other ranges must be at least as
/// long as the first.
template <typename T, typename U, typename... Args>
detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
                                                          Args &&... args) {
  return detail::zippy<detail::zip_first, T, U, Args...>(
      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
}
746
747namespace detail {
/// Advance \p I by one step, saturating at \p End so the iterator never moves
/// past the end of its sequence.
template <typename Iter>
Iter next_or_end(const Iter &I, const Iter &End) {
  return I == End ? End : std::next(I);
}
754
/// Dereference \p I, or return None when \p I is the end iterator of its
/// sequence. The Optional holds a decayed copy of the element.
template <typename Iter>
auto deref_or_none(const Iter &I, const Iter &End) -> llvm::Optional<
    std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
  if (I == End)
    return None;
  return *I;
}
762
763template <typename Iter> struct ZipLongestItemType {
764 using type =
765 llvm::Optional<typename std::remove_const<typename std::remove_reference<
766 decltype(*std::declval<Iter>())>::type>::type>;
767};
768
/// The value type of a zip_longest iterator: a tuple of one Optional element
/// per zipped sequence.
template <typename... Iters> struct ZipLongestTupleType {
  using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
};
772
template <typename... Iters>
class zip_longest_iterator
    : public iterator_facade_base<
          zip_longest_iterator<Iters...>,
          typename std::common_type<
              std::forward_iterator_tag,
              typename std::iterator_traits<Iters>::iterator_category...>::type,
          typename ZipLongestTupleType<Iters...>::type,
          typename std::iterator_traits<typename std::tuple_element<
              0, std::tuple<Iters...>>::type>::difference_type,
          typename ZipLongestTupleType<Iters...>::type *,
          typename ZipLongestTupleType<Iters...>::type> {
public:
  using value_type = typename ZipLongestTupleType<Iters...>::type;

private:
  // Current position in each sequence plus each sequence's end iterator, so
  // exhausted sequences can be detected and parked at their end.
  std::tuple<Iters...> iterators;
  std::tuple<Iters...> end_iterators;

  /// True iff at least one inner iterator differs from its counterpart in
  /// \p other. Negated by operator== below.
  template <size_t... Ns>
  bool test(const zip_longest_iterator<Iters...> &other,
            std::index_sequence<Ns...>) const {
    return llvm::any_of(
        std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
                                    std::get<Ns>(other.iterators)...},
        identity<bool>{});
  }

  /// Dereference every non-exhausted iterator; exhausted slots yield None.
  template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
    return value_type(
        deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
  }

  /// Advance every iterator that has not yet reached its end.
  template <size_t... Ns>
  decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
    return std::tuple<Iters...>(
        next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
  }

public:
  zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
      : iterators(std::forward<Iters>(ts.first)...),
        end_iterators(std::forward<Iters>(ts.second)...) {}

  value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }

  value_type operator*() const {
    return deref(std::index_sequence_for<Iters...>{});
  }

  zip_longest_iterator<Iters...> &operator++() {
    iterators = tup_inc(std::index_sequence_for<Iters...>{});
    return *this;
  }

  bool operator==(const zip_longest_iterator<Iters...> &other) const {
    return !test(other, std::index_sequence_for<Iters...>{});
  }
};
832
template <typename... Args> class zip_longest_range {
public:
  using iterator =
      zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
  using iterator_category = typename iterator::iterator_category;
  using value_type = typename iterator::value_type;
  using difference_type = typename iterator::difference_type;
  using pointer = typename iterator::pointer;
  using reference = typename iterator::reference;

private:
  // Stores the zipped ranges; each iterator is constructed from
  // (begin, end) pairs so it can detect per-sequence exhaustion.
  std::tuple<Args...> ts;

  template <size_t... Ns>
  iterator begin_impl(std::index_sequence<Ns...>) const {
    return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
                                   adl_end(std::get<Ns>(ts)))...);
  }

  template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
    // The end iterator carries (end, end) for every sequence.
    return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
                                   adl_end(std::get<Ns>(ts)))...);
  }

public:
  zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}

  iterator begin() const {
    return begin_impl(std::index_sequence_for<Args...>{});
  }
  iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
};
865} // namespace detail
866
/// Iterate over two or more iterators at the same time. Iteration continues
/// until all iterators reach the end. The llvm::Optional only contains a value
/// if the iterator has not reached the end; exhausted ranges yield None for
/// their tuple slot.
template <typename T, typename U, typename... Args>
detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
                                                     Args &&... args) {
  return detail::zip_longest_range<T, U, Args...>(
      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
}
876
877/// Iterator wrapper that concatenates sequences together.
878///
879/// This can concatenate different iterators, even with different types, into
880/// a single iterator provided the value types of all the concatenated
881/// iterators expose `reference` and `pointer` types that can be converted to
882/// `ValueT &` and `ValueT *` respectively. It doesn't support more
883/// interesting/customized pointer or reference types.
884///
885/// Currently this only supports forward or higher iterator categories as
886/// inputs and always exposes a forward iterator interface.
template <typename ValueT, typename... IterTs>
class concat_iterator
    : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
                                  std::forward_iterator_tag, ValueT> {
  using BaseT = typename concat_iterator::iterator_facade_base;

  /// We store both the current and end iterators for each concatenated
  /// sequence in a tuple of pairs.
  ///
  /// Note that something like iterator_range seems nice at first here, but the
  /// range properties are of little benefit and end up getting in the way
  /// because we need to do mutation on the current iterators.
  std::tuple<IterTs...> Begins;
  std::tuple<IterTs...> Ends;

  /// Attempts to increment a specific iterator.
  ///
  /// Returns true if it was able to increment the iterator. Returns false if
  /// the iterator is already at the end iterator.
  template <size_t Index> bool incrementHelper() {
    auto &Begin = std::get<Index>(Begins);
    auto &End = std::get<Index>(Ends);
    if (Begin == End)
      return false;

    ++Begin;
    return true;
  }

  /// Increments the first non-end iterator.
  ///
  /// It is an error to call this with all iterators at the end.
  template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
    // Build a sequence of functions to increment each iterator if possible.
    bool (concat_iterator::*IncrementHelperFns[])() = {
        &concat_iterator::incrementHelper<Ns>...};

    // Loop over them, and stop as soon as we succeed at incrementing one.
    for (auto &IncrementHelperFn : IncrementHelperFns)
      if ((this->*IncrementHelperFn)())
        return;

    llvm_unreachable("Attempted to increment an end concat iterator!");
  }

  /// Returns null if the specified iterator is at the end. Otherwise,
  /// dereferences the iterator and returns the address of the resulting
  /// reference.
  template <size_t Index> ValueT *getHelper() const {
    auto &Begin = std::get<Index>(Begins);
    auto &End = std::get<Index>(Ends);
    if (Begin == End)
      return nullptr;

    return &*Begin;
  }

  /// Finds the first non-end iterator, dereferences, and returns the resulting
  /// reference.
  ///
  /// It is an error to call this with all iterators at the end.
  template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
    // Build a sequence of functions to get from iterator if possible.
    ValueT *(concat_iterator::*GetHelperFns[])() const = {
        &concat_iterator::getHelper<Ns>...};

    // Loop over them, and return the first result we find.
    for (auto &GetHelperFn : GetHelperFns)
      if (ValueT *P = (this->*GetHelperFn)())
        return *P;

    llvm_unreachable("Attempted to get a pointer from an end concat iterator!");
  }

public:
  /// Constructs an iterator from a sequence of ranges.
  ///
  /// We need the full range to know how to switch between each of the
  /// iterators.
  template <typename... RangeTs>
  explicit concat_iterator(RangeTs &&... Ranges)
      : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}

  using BaseT::operator++;

  /// Advance past the current element, skipping into the next sub-range when
  /// the current one is exhausted.
  concat_iterator &operator++() {
    increment(std::index_sequence_for<IterTs...>());
    return *this;
  }

  /// Dereference the first sub-range that still has elements.
  ValueT &operator*() const {
    return get(std::index_sequence_for<IterTs...>());
  }

  bool operator==(const concat_iterator &RHS) const {
    return Begins == RHS.Begins && Ends == RHS.Ends;
  }
};
985
986namespace detail {
987
988/// Helper to store a sequence of ranges being concatenated and access them.
989///
990/// This is designed to facilitate providing actual storage when temporaries
991/// are passed into the constructor such that we can use it as part of range
992/// based for loops.
template <typename ValueT, typename... RangeTs> class concat_range {
public:
  using iterator =
      concat_iterator<ValueT,
                      decltype(std::begin(std::declval<RangeTs &>()))...>;

private:
  // Owns/stores the concatenated ranges (see class comment above re:
  // temporaries and range-based for loops).
  std::tuple<RangeTs...> Ranges;

  template <size_t... Ns> iterator begin_impl(std::index_sequence<Ns...>) {
    return iterator(std::get<Ns>(Ranges)...);
  }
  template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
    // The end iterator is built from empty (end, end) sub-ranges.
    return iterator(make_range(std::end(std::get<Ns>(Ranges)),
                               std::end(std::get<Ns>(Ranges)))...);
  }

public:
  concat_range(RangeTs &&... Ranges)
      : Ranges(std::forward<RangeTs>(Ranges)...) {}

  iterator begin() { return begin_impl(std::index_sequence_for<RangeTs...>{}); }
  iterator end() { return end_impl(std::index_sequence_for<RangeTs...>{}); }
};
1017
1018} // end namespace detail
1019
/// Concatenated range across two or more ranges.
///
/// The desired value type must be explicitly specified, e.g.
/// `concat<Value>(As, Bs)`.
template <typename ValueT, typename... RangeTs>
detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
  static_assert(sizeof...(RangeTs) > 1,
                "Need more than one range to concatenate!");
  return detail::concat_range<ValueT, RangeTs...>(
      std::forward<RangeTs>(Ranges)...);
}
1030
/// A utility class used to implement an iterator that contains some base object
/// and an index. The iterator moves the index but keeps the base constant.
template <typename DerivedT, typename BaseT, typename T,
          typename PointerT = T *, typename ReferenceT = T &>
class indexed_accessor_iterator
    : public llvm::iterator_facade_base<DerivedT,
                                        std::random_access_iterator_tag, T,
                                        std::ptrdiff_t, PointerT, ReferenceT> {
public:
  ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
    assert(base == rhs.base && "incompatible iterators");
    return index - rhs.index;
  }
  bool operator==(const indexed_accessor_iterator &rhs) const {
    return base == rhs.base && index == rhs.index;
  }
  bool operator<(const indexed_accessor_iterator &rhs) const {
    assert(base == rhs.base && "incompatible iterators");
    return index < rhs.index;
  }

  // Random-access movement only adjusts the index; the base never changes.
  DerivedT &operator+=(ptrdiff_t offset) {
    this->index += offset;
    return static_cast<DerivedT &>(*this);
  }
  DerivedT &operator-=(ptrdiff_t offset) {
    this->index -= offset;
    return static_cast<DerivedT &>(*this);
  }

  /// Returns the current index of the iterator.
  ptrdiff_t getIndex() const { return index; }

  /// Returns the current base of the iterator.
  const BaseT &getBase() const { return base; }

protected:
  indexed_accessor_iterator(BaseT base, ptrdiff_t index)
      : base(base), index(index) {}
  BaseT base;
  ptrdiff_t index;
};
1073
1074namespace detail {
1075/// The class represents the base of a range of indexed_accessor_iterators. It
1076/// provides support for many different range functionalities, e.g.
1077/// drop_front/slice/etc.. Derived range classes must implement the following
1078/// static methods:
1079/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
1080/// - Dereference an iterator pointing to the base object at the given
1081/// index.
1082/// * BaseT offset_base(const BaseT &base, ptrdiff_t index)
1083/// - Return a new base that is offset from the provide base by 'index'
1084/// elements.
template <typename DerivedT, typename BaseT, typename T,
          typename PointerT = T *, typename ReferenceT = T &>
class indexed_accessor_range_base {
public:
  using RangeBaseT =
      indexed_accessor_range_base<DerivedT, BaseT, T, PointerT, ReferenceT>;

  /// An iterator element of this range.
  class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
                                                    PointerT, ReferenceT> {
  public:
    // Index into this iterator, invoking a static method on the derived type.
    ReferenceT operator*() const {
      return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
    }

  private:
    iterator(BaseT owner, ptrdiff_t curIndex)
        : indexed_accessor_iterator<iterator, BaseT, T, PointerT, ReferenceT>(
              owner, curIndex) {}

    /// Allow access to the constructor.
    friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
                                       ReferenceT>;
  };

  // Construct from an iterator pair: rebase to the begin position and keep
  // just a count.
  indexed_accessor_range_base(iterator begin, iterator end)
      : base(offset_base(begin.getBase(), begin.getIndex())),
        count(end.getIndex() - begin.getIndex()) {}
  indexed_accessor_range_base(const iterator_range<iterator> &range)
      : indexed_accessor_range_base(range.begin(), range.end()) {}
  indexed_accessor_range_base(BaseT base, ptrdiff_t count)
      : base(base), count(count) {}

  iterator begin() const { return iterator(base, 0); }
  iterator end() const { return iterator(base, count); }
  ReferenceT operator[](size_t Index) const {
    assert(Index < size() && "invalid index for value range");
    return DerivedT::dereference_iterator(base, static_cast<ptrdiff_t>(Index));
  }
  ReferenceT front() const {
    assert(!empty() && "expected non-empty range");
    return (*this)[0];
  }
  ReferenceT back() const {
    assert(!empty() && "expected non-empty range");
    return (*this)[size() - 1];
  }

  /// Compare this range with another.
  template <typename OtherT> bool operator==(const OtherT &other) const {
    return size() ==
               static_cast<size_t>(std::distance(other.begin(), other.end())) &&
           std::equal(begin(), end(), other.begin());
  }
  template <typename OtherT> bool operator!=(const OtherT &other) const {
    return !(*this == other);
  }

  /// Return the size of this range.
  size_t size() const { return count; }

  /// Return if the range is empty.
  bool empty() const { return size() == 0; }

  /// Drop the first N elements, and keep M elements.
  DerivedT slice(size_t n, size_t m) const {
    assert(n + m <= size() && "invalid size specifiers");
    return DerivedT(offset_base(base, n), m);
  }

  /// Drop the first n elements.
  DerivedT drop_front(size_t n = 1) const {
    assert(size() >= n && "Dropping more elements than exist");
    return slice(n, size() - n);
  }
  /// Drop the last n elements.
  DerivedT drop_back(size_t n = 1) const {
    assert(size() >= n && "Dropping more elements than exist");
    return DerivedT(base, size() - n);
  }

  /// Take the first n elements.
  DerivedT take_front(size_t n = 1) const {
    return n < size() ? drop_back(size() - n)
                      : static_cast<const DerivedT &>(*this);
  }

  /// Take the last n elements.
  DerivedT take_back(size_t n = 1) const {
    return n < size() ? drop_front(size() - n)
                      : static_cast<const DerivedT &>(*this);
  }

  /// Allow conversion to any type accepting an iterator_range.
  template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
                                 RangeT, iterator_range<iterator>>::value>>
  operator RangeT() const {
    return RangeT(iterator_range<iterator>(*this));
  }

  /// Returns the base of this range.
  const BaseT &getBase() const { return base; }

private:
  /// Offset the given base by the given amount.
  static BaseT offset_base(const BaseT &base, size_t n) {
    // Short-circuit the zero case so DerivedT::offset_base only has to handle
    // non-trivial offsets.
    return n == 0 ? base : DerivedT::offset_base(base, n);
  }

protected:
  indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
  indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
  indexed_accessor_range_base &
  operator=(const indexed_accessor_range_base &) = default;

  /// The base that owns the provided range of values.
  BaseT base;
  /// The size from the owning range.
  ptrdiff_t count;
};
1206} // end namespace detail
1207
1208/// This class provides an implementation of a range of
1209/// indexed_accessor_iterators where the base is not indexable. Ranges with
1210/// bases that are offsetable should derive from indexed_accessor_range_base
1211/// instead. Derived range classes are expected to implement the following
1212/// static method:
1213/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
1214/// - Dereference an iterator pointing to a parent base at the given index.
template <typename DerivedT, typename BaseT, typename T,
          typename PointerT = T *, typename ReferenceT = T &>
class indexed_accessor_range
    : public detail::indexed_accessor_range_base<
          DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
public:
  indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
      : detail::indexed_accessor_range_base<
            DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
            std::make_pair(base, startIndex), count) {}
  using detail::indexed_accessor_range_base<
      DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
      ReferenceT>::indexed_accessor_range_base;

  /// Returns the current base of the range.
  const BaseT &getBase() const { return this->base.first; }

  /// Returns the current start index of the range.
  ptrdiff_t getStartIndex() const { return this->base.second; }

  /// See `detail::indexed_accessor_range_base` for details.
  static std::pair<BaseT, ptrdiff_t>
  offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
    // We encode the internal base as a pair of the derived base and a start
    // index into the derived base.
    return std::make_pair(base.first, base.second + index);
  }
  /// See `detail::indexed_accessor_range_base` for details.
  static ReferenceT
  dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
                       ptrdiff_t index) {
    return DerivedT::dereference(base.first, base.second + index);
  }
};
1249
/// Given a container of pairs, return a range over the first elements.
template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
  // `c` is only named inside decltype (an unevaluated context), so the lambda
  // needs no capture; the double parentheses in decltype((...)) preserve
  // reference-ness of the element and its .first member.
  return llvm::map_range(
      std::forward<ContainerTy>(c),
      [](decltype((*std::begin(c))) elt) -> decltype((elt.first)) {
        return elt.first;
      });
}
1258
/// Given a container of pairs, return a range over the second elements.
template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
  // Same technique as make_first_range: `c` appears only in unevaluated
  // decltype contexts, so no capture is required.
  return llvm::map_range(
      std::forward<ContainerTy>(c),
      [](decltype((*std::begin(c))) elt) -> decltype((elt.second)) {
        return elt.second;
      });
}
1267
1268//===----------------------------------------------------------------------===//
1269// Extra additions to <utility>
1270//===----------------------------------------------------------------------===//
1271
/// Comparator functor ordering std::pair-like values by their `first`
/// member, using the transparent std::less<>.
struct less_first {
  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
    std::less<> Cmp;
    return Cmp(lhs.first, rhs.first);
  }
};
1279
/// Comparator functor ordering std::pair-like values by their `second`
/// member, using the transparent std::less<>.
struct less_second {
  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
    std::less<> Cmp;
    return Cmp(lhs.second, rhs.second);
  }
};
1287
/// \brief Function object that applies the wrapped binary function to the
/// `first` components of two std::pair-like arguments.
template <typename FuncTy>
struct on_first {
  FuncTy func;

  template <typename T>
  decltype(auto) operator()(const T &lhs, const T &rhs) const {
    const auto &a = lhs.first;
    const auto &b = rhs.first;
    return func(a, b);
  }
};
1299
/// Utility type to build an inheritance chain that makes it easy to rank
/// overload candidates: rank<N> derives from rank<N-1>, so an argument of
/// rank<N> binds to a rank<M> parameter for any M <= N, and overload
/// resolution prefers the overload with the highest matching rank.
template <int N> struct rank : rank<N - 1> {};
template <> struct rank<0> {};
1304
/// traits class for checking whether type T is one of any of the given
/// types in the variadic list.
template <typename T, typename... Ts>
using is_one_of = disjunction<std::is_same<T, Ts>...>;

/// traits class for checking whether type T is a base class for all
/// the given types in the variadic list.
template <typename T, typename... Ts>
using are_base_of = conjunction<std::is_base_of<T, Ts>...>;
1314
namespace detail {
template <typename... Ts> struct Visitor;

// Recursive case: inherit from the first callable (cv/ref-stripped) and from
// the Visitor over the remaining ones, merging all operator() overload sets
// via the using-declarations.
template <typename HeadT, typename... TailTs>
struct Visitor<HeadT, TailTs...> : remove_cvref_t<HeadT>, Visitor<TailTs...> {
  explicit constexpr Visitor(HeadT &&Head, TailTs &&...Tail)
      : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)),
        Visitor<TailTs...>(std::forward<TailTs>(Tail)...) {}
  using remove_cvref_t<HeadT>::operator();
  using Visitor<TailTs...>::operator();
};

// Base case: a single callable.
template <typename HeadT> struct Visitor<HeadT> : remove_cvref_t<HeadT> {
  explicit constexpr Visitor(HeadT &&Head)
      : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)) {}
  using remove_cvref_t<HeadT>::operator();
};
} // namespace detail
1333
1334/// Returns an opaquely-typed Callable object whose operator() overload set is
1335/// the sum of the operator() overload sets of each CallableT in CallableTs.
1336///
1337/// The type of the returned object derives from each CallableT in CallableTs.
1338/// The returned object is constructed by invoking the appropriate copy or move
1339/// constructor of each CallableT, as selected by overload resolution on the
1340/// corresponding argument to makeVisitor.
1341///
1342/// Example:
1343///
1344/// \code
1345/// auto visitor = makeVisitor([](auto) { return "unhandled type"; },
1346/// [](int i) { return "int"; },
1347/// [](std::string s) { return "str"; });
1348/// auto a = visitor(42); // `a` is now "int".
1349/// auto b = visitor("foo"); // `b` is now "str".
1350/// auto c = visitor(3.14f); // `c` is now "unhandled type".
1351/// \endcode
1352///
1353/// Example of making a visitor with a lambda which captures a move-only type:
1354///
1355/// \code
1356/// std::unique_ptr<FooHandler> FH = /* ... */;
1357/// auto visitor = makeVisitor(
1358/// [FH{std::move(FH)}](Foo F) { return FH->handle(F); },
1359/// [](int i) { return i; },
1360/// [](std::string s) { return atoi(s); });
1361/// \endcode
template <typename... CallableTs>
constexpr decltype(auto) makeVisitor(CallableTs &&...Callables) {
  // detail::Visitor inherits from every callable and re-exports each
  // operator(), forming the combined overload set described above.
  return detail::Visitor<CallableTs...>(std::forward<CallableTs>(Callables)...);
}
1366
1367//===----------------------------------------------------------------------===//
1368// Extra additions for arrays
1369//===----------------------------------------------------------------------===//
1370
1371// We have a copy here so that LLVM behaves the same when using different
1372// standard libraries.
1373template <class Iterator, class RNG>
1374void shuffle(Iterator first, Iterator last, RNG &&g) {
1375 // It would be better to use a std::uniform_int_distribution,
1376 // but that would be stdlib dependent.
1377 typedef
1378 typename std::iterator_traits<Iterator>::difference_type difference_type;
1379 for (auto size = last - first; size > 1; ++first, (void)--size) {
1380 difference_type offset = g() % size;
1381 // Avoid self-assignment due to incorrect assertions in libstdc++
1382 // containers (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85828).
1383 if (offset != difference_type(0))
1384 std::iter_swap(first, first + offset);
1385 }
1386}
1387
/// Find the length of an array.
template <class T, std::size_t N>
constexpr inline size_t array_lengthof(T (&)[N]) {
  // Taking the array by reference lets N be deduced; decayed pointers do not
  // bind to this signature.
  return N;
}
1393
/// Adapt std::less<T> for array_pod_sort: a qsort-style three-way comparator
/// returning -1/0/1 for less/equivalent/greater under std::less<T>.
template <typename T>
inline int array_pod_sort_comparator(const void *P1, const void *P2) {
  const T &LHS = *reinterpret_cast<const T *>(P1);
  const T &RHS = *reinterpret_cast<const T *>(P2);
  std::less<T> Less;
  if (Less(LHS, RHS))
    return -1;
  if (Less(RHS, LHS))
    return 1;
  return 0;
}
1405
/// get_array_pod_sort_comparator - This is an internal helper function used to
/// get type deduction of T right.
template <typename T>
inline int (*get_array_pod_sort_comparator(const T &))
    (const void*, const void*) {
  // The argument is used only to deduce T; the returned function pointer is
  // the qsort-compatible comparator instantiated for T.
  return array_pod_sort_comparator<T>;
}
1413
1414#ifdef EXPENSIVE_CHECKS
1415namespace detail {
1416
/// Returns a process-wide random seed for the pre-sort shuffle. The static
/// local is initialized once, so every shuffle in a run uses the same seed.
inline unsigned presortShuffleEntropy() {
  static unsigned Result(std::random_device{}());
  return Result;
}
1421
/// Randomize [Start, End) before sorting, to help expose comparators whose
/// result depends on the initial element order (non-determinism).
template <class IteratorTy>
inline void presortShuffle(IteratorTy Start, IteratorTy End) {
  std::mt19937 Generator(presortShuffleEntropy());
  llvm::shuffle(Start, End, Generator);
}
1427
1428} // end namespace detail
1429#endif
1430
1431/// array_pod_sort - This sorts an array with the specified start and end
1432/// extent. This is just like std::sort, except that it calls qsort instead of
1433/// using an inlined template. qsort is slightly slower than std::sort, but
1434/// most sorts are not performance critical in LLVM and std::sort has to be
1435/// template instantiated for each type, leading to significant measured code
1436/// bloat. This function should generally be used instead of std::sort where
1437/// possible.
1438///
1439/// This function assumes that you have simple POD-like types that can be
1440/// compared with std::less and can be moved with memcpy. If this isn't true,
1441/// you should use std::sort.
1442///
1443/// NOTE: If qsort_r were portable, we could allow a custom comparator and
1444/// default to std::less.
template <class IteratorTy>
inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
  // Don't inefficiently call qsort with one element or trigger undefined
  // behavior with an empty sequence.
  const auto NumElts = End - Start;
  if (NumElts <= 1)
    return;
#ifdef EXPENSIVE_CHECKS
  detail::presortShuffle<IteratorTy>(Start, End);
#endif
  // The comparator is deduced from the element type via *Start.
  qsort(&*Start, NumElts, sizeof(*Start),
        get_array_pod_sort_comparator(*Start));
}
1456
template <class IteratorTy>
inline void array_pod_sort(
    IteratorTy Start, IteratorTy End,
    int (*Compare)(
        const typename std::iterator_traits<IteratorTy>::value_type *,
        const typename std::iterator_traits<IteratorTy>::value_type *)) {
  // Don't inefficiently call qsort with one element or trigger undefined
  // behavior with an empty sequence.
  const auto NumElts = End - Start;
  if (NumElts <= 1)
    return;
#ifdef EXPENSIVE_CHECKS
  detail::presortShuffle<IteratorTy>(Start, End);
#endif
  // qsort wants an untyped comparator; the typed signature above exists only
  // to give callers type checking at the call site.
  auto *RawCompare =
      reinterpret_cast<int (*)(const void *, const void *)>(Compare);
  qsort(&*Start, NumElts, sizeof(*Start), RawCompare);
}
1473
namespace detail {
// We can use qsort if the iterator type is a pointer and the underlying value
// is trivially copyable (qsort moves elements with raw memory copies).
template <typename T>
using sort_trivially_copyable = conjunction<
    std::is_pointer<T>,
    std::is_trivially_copyable<typename std::iterator_traits<T>::value_type>>;
} // namespace detail
1482
// Provide wrappers to std::sort which shuffle the elements before sorting
// to help uncover non-deterministic behavior (PR35135).
template <typename IteratorTy,
          std::enable_if_t<!detail::sort_trivially_copyable<IteratorTy>::value,
                           int> = 0>
inline void sort(IteratorTy Start, IteratorTy End) {
  // Selected when qsort is not usable (non-pointer iterator or element type
  // that is not trivially copyable).
#ifdef EXPENSIVE_CHECKS
  detail::presortShuffle<IteratorTy>(Start, End);
#endif
  std::sort(Start, End);
}
1494
// Forward trivially copyable types to array_pod_sort. This avoids a large
// amount of code bloat for a minor performance hit.
template <typename IteratorTy,
          std::enable_if_t<detail::sort_trivially_copyable<IteratorTy>::value,
                           int> = 0>
inline void sort(IteratorTy Start, IteratorTy End) {
  array_pod_sort(Start, End);
}
1503
/// Range-based convenience overload: sorts a whole container, resolving
/// begin/end via ADL and dispatching to the iterator overloads above.
template <typename Container> inline void sort(Container &&C) {
  llvm::sort(adl_begin(C), adl_end(C));
}
1507
1508template <typename IteratorTy, typename Compare>
1509inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
1510#ifdef EXPENSIVE_CHECKS
1511 detail::presortShuffle<IteratorTy>(Start, End);
1512#endif
1513 std::sort(Start, End, Comp);
1514}
1515
1516template <typename Container, typename Compare>
1517inline void sort(Container &&C, Compare Comp) {
1518 llvm::sort(adl_begin(C), adl_end(C), Comp);
1519}
1520
1521//===----------------------------------------------------------------------===//
1522// Extra additions to <algorithm>
1523//===----------------------------------------------------------------------===//
1524
1525/// Get the size of a range. This is a wrapper function around std::distance
1526/// which is only enabled when the operation is O(1).
1527template <typename R>
1528auto size(R &&Range,
1529 std::enable_if_t<
1530 std::is_base_of<std::random_access_iterator_tag,
1531 typename std::iterator_traits<decltype(
1532 Range.begin())>::iterator_category>::value,
1533 void> * = nullptr) {
1534 return std::distance(Range.begin(), Range.end());
1535}
1536
1537/// Provide wrappers to std::for_each which take ranges instead of having to
1538/// pass begin/end explicitly.
1539template <typename R, typename UnaryFunction>
1540UnaryFunction for_each(R &&Range, UnaryFunction F) {
1541 return std::for_each(adl_begin(Range), adl_end(Range), F);
1542}
1543
1544/// Provide wrappers to std::all_of which take ranges instead of having to pass
1545/// begin/end explicitly.
1546template <typename R, typename UnaryPredicate>
1547bool all_of(R &&Range, UnaryPredicate P) {
1548 return std::all_of(adl_begin(Range), adl_end(Range), P);
1549}
1550
1551/// Provide wrappers to std::any_of which take ranges instead of having to pass
1552/// begin/end explicitly.
1553template <typename R, typename UnaryPredicate>
1554bool any_of(R &&Range, UnaryPredicate P) {
1555 return std::any_of(adl_begin(Range), adl_end(Range), P);
8
Calling 'any_of<const unsigned long *, (lambda at /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/BitVector.h:163:25)>'
14
Returning from 'any_of<const unsigned long *, (lambda at /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/BitVector.h:163:25)>'
15
Returning the value 1, which participates in a condition later
1556}
1557
1558/// Provide wrappers to std::none_of which take ranges instead of having to pass
1559/// begin/end explicitly.
1560template <typename R, typename UnaryPredicate>
1561bool none_of(R &&Range, UnaryPredicate P) {
1562 return std::none_of(adl_begin(Range), adl_end(Range), P);
1563}
1564
1565/// Provide wrappers to std::find which take ranges instead of having to pass
1566/// begin/end explicitly.
1567template <typename R, typename T> auto find(R &&Range, const T &Val) {
1568 return std::find(adl_begin(Range), adl_end(Range), Val);
1569}
1570
1571/// Provide wrappers to std::find_if which take ranges instead of having to pass
1572/// begin/end explicitly.
1573template <typename R, typename UnaryPredicate>
1574auto find_if(R &&Range, UnaryPredicate P) {
1575 return std::find_if(adl_begin(Range), adl_end(Range), P);
1576}
1577
1578template <typename R, typename UnaryPredicate>
1579auto find_if_not(R &&Range, UnaryPredicate P) {
1580 return std::find_if_not(adl_begin(Range), adl_end(Range), P);
1581}
1582
1583/// Provide wrappers to std::remove_if which take ranges instead of having to
1584/// pass begin/end explicitly.
1585template <typename R, typename UnaryPredicate>
1586auto remove_if(R &&Range, UnaryPredicate P) {
1587 return std::remove_if(adl_begin(Range), adl_end(Range), P);
1588}
1589
1590/// Provide wrappers to std::copy_if which take ranges instead of having to
1591/// pass begin/end explicitly.
1592template <typename R, typename OutputIt, typename UnaryPredicate>
1593OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
1594 return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
1595}
1596
1597template <typename R, typename OutputIt>
1598OutputIt copy(R &&Range, OutputIt Out) {
1599 return std::copy(adl_begin(Range), adl_end(Range), Out);
1600}
1601
1602/// Provide wrappers to std::move which take ranges instead of having to
1603/// pass begin/end explicitly.
1604template <typename R, typename OutputIt>
1605OutputIt move(R &&Range, OutputIt Out) {
1606 return std::move(adl_begin(Range), adl_end(Range), Out);
1607}
1608
1609/// Wrapper function around std::find to detect if an element exists
1610/// in a container.
1611template <typename R, typename E>
1612bool is_contained(R &&Range, const E &Element) {
1613 return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
1614}
1615
1616/// Wrapper function around std::is_sorted to check if elements in a range \p R
1617/// are sorted with respect to a comparator \p C.
1618template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
1619 return std::is_sorted(adl_begin(Range), adl_end(Range), C);
1620}
1621
1622/// Wrapper function around std::is_sorted to check if elements in a range \p R
1623/// are sorted in non-descending order.
1624template <typename R> bool is_sorted(R &&Range) {
1625 return std::is_sorted(adl_begin(Range), adl_end(Range));
1626}
1627
1628/// Wrapper function around std::count to count the number of times an element
1629/// \p Element occurs in the given range \p Range.
1630template <typename R, typename E> auto count(R &&Range, const E &Element) {
1631 return std::count(adl_begin(Range), adl_end(Range), Element);
1632}
1633
1634/// Wrapper function around std::count_if to count the number of times an
1635/// element satisfying a given predicate occurs in a range.
1636template <typename R, typename UnaryPredicate>
1637auto count_if(R &&Range, UnaryPredicate P) {
1638 return std::count_if(adl_begin(Range), adl_end(Range), P);
1639}
1640
1641/// Wrapper function around std::transform to apply a function to a range and
1642/// store the result elsewhere.
1643template <typename R, typename OutputIt, typename UnaryFunction>
1644OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F) {
1645 return std::transform(adl_begin(Range), adl_end(Range), d_first, F);
1646}
1647
1648/// Provide wrappers to std::partition which take ranges instead of having to
1649/// pass begin/end explicitly.
1650template <typename R, typename UnaryPredicate>
1651auto partition(R &&Range, UnaryPredicate P) {
1652 return std::partition(adl_begin(Range), adl_end(Range), P);
1653}
1654
1655/// Provide wrappers to std::lower_bound which take ranges instead of having to
1656/// pass begin/end explicitly.
1657template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) {
1658 return std::lower_bound(adl_begin(Range), adl_end(Range),
1659 std::forward<T>(Value));
1660}
1661
1662template <typename R, typename T, typename Compare>
1663auto lower_bound(R &&Range, T &&Value, Compare C) {
1664 return std::lower_bound(adl_begin(Range), adl_end(Range),
1665 std::forward<T>(Value), C);
1666}
1667
1668/// Provide wrappers to std::upper_bound which take ranges instead of having to
1669/// pass begin/end explicitly.
1670template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) {
1671 return std::upper_bound(adl_begin(Range), adl_end(Range),
1672 std::forward<T>(Value));
1673}
1674
1675template <typename R, typename T, typename Compare>
1676auto upper_bound(R &&Range, T &&Value, Compare C) {
1677 return std::upper_bound(adl_begin(Range), adl_end(Range),
1678 std::forward<T>(Value), C);
1679}
1680
1681template <typename R>
1682void stable_sort(R &&Range) {
1683 std::stable_sort(adl_begin(Range), adl_end(Range));
1684}
1685
1686template <typename R, typename Compare>
1687void stable_sort(R &&Range, Compare C) {
1688 std::stable_sort(adl_begin(Range), adl_end(Range), C);
1689}
1690
1691/// Binary search for the first iterator in a range where a predicate is false.
1692/// Requires that C is always true below some limit, and always false above it.
1693template <typename R, typename Predicate,
1694 typename Val = decltype(*adl_begin(std::declval<R>()))>
1695auto partition_point(R &&Range, Predicate P) {
1696 return std::partition_point(adl_begin(Range), adl_end(Range), P);
1697}
1698
1699template<typename Range, typename Predicate>
1700auto unique(Range &&R, Predicate P) {
1701 return std::unique(adl_begin(R), adl_end(R), P);
1702}
1703
1704/// Wrapper function around std::equal to detect if pair-wise elements between
1705/// two ranges are the same.
1706template <typename L, typename R> bool equal(L &&LRange, R &&RRange) {
1707 return std::equal(adl_begin(LRange), adl_end(LRange), adl_begin(RRange),
1708 adl_end(RRange));
1709}
1710
1711/// Wrapper function around std::equal to detect if all elements
1712/// in a container are same.
1713template <typename R>
1714bool is_splat(R &&Range) {
1715 size_t range_size = size(Range);
1716 return range_size != 0 && (range_size == 1 ||
1717 std::equal(adl_begin(Range) + 1, adl_end(Range), adl_begin(Range)));
1718}
1719
1720/// Provide a container algorithm similar to C++ Library Fundamentals v2's
1721/// `erase_if` which is equivalent to:
1722///
1723/// C.erase(remove_if(C, pred), C.end());
1724///
1725/// This version works for any container with an erase method call accepting
1726/// two iterators.
1727template <typename Container, typename UnaryPredicate>
1728void erase_if(Container &C, UnaryPredicate P) {
1729 C.erase(remove_if(C, P), C.end());
1730}
1731
1732/// Wrapper function to remove a value from a container:
1733///
1734/// C.erase(remove(C.begin(), C.end(), V), C.end());
1735template <typename Container, typename ValueType>
1736void erase_value(Container &C, ValueType V) {
1737 C.erase(std::remove(C.begin(), C.end(), V), C.end());
1738}
1739
1740/// Wrapper function to append a range to a container.
1741///
1742/// C.insert(C.end(), R.begin(), R.end());
1743template <typename Container, typename Range>
1744inline void append_range(Container &C, Range &&R) {
1745 C.insert(C.end(), R.begin(), R.end());
1746}
1747
1748/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
1749/// the range [ValIt, ValEnd) (which is not from the same container).
1750template<typename Container, typename RandomAccessIterator>
1751void replace(Container &Cont, typename Container::iterator ContIt,
1752 typename Container::iterator ContEnd, RandomAccessIterator ValIt,
1753 RandomAccessIterator ValEnd) {
1754 while (true) {
1755 if (ValIt == ValEnd) {
1756 Cont.erase(ContIt, ContEnd);
1757 return;
1758 } else if (ContIt == ContEnd) {
1759 Cont.insert(ContIt, ValIt, ValEnd);
1760 return;
1761 }
1762 *ContIt++ = *ValIt++;
1763 }
1764}
1765
1766/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
1767/// the range R.
1768template<typename Container, typename Range = std::initializer_list<
1769 typename Container::value_type>>
1770void replace(Container &Cont, typename Container::iterator ContIt,
1771 typename Container::iterator ContEnd, Range R) {
1772 replace(Cont, ContIt, ContEnd, R.begin(), R.end());
1773}
1774
1775/// An STL-style algorithm similar to std::for_each that applies a second
1776/// functor between every pair of elements.
1777///
1778/// This provides the control flow logic to, for example, print a
1779/// comma-separated list:
1780/// \code
1781/// interleave(names.begin(), names.end(),
1782/// [&](StringRef name) { os << name; },
1783/// [&] { os << ", "; });
1784/// \endcode
1785template <typename ForwardIterator, typename UnaryFunctor,
1786 typename NullaryFunctor,
1787 typename = typename std::enable_if<
1788 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1789 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1790inline void interleave(ForwardIterator begin, ForwardIterator end,
1791 UnaryFunctor each_fn, NullaryFunctor between_fn) {
1792 if (begin == end)
1793 return;
1794 each_fn(*begin);
1795 ++begin;
1796 for (; begin != end; ++begin) {
1797 between_fn();
1798 each_fn(*begin);
1799 }
1800}
1801
1802template <typename Container, typename UnaryFunctor, typename NullaryFunctor,
1803 typename = typename std::enable_if<
1804 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1805 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1806inline void interleave(const Container &c, UnaryFunctor each_fn,
1807 NullaryFunctor between_fn) {
1808 interleave(c.begin(), c.end(), each_fn, between_fn);
1809}
1810
1811/// Overload of interleave for the common case of string separator.
1812template <typename Container, typename UnaryFunctor, typename StreamT,
1813 typename T = detail::ValueOfRange<Container>>
1814inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn,
1815 const StringRef &separator) {
1816 interleave(c.begin(), c.end(), each_fn, [&] { os << separator; });
1817}
1818template <typename Container, typename StreamT,
1819 typename T = detail::ValueOfRange<Container>>
1820inline void interleave(const Container &c, StreamT &os,
1821 const StringRef &separator) {
1822 interleave(
1823 c, os, [&](const T &a) { os << a; }, separator);
1824}
1825
1826template <typename Container, typename UnaryFunctor, typename StreamT,
1827 typename T = detail::ValueOfRange<Container>>
1828inline void interleaveComma(const Container &c, StreamT &os,
1829 UnaryFunctor each_fn) {
1830 interleave(c, os, each_fn, ", ");
1831}
1832template <typename Container, typename StreamT,
1833 typename T = detail::ValueOfRange<Container>>
1834inline void interleaveComma(const Container &c, StreamT &os) {
1835 interleaveComma(c, os, [&](const T &a) { os << a; });
1836}
1837
1838//===----------------------------------------------------------------------===//
1839// Extra additions to <memory>
1840//===----------------------------------------------------------------------===//
1841
1842struct FreeDeleter {
1843 void operator()(void* v) {
1844 ::free(v);
1845 }
1846};
1847
1848template<typename First, typename Second>
1849struct pair_hash {
1850 size_t operator()(const std::pair<First, Second> &P) const {
1851 return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
1852 }
1853};
1854
1855/// Binary functor that adapts to any other binary functor after dereferencing
1856/// operands.
1857template <typename T> struct deref {
1858 T func;
1859
1860 // Could be further improved to cope with non-derivable functors and
1861 // non-binary functors (should be a variadic template member function
1862 // operator()).
1863 template <typename A, typename B> auto operator()(A &lhs, B &rhs) const {
1864 assert(lhs)(static_cast<void> (0));
1865 assert(rhs)(static_cast<void> (0));
1866 return func(*lhs, *rhs);
1867 }
1868};
1869
1870namespace detail {
1871
1872template <typename R> class enumerator_iter;
1873
1874template <typename R> struct result_pair {
1875 using value_reference =
1876 typename std::iterator_traits<IterOfRange<R>>::reference;
1877
1878 friend class enumerator_iter<R>;
1879
1880 result_pair() = default;
1881 result_pair(std::size_t Index, IterOfRange<R> Iter)
1882 : Index(Index), Iter(Iter) {}
1883
1884 result_pair(const result_pair<R> &Other)
1885 : Index(Other.Index), Iter(Other.Iter) {}
1886 result_pair &operator=(const result_pair &Other) {
1887 Index = Other.Index;
1888 Iter = Other.Iter;
1889 return *this;
1890 }
1891
1892 std::size_t index() const { return Index; }
1893 const value_reference value() const { return *Iter; }
1894 value_reference value() { return *Iter; }
1895
1896private:
1897 std::size_t Index = std::numeric_limits<std::size_t>::max();
1898 IterOfRange<R> Iter;
1899};
1900
1901template <typename R>
1902class enumerator_iter
1903 : public iterator_facade_base<
1904 enumerator_iter<R>, std::forward_iterator_tag, result_pair<R>,
1905 typename std::iterator_traits<IterOfRange<R>>::difference_type,
1906 typename std::iterator_traits<IterOfRange<R>>::pointer,
1907 typename std::iterator_traits<IterOfRange<R>>::reference> {
1908 using result_type = result_pair<R>;
1909
1910public:
1911 explicit enumerator_iter(IterOfRange<R> EndIter)
1912 : Result(std::numeric_limits<size_t>::max(), EndIter) {}
1913
1914 enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
1915 : Result(Index, Iter) {}
1916
1917 result_type &operator*() { return Result; }
1918 const result_type &operator*() const { return Result; }
1919
1920 enumerator_iter &operator++() {
1921 assert(Result.Index != std::numeric_limits<size_t>::max())(static_cast<void> (0));
1922 ++Result.Iter;
1923 ++Result.Index;
1924 return *this;
1925 }
1926
1927 bool operator==(const enumerator_iter &RHS) const {
1928 // Don't compare indices here, only iterators. It's possible for an end
1929 // iterator to have different indices depending on whether it was created
1930 // by calling std::end() versus incrementing a valid iterator.
1931 return Result.Iter == RHS.Result.Iter;
1932 }
1933
1934 enumerator_iter(const enumerator_iter &Other) : Result(Other.Result) {}
1935 enumerator_iter &operator=(const enumerator_iter &Other) {
1936 Result = Other.Result;
1937 return *this;
1938 }
1939
1940private:
1941 result_type Result;
1942};
1943
1944template <typename R> class enumerator {
1945public:
1946 explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {}
1947
1948 enumerator_iter<R> begin() {
1949 return enumerator_iter<R>(0, std::begin(TheRange));
1950 }
1951
1952 enumerator_iter<R> end() {
1953 return enumerator_iter<R>(std::end(TheRange));
1954 }
1955
1956private:
1957 R TheRange;
1958};
1959
1960} // end namespace detail
1961
1962/// Given an input range, returns a new range whose values are are pair (A,B)
1963/// such that A is the 0-based index of the item in the sequence, and B is
1964/// the value from the original sequence. Example:
1965///
1966/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
1967/// for (auto X : enumerate(Items)) {
1968/// printf("Item %d - %c\n", X.index(), X.value());
1969/// }
1970///
1971/// Output:
1972/// Item 0 - A
1973/// Item 1 - B
1974/// Item 2 - C
1975/// Item 3 - D
1976///
1977template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
1978 return detail::enumerator<R>(std::forward<R>(TheRange));
1979}
1980
1981namespace detail {
1982
1983template <typename F, typename Tuple, std::size_t... I>
1984decltype(auto) apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) {
1985 return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
1986}
1987
1988} // end namespace detail
1989
1990/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
1991/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
1992/// return the result.
1993template <typename F, typename Tuple>
1994decltype(auto) apply_tuple(F &&f, Tuple &&t) {
1995 using Indices = std::make_index_sequence<
1996 std::tuple_size<typename std::decay<Tuple>::type>::value>;
1997
1998 return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
1999 Indices{});
2000}
2001
2002namespace detail {
2003
2004template <typename Predicate, typename... Args>
2005bool all_of_zip_predicate_first(Predicate &&P, Args &&...args) {
2006 auto z = zip(args...);
2007 auto it = z.begin();
2008 auto end = z.end();
2009 while (it != end) {
2010 if (!apply_tuple([&](auto &&...args) { return P(args...); }, *it))
2011 return false;
2012 ++it;
2013 }
2014 return it.all_equals(end);
2015}
2016
2017// Just an adaptor to switch the order of argument and have the predicate before
2018// the zipped inputs.
2019template <typename... ArgsThenPredicate, size_t... InputIndexes>
2020bool all_of_zip_predicate_last(
2021 std::tuple<ArgsThenPredicate...> argsThenPredicate,
2022 std::index_sequence<InputIndexes...>) {
2023 auto constexpr OutputIndex =
2024 std::tuple_size<decltype(argsThenPredicate)>::value - 1;
2025 return all_of_zip_predicate_first(std::get<OutputIndex>(argsThenPredicate),
2026 std::get<InputIndexes>(argsThenPredicate)...);
2027}
2028
2029} // end namespace detail
2030
2031/// Compare two zipped ranges using the provided predicate (as last argument).
2032/// Return true if all elements satisfy the predicate and false otherwise.
2033// Return false if the zipped iterator aren't all at end (size mismatch).
2034template <typename... ArgsAndPredicate>
2035bool all_of_zip(ArgsAndPredicate &&...argsAndPredicate) {
2036 return detail::all_of_zip_predicate_last(
2037 std::forward_as_tuple(argsAndPredicate...),
2038 std::make_index_sequence<sizeof...(argsAndPredicate) - 1>{});
2039}
2040
2041/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
2042/// time. Not meant for use with random-access iterators.
2043/// Can optionally take a predicate to filter lazily some items.
2044template <typename IterTy,
2045 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2046bool hasNItems(
2047 IterTy &&Begin, IterTy &&End, unsigned N,
2048 Pred &&ShouldBeCounted =
2049 [](const decltype(*std::declval<IterTy>()) &) { return true; },
2050 std::enable_if_t<
2051 !std::is_base_of<std::random_access_iterator_tag,
2052 typename std::iterator_traits<std::remove_reference_t<
2053 decltype(Begin)>>::iterator_category>::value,
2054 void> * = nullptr) {
2055 for (; N; ++Begin) {
2056 if (Begin == End)
2057 return false; // Too few.
2058 N -= ShouldBeCounted(*Begin);
2059 }
2060 for (; Begin != End; ++Begin)
2061 if (ShouldBeCounted(*Begin))
2062 return false; // Too many.
2063 return true;
2064}
2065
2066/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
2067/// time. Not meant for use with random-access iterators.
2068/// Can optionally take a predicate to lazily filter some items.
2069template <typename IterTy,
2070 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2071bool hasNItemsOrMore(
2072 IterTy &&Begin, IterTy &&End, unsigned N,
2073 Pred &&ShouldBeCounted =
2074 [](const decltype(*std::declval<IterTy>()) &) { return true; },
2075 std::enable_if_t<
2076 !std::is_base_of<std::random_access_iterator_tag,
2077 typename std::iterator_traits<std::remove_reference_t<
2078 decltype(Begin)>>::iterator_category>::value,
2079 void> * = nullptr) {
2080 for (; N; ++Begin) {
2081 if (Begin == End)
2082 return false; // Too few.
2083 N -= ShouldBeCounted(*Begin);
2084 }
2085 return true;
2086}
2087
2088/// Returns true if the sequence [Begin, End) has N or less items. Can
2089/// optionally take a predicate to lazily filter some items.
2090template <typename IterTy,
2091 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2092bool hasNItemsOrLess(
2093 IterTy &&Begin, IterTy &&End, unsigned N,
2094 Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) {
2095 return true;
2096 }) {
2097 assert(N != std::numeric_limits<unsigned>::max())(static_cast<void> (0));
2098 return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted);
2099}
2100
2101/// Returns true if the given container has exactly N items
2102template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) {
2103 return hasNItems(std::begin(C), std::end(C), N);
2104}
2105
2106/// Returns true if the given container has N or more items
2107template <typename ContainerTy>
2108bool hasNItemsOrMore(ContainerTy &&C, unsigned N) {
2109 return hasNItemsOrMore(std::begin(C), std::end(C), N);
2110}
2111
2112/// Returns true if the given container has N or less items
2113template <typename ContainerTy>
2114bool hasNItemsOrLess(ContainerTy &&C, unsigned N) {
2115 return hasNItemsOrLess(std::begin(C), std::end(C), N);
2116}
2117
2118/// Returns a raw pointer that represents the same address as the argument.
2119///
2120/// This implementation can be removed once we move to C++20 where it's defined
2121/// as std::to_address().
2122///
2123/// The std::pointer_traits<>::to_address(p) variations of these overloads has
2124/// not been implemented.
2125template <class Ptr> auto to_address(const Ptr &P) { return P.operator->(); }
2126template <class T> constexpr T *to_address(T *P) { return P; }
2127
2128} // end namespace llvm
2129
2130#endif // LLVM_ADT_STLEXTRAS_H

/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/bits/stl_algo.h

1// Algorithm implementation -*- C++ -*-
2
3// Copyright (C) 2001-2020 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/*
26 *
27 * Copyright (c) 1994
28 * Hewlett-Packard Company
29 *
30 * Permission to use, copy, modify, distribute and sell this software
31 * and its documentation for any purpose is hereby granted without fee,
32 * provided that the above copyright notice appear in all copies and
33 * that both that copyright notice and this permission notice appear
34 * in supporting documentation. Hewlett-Packard Company makes no
35 * representations about the suitability of this software for any
36 * purpose. It is provided "as is" without express or implied warranty.
37 *
38 *
39 * Copyright (c) 1996
40 * Silicon Graphics Computer Systems, Inc.
41 *
42 * Permission to use, copy, modify, distribute and sell this software
43 * and its documentation for any purpose is hereby granted without fee,
44 * provided that the above copyright notice appear in all copies and
45 * that both that copyright notice and this permission notice appear
46 * in supporting documentation. Silicon Graphics makes no
47 * representations about the suitability of this software for any
48 * purpose. It is provided "as is" without express or implied warranty.
49 */
50
51/** @file bits/stl_algo.h
52 * This is an internal header file, included by other library headers.
53 * Do not attempt to use it directly. @headername{algorithm}
54 */
55
56#ifndef _STL_ALGO_H1
57#define _STL_ALGO_H1 1
58
59#include <cstdlib> // for rand
60#include <bits/algorithmfwd.h>
61#include <bits/stl_heap.h>
62#include <bits/stl_tempbuf.h> // for _Temporary_buffer
63#include <bits/predefined_ops.h>
64
65#if __cplusplus201402L >= 201103L
66#include <bits/uniform_int_dist.h>
67#endif
68
69// See concept_check.h for the __glibcxx_*_requires macros.
70
71namespace std _GLIBCXX_VISIBILITY(default)__attribute__ ((__visibility__ ("default")))
72{
73_GLIBCXX_BEGIN_NAMESPACE_VERSION
74
75 /// Swaps the median value of *__a, *__b and *__c under __comp to *__result
76 template<typename _Iterator, typename _Compare>
77 _GLIBCXX20_CONSTEXPR
78 void
79 __move_median_to_first(_Iterator __result,_Iterator __a, _Iterator __b,
80 _Iterator __c, _Compare __comp)
81 {
82 if (__comp(__a, __b))
83 {
84 if (__comp(__b, __c))
85 std::iter_swap(__result, __b);
86 else if (__comp(__a, __c))
87 std::iter_swap(__result, __c);
88 else
89 std::iter_swap(__result, __a);
90 }
91 else if (__comp(__a, __c))
92 std::iter_swap(__result, __a);
93 else if (__comp(__b, __c))
94 std::iter_swap(__result, __c);
95 else
96 std::iter_swap(__result, __b);
97 }
98
99 /// Provided for stable_partition to use.
100 template<typename _InputIterator, typename _Predicate>
101 _GLIBCXX20_CONSTEXPR
102 inline _InputIterator
103 __find_if_not(_InputIterator __first, _InputIterator __last,
104 _Predicate __pred)
105 {
106 return std::__find_if(__first, __last,
107 __gnu_cxx::__ops::__negate(__pred),
108 std::__iterator_category(__first));
109 }
110
111 /// Like find_if_not(), but uses and updates a count of the
112 /// remaining range length instead of comparing against an end
113 /// iterator.
114 template<typename _InputIterator, typename _Predicate, typename _Distance>
115 _GLIBCXX20_CONSTEXPR
116 _InputIterator
117 __find_if_not_n(_InputIterator __first, _Distance& __len, _Predicate __pred)
118 {
119 for (; __len; --__len, (void) ++__first)
120 if (!__pred(__first))
121 break;
122 return __first;
123 }
124
125 // set_difference
126 // set_intersection
127 // set_symmetric_difference
128 // set_union
129 // for_each
130 // find
131 // find_if
132 // find_first_of
133 // adjacent_find
134 // count
135 // count_if
136 // search
137
138 template<typename _ForwardIterator1, typename _ForwardIterator2,
139 typename _BinaryPredicate>
140 _GLIBCXX20_CONSTEXPR
141 _ForwardIterator1
142 __search(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
143 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
144 _BinaryPredicate __predicate)
145 {
146 // Test for empty ranges
147 if (__first1 == __last1 || __first2 == __last2)
148 return __first1;
149
150 // Test for a pattern of length 1.
151 _ForwardIterator2 __p1(__first2);
152 if (++__p1 == __last2)
153 return std::__find_if(__first1, __last1,
154 __gnu_cxx::__ops::__iter_comp_iter(__predicate, __first2));
155
156 // General case.
157 _ForwardIterator1 __current = __first1;
158
159 for (;;)
160 {
161 __first1 =
162 std::__find_if(__first1, __last1,
163 __gnu_cxx::__ops::__iter_comp_iter(__predicate, __first2));
164
165 if (__first1 == __last1)
166 return __last1;
167
168 _ForwardIterator2 __p = __p1;
169 __current = __first1;
170 if (++__current == __last1)
171 return __last1;
172
173 while (__predicate(__current, __p))
174 {
175 if (++__p == __last2)
176 return __first1;
177 if (++__current == __last1)
178 return __last1;
179 }
180 ++__first1;
181 }
182 return __first1;
183 }
184
185 // search_n
186
187 /**
188 * This is an helper function for search_n overloaded for forward iterators.
189 */
190 template<typename _ForwardIterator, typename _Integer,
191 typename _UnaryPredicate>
192 _GLIBCXX20_CONSTEXPR
193 _ForwardIterator
194 __search_n_aux(_ForwardIterator __first, _ForwardIterator __last,
195 _Integer __count, _UnaryPredicate __unary_pred,
196 std::forward_iterator_tag)
197 {
198 __first = std::__find_if(__first, __last, __unary_pred);
199 while (__first != __last)
200 {
201 typename iterator_traits<_ForwardIterator>::difference_type
202 __n = __count;
203 _ForwardIterator __i = __first;
204 ++__i;
205 while (__i != __last && __n != 1 && __unary_pred(__i))
206 {
207 ++__i;
208 --__n;
209 }
210 if (__n == 1)
211 return __first;
212 if (__i == __last)
213 return __last;
214 __first = std::__find_if(++__i, __last, __unary_pred);
215 }
216 return __last;
217 }
218
219 /**
220 * This is an helper function for search_n overloaded for random access
221 * iterators.
222 */
223 template<typename _RandomAccessIter, typename _Integer,
224 typename _UnaryPredicate>
225 _GLIBCXX20_CONSTEXPR
226 _RandomAccessIter
227 __search_n_aux(_RandomAccessIter __first, _RandomAccessIter __last,
228 _Integer __count, _UnaryPredicate __unary_pred,
229 std::random_access_iterator_tag)
230 {
231 typedef typename std::iterator_traits<_RandomAccessIter>::difference_type
232 _DistanceType;
233
234 _DistanceType __tailSize = __last - __first;
235 _DistanceType __remainder = __count;
236
237 while (__remainder <= __tailSize) // the main loop...
238 {
239 __first += __remainder;
240 __tailSize -= __remainder;
241 // __first here is always pointing to one past the last element of
242 // next possible match.
243 _RandomAccessIter __backTrack = __first;
244 while (__unary_pred(--__backTrack))
245 {
246 if (--__remainder == 0)
247 return (__first - __count); // Success
248 }
249 __remainder = __count + 1 - (__first - __backTrack);
250 }
251 return __last; // Failure
252 }
253
254 template<typename _ForwardIterator, typename _Integer,
255 typename _UnaryPredicate>
256 _GLIBCXX20_CONSTEXPR
257 _ForwardIterator
258 __search_n(_ForwardIterator __first, _ForwardIterator __last,
259 _Integer __count,
260 _UnaryPredicate __unary_pred)
261 {
262 if (__count <= 0)
263 return __first;
264
265 if (__count == 1)
266 return std::__find_if(__first, __last, __unary_pred);
267
268 return std::__search_n_aux(__first, __last, __count, __unary_pred,
269 std::__iterator_category(__first));
270 }
271
272 // find_end for forward iterators.
273 template<typename _ForwardIterator1, typename _ForwardIterator2,
274 typename _BinaryPredicate>
275 _GLIBCXX20_CONSTEXPR
276 _ForwardIterator1
277 __find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
278 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
279 forward_iterator_tag, forward_iterator_tag,
280 _BinaryPredicate __comp)
281 {
282 if (__first2 == __last2)
283 return __last1;
284
285 _ForwardIterator1 __result = __last1;
286 while (1)
287 {
288 _ForwardIterator1 __new_result
289 = std::__search(__first1, __last1, __first2, __last2, __comp);
290 if (__new_result == __last1)
291 return __result;
292 else
293 {
294 __result = __new_result;
295 __first1 = __new_result;
296 ++__first1;
297 }
298 }
299 }
300
301 // find_end for bidirectional iterators (much faster).
302 template<typename _BidirectionalIterator1, typename _BidirectionalIterator2,
303 typename _BinaryPredicate>
304 _GLIBCXX20_CONSTEXPR
305 _BidirectionalIterator1
306 __find_end(_BidirectionalIterator1 __first1,
307 _BidirectionalIterator1 __last1,
308 _BidirectionalIterator2 __first2,
309 _BidirectionalIterator2 __last2,
310 bidirectional_iterator_tag, bidirectional_iterator_tag,
311 _BinaryPredicate __comp)
312 {
313 // concept requirements
314 __glibcxx_function_requires(_BidirectionalIteratorConcept<
315 _BidirectionalIterator1>)
316 __glibcxx_function_requires(_BidirectionalIteratorConcept<
317 _BidirectionalIterator2>)
318
319 typedef reverse_iterator<_BidirectionalIterator1> _RevIterator1;
320 typedef reverse_iterator<_BidirectionalIterator2> _RevIterator2;
321
322 _RevIterator1 __rlast1(__first1);
323 _RevIterator2 __rlast2(__first2);
324 _RevIterator1 __rresult = std::__search(_RevIterator1(__last1), __rlast1,
325 _RevIterator2(__last2), __rlast2,
326 __comp);
327
328 if (__rresult == __rlast1)
329 return __last1;
330 else
331 {
332 _BidirectionalIterator1 __result = __rresult.base();
333 std::advance(__result, -std::distance(__first2, __last2));
334 return __result;
335 }
336 }
337
338 /**
339 * @brief Find last matching subsequence in a sequence.
340 * @ingroup non_mutating_algorithms
341 * @param __first1 Start of range to search.
342 * @param __last1 End of range to search.
343 * @param __first2 Start of sequence to match.
344 * @param __last2 End of sequence to match.
345 * @return The last iterator @c i in the range
346 * @p [__first1,__last1-(__last2-__first2)) such that @c *(i+N) ==
347 * @p *(__first2+N) for each @c N in the range @p
348 * [0,__last2-__first2), or @p __last1 if no such iterator exists.
349 *
350 * Searches the range @p [__first1,__last1) for a sub-sequence that
351 * compares equal value-by-value with the sequence given by @p
352 * [__first2,__last2) and returns an iterator to the __first
353 * element of the sub-sequence, or @p __last1 if the sub-sequence
354 * is not found. The sub-sequence will be the last such
355 * subsequence contained in [__first1,__last1).
356 *
357 * Because the sub-sequence must lie completely within the range @p
358 * [__first1,__last1) it must start at a position less than @p
359 * __last1-(__last2-__first2) where @p __last2-__first2 is the
360 * length of the sub-sequence. This means that the returned
361 * iterator @c i will be in the range @p
362 * [__first1,__last1-(__last2-__first2))
363 */
364 template<typename _ForwardIterator1, typename _ForwardIterator2>
365 _GLIBCXX20_CONSTEXPR
366 inline _ForwardIterator1
367 find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
368 _ForwardIterator2 __first2, _ForwardIterator2 __last2)
369 {
370 // concept requirements
371 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
372 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
373 __glibcxx_function_requires(_EqualOpConcept<
374 typename iterator_traits<_ForwardIterator1>::value_type,
375 typename iterator_traits<_ForwardIterator2>::value_type>)
376 __glibcxx_requires_valid_range(__first1, __last1);
377 __glibcxx_requires_valid_range(__first2, __last2);
378
379 return std::__find_end(__first1, __last1, __first2, __last2,
380 std::__iterator_category(__first1),
381 std::__iterator_category(__first2),
382 __gnu_cxx::__ops::__iter_equal_to_iter());
383 }
384
385 /**
386 * @brief Find last matching subsequence in a sequence using a predicate.
387 * @ingroup non_mutating_algorithms
388 * @param __first1 Start of range to search.
389 * @param __last1 End of range to search.
390 * @param __first2 Start of sequence to match.
391 * @param __last2 End of sequence to match.
392 * @param __comp The predicate to use.
393 * @return The last iterator @c i in the range @p
394 * [__first1,__last1-(__last2-__first2)) such that @c
395 * predicate(*(i+N), @p (__first2+N)) is true for each @c N in the
396 * range @p [0,__last2-__first2), or @p __last1 if no such iterator
397 * exists.
398 *
399 * Searches the range @p [__first1,__last1) for a sub-sequence that
400 * compares equal value-by-value with the sequence given by @p
401 * [__first2,__last2) using comp as a predicate and returns an
402 * iterator to the first element of the sub-sequence, or @p __last1
403 * if the sub-sequence is not found. The sub-sequence will be the
404 * last such subsequence contained in [__first,__last1).
405 *
406 * Because the sub-sequence must lie completely within the range @p
407 * [__first1,__last1) it must start at a position less than @p
408 * __last1-(__last2-__first2) where @p __last2-__first2 is the
409 * length of the sub-sequence. This means that the returned
410 * iterator @c i will be in the range @p
411 * [__first1,__last1-(__last2-__first2))
412 */
413 template<typename _ForwardIterator1, typename _ForwardIterator2,
414 typename _BinaryPredicate>
415 _GLIBCXX20_CONSTEXPR
416 inline _ForwardIterator1
417 find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
418 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
419 _BinaryPredicate __comp)
420 {
421 // concept requirements
422 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
423 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
424 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
425 typename iterator_traits<_ForwardIterator1>::value_type,
426 typename iterator_traits<_ForwardIterator2>::value_type>)
427 __glibcxx_requires_valid_range(__first1, __last1);
428 __glibcxx_requires_valid_range(__first2, __last2);
429
430 return std::__find_end(__first1, __last1, __first2, __last2,
431 std::__iterator_category(__first1),
432 std::__iterator_category(__first2),
433 __gnu_cxx::__ops::__iter_comp_iter(__comp));
434 }
435
436#if __cplusplus201402L >= 201103L
437 /**
438 * @brief Checks that a predicate is true for all the elements
439 * of a sequence.
440 * @ingroup non_mutating_algorithms
441 * @param __first An input iterator.
442 * @param __last An input iterator.
443 * @param __pred A predicate.
444 * @return True if the check is true, false otherwise.
445 *
446 * Returns true if @p __pred is true for each element in the range
447 * @p [__first,__last), and false otherwise.
448 */
449 template<typename _InputIterator, typename _Predicate>
450 _GLIBCXX20_CONSTEXPR
451 inline bool
452 all_of(_InputIterator __first, _InputIterator __last, _Predicate __pred)
453 { return __last == std::find_if_not(__first, __last, __pred); }
454
455 /**
456 * @brief Checks that a predicate is false for all the elements
457 * of a sequence.
458 * @ingroup non_mutating_algorithms
459 * @param __first An input iterator.
460 * @param __last An input iterator.
461 * @param __pred A predicate.
462 * @return True if the check is true, false otherwise.
463 *
464 * Returns true if @p __pred is false for each element in the range
465 * @p [__first,__last), and false otherwise.
466 */
467 template<typename _InputIterator, typename _Predicate>
468 _GLIBCXX20_CONSTEXPR
469 inline bool
470 none_of(_InputIterator __first, _InputIterator __last, _Predicate __pred)
471 { return __last == _GLIBCXX_STD_Astd::find_if(__first, __last, __pred); }
10
Assuming the condition is false
11
Returning zero, which participates in a condition later
472
473 /**
474 * @brief Checks that a predicate is true for at least one element
475 * of a sequence.
476 * @ingroup non_mutating_algorithms
477 * @param __first An input iterator.
478 * @param __last An input iterator.
479 * @param __pred A predicate.
480 * @return True if the check is true, false otherwise.
481 *
482 * Returns true if an element exists in the range @p
483 * [__first,__last) such that @p __pred is true, and false
484 * otherwise.
485 */
486 template<typename _InputIterator, typename _Predicate>
487 _GLIBCXX20_CONSTEXPR
488 inline bool
489 any_of(_InputIterator __first, _InputIterator __last, _Predicate __pred)
490 { return !std::none_of(__first, __last, __pred); }
9
Calling 'none_of<const unsigned long *, (lambda at /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/BitVector.h:163:25)>'
12
Returning from 'none_of<const unsigned long *, (lambda at /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/BitVector.h:163:25)>'
13
Returning the value 1, which participates in a condition later
491
492 /**
493 * @brief Find the first element in a sequence for which a
494 * predicate is false.
495 * @ingroup non_mutating_algorithms
496 * @param __first An input iterator.
497 * @param __last An input iterator.
498 * @param __pred A predicate.
499 * @return The first iterator @c i in the range @p [__first,__last)
500 * such that @p __pred(*i) is false, or @p __last if no such iterator exists.
501 */
502 template<typename _InputIterator, typename _Predicate>
503 _GLIBCXX20_CONSTEXPR
504 inline _InputIterator
505 find_if_not(_InputIterator __first, _InputIterator __last,
506 _Predicate __pred)
507 {
508 // concept requirements
509 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
510 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
511 typename iterator_traits<_InputIterator>::value_type>)
512 __glibcxx_requires_valid_range(__first, __last);
513 return std::__find_if_not(__first, __last,
514 __gnu_cxx::__ops::__pred_iter(__pred));
515 }
516
517 /**
518 * @brief Checks whether the sequence is partitioned.
519 * @ingroup mutating_algorithms
520 * @param __first An input iterator.
521 * @param __last An input iterator.
522 * @param __pred A predicate.
523 * @return True if the range @p [__first,__last) is partioned by @p __pred,
524 * i.e. if all elements that satisfy @p __pred appear before those that
525 * do not.
526 */
527 template<typename _InputIterator, typename _Predicate>
528 _GLIBCXX20_CONSTEXPR
529 inline bool
530 is_partitioned(_InputIterator __first, _InputIterator __last,
531 _Predicate __pred)
532 {
533 __first = std::find_if_not(__first, __last, __pred);
534 if (__first == __last)
535 return true;
536 ++__first;
537 return std::none_of(__first, __last, __pred);
538 }
539
540 /**
541 * @brief Find the partition point of a partitioned range.
542 * @ingroup mutating_algorithms
543 * @param __first An iterator.
544 * @param __last Another iterator.
545 * @param __pred A predicate.
546 * @return An iterator @p mid such that @p all_of(__first, mid, __pred)
547 * and @p none_of(mid, __last, __pred) are both true.
548 */
549 template<typename _ForwardIterator, typename _Predicate>
550 _GLIBCXX20_CONSTEXPR
551 _ForwardIterator
552 partition_point(_ForwardIterator __first, _ForwardIterator __last,
553 _Predicate __pred)
554 {
555 // concept requirements
556 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
557 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
558 typename iterator_traits<_ForwardIterator>::value_type>)
559
560 // A specific debug-mode test will be necessary...
561 __glibcxx_requires_valid_range(__first, __last);
562
563 typedef typename iterator_traits<_ForwardIterator>::difference_type
564 _DistanceType;
565
566 _DistanceType __len = std::distance(__first, __last);
567
568 while (__len > 0)
569 {
570 _DistanceType __half = __len >> 1;
571 _ForwardIterator __middle = __first;
572 std::advance(__middle, __half);
573 if (__pred(*__middle))
574 {
575 __first = __middle;
576 ++__first;
577 __len = __len - __half - 1;
578 }
579 else
580 __len = __half;
581 }
582 return __first;
583 }
584#endif
585
586 template<typename _InputIterator, typename _OutputIterator,
587 typename _Predicate>
588 _GLIBCXX20_CONSTEXPR
589 _OutputIterator
590 __remove_copy_if(_InputIterator __first, _InputIterator __last,
591 _OutputIterator __result, _Predicate __pred)
592 {
593 for (; __first != __last; ++__first)
594 if (!__pred(__first))
595 {
596 *__result = *__first;
597 ++__result;
598 }
599 return __result;
600 }
601
602 /**
603 * @brief Copy a sequence, removing elements of a given value.
604 * @ingroup mutating_algorithms
605 * @param __first An input iterator.
606 * @param __last An input iterator.
607 * @param __result An output iterator.
608 * @param __value The value to be removed.
609 * @return An iterator designating the end of the resulting sequence.
610 *
611 * Copies each element in the range @p [__first,__last) not equal
612 * to @p __value to the range beginning at @p __result.
613 * remove_copy() is stable, so the relative order of elements that
614 * are copied is unchanged.
615 */
616 template<typename _InputIterator, typename _OutputIterator, typename _Tp>
617 _GLIBCXX20_CONSTEXPR
618 inline _OutputIterator
619 remove_copy(_InputIterator __first, _InputIterator __last,
620 _OutputIterator __result, const _Tp& __value)
621 {
622 // concept requirements
623 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
624 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
625 typename iterator_traits<_InputIterator>::value_type>)
626 __glibcxx_function_requires(_EqualOpConcept<
627 typename iterator_traits<_InputIterator>::value_type, _Tp>)
628 __glibcxx_requires_valid_range(__first, __last);
629
630 return std::__remove_copy_if(__first, __last, __result,
631 __gnu_cxx::__ops::__iter_equals_val(__value));
632 }
633
634 /**
635 * @brief Copy a sequence, removing elements for which a predicate is true.
636 * @ingroup mutating_algorithms
637 * @param __first An input iterator.
638 * @param __last An input iterator.
639 * @param __result An output iterator.
640 * @param __pred A predicate.
641 * @return An iterator designating the end of the resulting sequence.
642 *
643 * Copies each element in the range @p [__first,__last) for which
644 * @p __pred returns false to the range beginning at @p __result.
645 *
646 * remove_copy_if() is stable, so the relative order of elements that are
647 * copied is unchanged.
648 */
649 template<typename _InputIterator, typename _OutputIterator,
650 typename _Predicate>
651 _GLIBCXX20_CONSTEXPR
652 inline _OutputIterator
653 remove_copy_if(_InputIterator __first, _InputIterator __last,
654 _OutputIterator __result, _Predicate __pred)
655 {
656 // concept requirements
657 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
658 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
659 typename iterator_traits<_InputIterator>::value_type>)
660 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
661 typename iterator_traits<_InputIterator>::value_type>)
662 __glibcxx_requires_valid_range(__first, __last);
663
664 return std::__remove_copy_if(__first, __last, __result,
665 __gnu_cxx::__ops::__pred_iter(__pred));
666 }
667
668#if __cplusplus201402L >= 201103L
669 /**
670 * @brief Copy the elements of a sequence for which a predicate is true.
671 * @ingroup mutating_algorithms
672 * @param __first An input iterator.
673 * @param __last An input iterator.
674 * @param __result An output iterator.
675 * @param __pred A predicate.
676 * @return An iterator designating the end of the resulting sequence.
677 *
678 * Copies each element in the range @p [__first,__last) for which
679 * @p __pred returns true to the range beginning at @p __result.
680 *
681 * copy_if() is stable, so the relative order of elements that are
682 * copied is unchanged.
683 */
684 template<typename _InputIterator, typename _OutputIterator,
685 typename _Predicate>
686 _GLIBCXX20_CONSTEXPR
687 _OutputIterator
688 copy_if(_InputIterator __first, _InputIterator __last,
689 _OutputIterator __result, _Predicate __pred)
690 {
691 // concept requirements
692 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
693 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
694 typename iterator_traits<_InputIterator>::value_type>)
695 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
696 typename iterator_traits<_InputIterator>::value_type>)
697 __glibcxx_requires_valid_range(__first, __last);
698
699 for (; __first != __last; ++__first)
700 if (__pred(*__first))
701 {
702 *__result = *__first;
703 ++__result;
704 }
705 return __result;
706 }
707
708 template<typename _InputIterator, typename _Size, typename _OutputIterator>
709 _GLIBCXX20_CONSTEXPR
710 _OutputIterator
711 __copy_n_a(_InputIterator __first, _Size __n, _OutputIterator __result)
712 {
713 if (__n > 0)
714 {
715 while (true)
716 {
717 *__result = *__first;
718 ++__result;
719 if (--__n > 0)
720 ++__first;
721 else
722 break;
723 }
724 }
725 return __result;
726 }
727
728 template<typename _CharT, typename _Size>
729 __enable_if_t<__is_char<_CharT>::__value, _CharT*>
730 __copy_n_a(istreambuf_iterator<_CharT, char_traits<_CharT>>,
731 _Size, _CharT*);
732
733 template<typename _InputIterator, typename _Size, typename _OutputIterator>
734 _GLIBCXX20_CONSTEXPR
735 _OutputIterator
736 __copy_n(_InputIterator __first, _Size __n,
737 _OutputIterator __result, input_iterator_tag)
738 {
739 return std::__niter_wrap(__result,
740 __copy_n_a(__first, __n,
741 std::__niter_base(__result)));
742 }
743
744 template<typename _RandomAccessIterator, typename _Size,
745 typename _OutputIterator>
746 _GLIBCXX20_CONSTEXPR
747 inline _OutputIterator
748 __copy_n(_RandomAccessIterator __first, _Size __n,
749 _OutputIterator __result, random_access_iterator_tag)
750 { return std::copy(__first, __first + __n, __result); }
751
752 /**
753 * @brief Copies the range [first,first+n) into [result,result+n).
754 * @ingroup mutating_algorithms
755 * @param __first An input iterator.
756 * @param __n The number of elements to copy.
757 * @param __result An output iterator.
758 * @return result+n.
759 *
760 * This inline function will boil down to a call to @c memmove whenever
761 * possible. Failing that, if random access iterators are passed, then the
762 * loop count will be known (and therefore a candidate for compiler
763 * optimizations such as unrolling).
764 */
765 template<typename _InputIterator, typename _Size, typename _OutputIterator>
766 _GLIBCXX20_CONSTEXPR
767 inline _OutputIterator
768 copy_n(_InputIterator __first, _Size __n, _OutputIterator __result)
769 {
770 // concept requirements
771 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
772 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
773 typename iterator_traits<_InputIterator>::value_type>)
774 __glibcxx_requires_can_increment(__first, __n);
775 __glibcxx_requires_can_increment(__result, __n);
776
777 return std::__copy_n(__first, __n, __result,
778 std::__iterator_category(__first));
779 }
780
781 /**
782 * @brief Copy the elements of a sequence to separate output sequences
783 * depending on the truth value of a predicate.
784 * @ingroup mutating_algorithms
785 * @param __first An input iterator.
786 * @param __last An input iterator.
787 * @param __out_true An output iterator.
788 * @param __out_false An output iterator.
789 * @param __pred A predicate.
790 * @return A pair designating the ends of the resulting sequences.
791 *
792 * Copies each element in the range @p [__first,__last) for which
793 * @p __pred returns true to the range beginning at @p out_true
794 * and each element for which @p __pred returns false to @p __out_false.
795 */
796 template<typename _InputIterator, typename _OutputIterator1,
797 typename _OutputIterator2, typename _Predicate>
798 _GLIBCXX20_CONSTEXPR
799 pair<_OutputIterator1, _OutputIterator2>
800 partition_copy(_InputIterator __first, _InputIterator __last,
801 _OutputIterator1 __out_true, _OutputIterator2 __out_false,
802 _Predicate __pred)
803 {
804 // concept requirements
805 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
806 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator1,
807 typename iterator_traits<_InputIterator>::value_type>)
808 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator2,
809 typename iterator_traits<_InputIterator>::value_type>)
810 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
811 typename iterator_traits<_InputIterator>::value_type>)
812 __glibcxx_requires_valid_range(__first, __last);
813
814 for (; __first != __last; ++__first)
815 if (__pred(*__first))
816 {
817 *__out_true = *__first;
818 ++__out_true;
819 }
820 else
821 {
822 *__out_false = *__first;
823 ++__out_false;
824 }
825
826 return pair<_OutputIterator1, _OutputIterator2>(__out_true, __out_false);
827 }
828#endif // C++11
829
830 template<typename _ForwardIterator, typename _Predicate>
831 _GLIBCXX20_CONSTEXPR
832 _ForwardIterator
833 __remove_if(_ForwardIterator __first, _ForwardIterator __last,
834 _Predicate __pred)
835 {
836 __first = std::__find_if(__first, __last, __pred);
837 if (__first == __last)
838 return __first;
839 _ForwardIterator __result = __first;
840 ++__first;
841 for (; __first != __last; ++__first)
842 if (!__pred(__first))
843 {
844 *__result = _GLIBCXX_MOVE(*__first)std::move(*__first);
845 ++__result;
846 }
847 return __result;
848 }
849
850 /**
851 * @brief Remove elements from a sequence.
852 * @ingroup mutating_algorithms
853 * @param __first An input iterator.
854 * @param __last An input iterator.
855 * @param __value The value to be removed.
856 * @return An iterator designating the end of the resulting sequence.
857 *
858 * All elements equal to @p __value are removed from the range
859 * @p [__first,__last).
860 *
861 * remove() is stable, so the relative order of elements that are
862 * not removed is unchanged.
863 *
864 * Elements between the end of the resulting sequence and @p __last
865 * are still present, but their value is unspecified.
866 */
867 template<typename _ForwardIterator, typename _Tp>
868 _GLIBCXX20_CONSTEXPR
869 inline _ForwardIterator
870 remove(_ForwardIterator __first, _ForwardIterator __last,
871 const _Tp& __value)
872 {
873 // concept requirements
874 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
875 _ForwardIterator>)
876 __glibcxx_function_requires(_EqualOpConcept<
877 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
878 __glibcxx_requires_valid_range(__first, __last);
879
880 return std::__remove_if(__first, __last,
881 __gnu_cxx::__ops::__iter_equals_val(__value));
882 }
883
884 /**
885 * @brief Remove elements from a sequence using a predicate.
886 * @ingroup mutating_algorithms
887 * @param __first A forward iterator.
888 * @param __last A forward iterator.
889 * @param __pred A predicate.
890 * @return An iterator designating the end of the resulting sequence.
891 *
892 * All elements for which @p __pred returns true are removed from the range
893 * @p [__first,__last).
894 *
895 * remove_if() is stable, so the relative order of elements that are
896 * not removed is unchanged.
897 *
898 * Elements between the end of the resulting sequence and @p __last
899 * are still present, but their value is unspecified.
900 */
901 template<typename _ForwardIterator, typename _Predicate>
902 _GLIBCXX20_CONSTEXPR
903 inline _ForwardIterator
904 remove_if(_ForwardIterator __first, _ForwardIterator __last,
905 _Predicate __pred)
906 {
907 // concept requirements
908 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
909 _ForwardIterator>)
910 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
911 typename iterator_traits<_ForwardIterator>::value_type>)
912 __glibcxx_requires_valid_range(__first, __last);
913
914 return std::__remove_if(__first, __last,
915 __gnu_cxx::__ops::__pred_iter(__pred));
916 }
917
918 template<typename _ForwardIterator, typename _BinaryPredicate>
919 _GLIBCXX20_CONSTEXPR
920 _ForwardIterator
921 __adjacent_find(_ForwardIterator __first, _ForwardIterator __last,
922 _BinaryPredicate __binary_pred)
923 {
924 if (__first == __last)
925 return __last;
926 _ForwardIterator __next = __first;
927 while (++__next != __last)
928 {
929 if (__binary_pred(__first, __next))
930 return __first;
931 __first = __next;
932 }
933 return __last;
934 }
935
936 template<typename _ForwardIterator, typename _BinaryPredicate>
937 _GLIBCXX20_CONSTEXPR
938 _ForwardIterator
939 __unique(_ForwardIterator __first, _ForwardIterator __last,
940 _BinaryPredicate __binary_pred)
941 {
942 // Skip the beginning, if already unique.
943 __first = std::__adjacent_find(__first, __last, __binary_pred);
944 if (__first == __last)
945 return __last;
946
947 // Do the real copy work.
948 _ForwardIterator __dest = __first;
949 ++__first;
950 while (++__first != __last)
951 if (!__binary_pred(__dest, __first))
952 *++__dest = _GLIBCXX_MOVE(*__first)std::move(*__first);
953 return ++__dest;
954 }
955
956 /**
957 * @brief Remove consecutive duplicate values from a sequence.
958 * @ingroup mutating_algorithms
959 * @param __first A forward iterator.
960 * @param __last A forward iterator.
961 * @return An iterator designating the end of the resulting sequence.
962 *
963 * Removes all but the first element from each group of consecutive
964 * values that compare equal.
965 * unique() is stable, so the relative order of elements that are
966 * not removed is unchanged.
967 * Elements between the end of the resulting sequence and @p __last
968 * are still present, but their value is unspecified.
969 */
970 template<typename _ForwardIterator>
971 _GLIBCXX20_CONSTEXPR
972 inline _ForwardIterator
973 unique(_ForwardIterator __first, _ForwardIterator __last)
974 {
975 // concept requirements
976 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
977 _ForwardIterator>)
978 __glibcxx_function_requires(_EqualityComparableConcept<
979 typename iterator_traits<_ForwardIterator>::value_type>)
980 __glibcxx_requires_valid_range(__first, __last);
981
982 return std::__unique(__first, __last,
983 __gnu_cxx::__ops::__iter_equal_to_iter());
984 }
985
986 /**
987 * @brief Remove consecutive values from a sequence using a predicate.
988 * @ingroup mutating_algorithms
989 * @param __first A forward iterator.
990 * @param __last A forward iterator.
991 * @param __binary_pred A binary predicate.
992 * @return An iterator designating the end of the resulting sequence.
993 *
994 * Removes all but the first element from each group of consecutive
995 * values for which @p __binary_pred returns true.
996 * unique() is stable, so the relative order of elements that are
997 * not removed is unchanged.
998 * Elements between the end of the resulting sequence and @p __last
999 * are still present, but their value is unspecified.
1000 */
1001 template<typename _ForwardIterator, typename _BinaryPredicate>
1002 _GLIBCXX20_CONSTEXPR
1003 inline _ForwardIterator
1004 unique(_ForwardIterator __first, _ForwardIterator __last,
1005 _BinaryPredicate __binary_pred)
1006 {
1007 // concept requirements
1008 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
1009 _ForwardIterator>)
1010 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1011 typename iterator_traits<_ForwardIterator>::value_type,
1012 typename iterator_traits<_ForwardIterator>::value_type>)
1013 __glibcxx_requires_valid_range(__first, __last);
1014
1015 return std::__unique(__first, __last,
1016 __gnu_cxx::__ops::__iter_comp_iter(__binary_pred));
1017 }
1018
1019 /**
1020 * This is an uglified
1021 * unique_copy(_InputIterator, _InputIterator, _OutputIterator,
1022 * _BinaryPredicate)
1023 * overloaded for forward iterators and output iterator as result.
1024 */
1025 template<typename _ForwardIterator, typename _OutputIterator,
1026 typename _BinaryPredicate>
1027 _GLIBCXX20_CONSTEXPR
1028 _OutputIterator
1029 __unique_copy(_ForwardIterator __first, _ForwardIterator __last,
1030 _OutputIterator __result, _BinaryPredicate __binary_pred,
1031 forward_iterator_tag, output_iterator_tag)
1032 {
1033 // concept requirements -- iterators already checked
1034 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1035 typename iterator_traits<_ForwardIterator>::value_type,
1036 typename iterator_traits<_ForwardIterator>::value_type>)
1037
1038 _ForwardIterator __next = __first;
1039 *__result = *__first;
1040 while (++__next != __last)
1041 if (!__binary_pred(__first, __next))
1042 {
1043 __first = __next;
1044 *++__result = *__first;
1045 }
1046 return ++__result;
1047 }
1048
1049 /**
1050 * This is an uglified
1051 * unique_copy(_InputIterator, _InputIterator, _OutputIterator,
1052 * _BinaryPredicate)
1053 * overloaded for input iterators and output iterator as result.
1054 */
1055 template<typename _InputIterator, typename _OutputIterator,
1056 typename _BinaryPredicate>
1057 _GLIBCXX20_CONSTEXPR
1058 _OutputIterator
1059 __unique_copy(_InputIterator __first, _InputIterator __last,
1060 _OutputIterator __result, _BinaryPredicate __binary_pred,
1061 input_iterator_tag, output_iterator_tag)
1062 {
1063 // concept requirements -- iterators already checked
1064 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1065 typename iterator_traits<_InputIterator>::value_type,
1066 typename iterator_traits<_InputIterator>::value_type>)
1067
1068 typename iterator_traits<_InputIterator>::value_type __value = *__first;
1069 __decltype(__gnu_cxx::__ops::__iter_comp_val(__binary_pred))
1070 __rebound_pred
1071 = __gnu_cxx::__ops::__iter_comp_val(__binary_pred);
1072 *__result = __value;
1073 while (++__first != __last)
1074 if (!__rebound_pred(__first, __value))
1075 {
1076 __value = *__first;
1077 *++__result = __value;
1078 }
1079 return ++__result;
1080 }
1081
1082 /**
1083 * This is an uglified
1084 * unique_copy(_InputIterator, _InputIterator, _OutputIterator,
1085 * _BinaryPredicate)
1086 * overloaded for input iterators and forward iterator as result.
1087 */
1088 template<typename _InputIterator, typename _ForwardIterator,
1089 typename _BinaryPredicate>
1090 _GLIBCXX20_CONSTEXPR
1091 _ForwardIterator
1092 __unique_copy(_InputIterator __first, _InputIterator __last,
1093 _ForwardIterator __result, _BinaryPredicate __binary_pred,
1094 input_iterator_tag, forward_iterator_tag)
1095 {
1096 // concept requirements -- iterators already checked
1097 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1098 typename iterator_traits<_ForwardIterator>::value_type,
1099 typename iterator_traits<_InputIterator>::value_type>)
1100 *__result = *__first;
1101 while (++__first != __last)
1102 if (!__binary_pred(__result, __first))
1103 *++__result = *__first;
1104 return ++__result;
1105 }
1106
/**
 * This is an uglified reverse(_BidirectionalIterator,
 *                             _BidirectionalIterator)
 * overloaded for bidirectional iterators.
 *
 * Walks inward from both ends, swapping pairs, until the two cursors
 * meet.  Correctly handles empty and single-element ranges (both loop
 * exits fire before any swap).
 */
template<typename _BidirectionalIterator>
  void
  __reverse(_BidirectionalIterator __first, _BidirectionalIterator __last,
            std::bidirectional_iterator_tag)
  {
    while (true)
      // Stop when the cursors meet; note --__last happens before the
      // second comparison, so adjacent cursors also terminate here.
      if (__first == __last || __first == --__last)
        return;
      else
        {
          std::iter_swap(__first, __last);
          ++__first;
        }
  }
1127
/**
 * This is an uglified reverse(_BidirectionalIterator,
 *                             _BidirectionalIterator)
 * overloaded for random access iterators.
 *
 * Same inward-swapping scheme as the bidirectional version, but uses
 * operator< on the iterators, which halves the number of comparisons
 * per pair.
 */
template<typename _RandomAccessIterator>
  void
  __reverse(_RandomAccessIterator __first, _RandomAccessIterator __last,
            std::random_access_iterator_tag)
  {
    if (__first == __last)
      return;
    --__last;
    while (__first < __last)
      {
        std::iter_swap(__first, __last);
        ++__first;
        --__last;
      }
  }
1149
1150 /**
1151 * @brief Reverse a sequence.
1152 * @ingroup mutating_algorithms
1153 * @param __first A bidirectional iterator.
1154 * @param __last A bidirectional iterator.
1155 * @return reverse() returns no value.
1156 *
1157 * Reverses the order of the elements in the range @p [__first,__last),
1158 * so that the first element becomes the last etc.
1159 * For every @c i such that @p 0<=i<=(__last-__first)/2), @p reverse()
1160 * swaps @p *(__first+i) and @p *(__last-(i+1))
1161 */
1162 template<typename _BidirectionalIterator>
1163 _GLIBCXX20_CONSTEXPR
1164 inline void
1165 reverse(_BidirectionalIterator __first, _BidirectionalIterator __last)
1166 {
1167 // concept requirements
1168 __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
1169 _BidirectionalIterator>)
1170 __glibcxx_requires_valid_range(__first, __last);
// Tag dispatch: selects the bidirectional or random-access __reverse
// overload defined earlier in this header based on the iterator category.
1171 std::__reverse(__first, __last, std::__iterator_category(__first));
1172 }
1173
/**
 * @brief Copy a sequence, reversing its elements.
 * @ingroup mutating_algorithms
 * @param __first   A bidirectional iterator.
 * @param __last    A bidirectional iterator.
 * @param __result  An output iterator.
 * @return An iterator designating the end of the resulting sequence.
 *
 * Copies the elements in the range @p [__first,__last) to the range
 * @p [__result,__result+(__last-__first)) such that the order of the
 * elements is reversed.  The two ranges must not overlap.
 */
template<typename _BidirectionalIterator, typename _OutputIterator>
  _OutputIterator
  reverse_copy(_BidirectionalIterator __first, _BidirectionalIterator __last,
               _OutputIterator __result)
  {
    // Walk the source backwards while the destination advances forwards;
    // an empty range writes nothing.
    while (__first != __last)
      {
        --__last;
        *__result = *__last;
        ++__result;
      }
    return __result;
  }
1211
/**
 * This is a helper function for the rotate algorithm specialized on RAIs.
 * It returns the greatest common divisor of two integer values
 * (classic iterative Euclidean algorithm; __gcd(m, 0) == m).
 */
template<typename _EuclideanRingElement>
  _EuclideanRingElement
  __gcd(_EuclideanRingElement __m, _EuclideanRingElement __n)
  {
    while (__n != 0)
      {
        _EuclideanRingElement __t = __m % __n;
        __m = __n;
        __n = __t;
      }
    return __m;
  }
1229
1230 inline namespace _V2
1231 {
1232
/// This is a helper function for the rotate algorithm: forward-iterator
/// version.  Rotates [__first,__last) so the element at __middle moves
/// to __first, using pairwise swaps that walk a cycle through the range.
/// Returns the new position of the old first element, i.e.
/// __first + (__last - __middle).
template<typename _ForwardIterator>
  _ForwardIterator
  __rotate(_ForwardIterator __first,
           _ForwardIterator __middle,
           _ForwardIterator __last,
           std::forward_iterator_tag)
  {
    // Degenerate rotations: nothing moves.
    if (__first == __middle)
      return __last;
    else if (__last == __middle)
      return __first;

    // First pass: swap the leading block into place; whenever the front
    // cursor reaches __middle, the unplaced remainder restarts there.
    _ForwardIterator __first2 = __middle;
    do
      {
        std::iter_swap(__first, __first2);
        ++__first;
        ++__first2;
        if (__first == __middle)
          __middle = __first2;
      }
    while (__first2 != __last);

    // The old *first now sits here; remember it for the return value.
    _ForwardIterator __ret = __first;

    __first2 = __middle;

    // Second phase: keep cycling the (shrinking) unplaced tail until it
    // is exhausted.
    while (__first2 != __last)
      {
        std::iter_swap(__first, __first2);
        ++__first;
        ++__first2;
        if (__first == __middle)
          __middle = __first2;
        else if (__first2 == __last)
          __first2 = __middle;
      }
    return __ret;
  }
1274
1275 /// This is a helper function for the rotate algorithm.
// Bidirectional-iterator version: the classic "three reversals" rotate.
// Reverse both halves, then swap inward from the ends; the leftover
// sub-range is reversed once more.  Returns the new position of the old
// first element.
1276 template<typename _BidirectionalIterator>
1277 _GLIBCXX20_CONSTEXPR
1278 _BidirectionalIterator
1279 __rotate(_BidirectionalIterator __first,
1280 _BidirectionalIterator __middle,
1281 _BidirectionalIterator __last,
1282 bidirectional_iterator_tag)
1283 {
1284 // concept requirements
1285 __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
1286 _BidirectionalIterator>)
1287
// Degenerate rotations: nothing to do.
1288 if (__first == __middle)
1289 return __last;
1290 else if (__last == __middle)
1291 return __first;
1292
1293 std::__reverse(__first, __middle, bidirectional_iterator_tag());
1294 std::__reverse(__middle, __last, bidirectional_iterator_tag());
1295
1296 while (__first != __middle && __middle != __last)
1297 {
1298 std::iter_swap(__first, --__last);
1299 ++__first;
1300 }
1301
// One of the two halves was shorter; un-reverse whichever part remains.
1302 if (__first == __middle)
1303 {
1304 std::__reverse(__middle, __last, bidirectional_iterator_tag());
1305 return __last;
1306 }
1307 else
1308 {
1309 std::__reverse(__first, __middle, bidirectional_iterator_tag());
1310 return __first;
1311 }
1312 }
1313
/// This is a helper function for the rotate algorithm: random-access
/// version.  Uses the gcd-cycle technique — repeatedly swaps a block of
/// __k elements forward, shrinking the problem (like the Euclidean
/// algorithm on __k and __n - __k), with a fast memmove-style special
/// case for trivially-copyable values when only one element remains to
/// cycle.  Returns __first + (__last - __middle), the new position of
/// the old first element.
template<typename _RandomAccessIterator>
  _RandomAccessIterator
  __rotate(_RandomAccessIterator __first,
           _RandomAccessIterator __middle,
           _RandomAccessIterator __last,
           std::random_access_iterator_tag)
  {
    // Degenerate rotations: nothing moves.
    if (__first == __middle)
      return __last;
    else if (__last == __middle)
      return __first;

    typedef typename std::iterator_traits<_RandomAccessIterator>::difference_type
      _Distance;
    typedef typename std::iterator_traits<_RandomAccessIterator>::value_type
      _ValueType;

    _Distance __n = __last  - __first;   // total length
    _Distance __k = __middle - __first;  // length of the left block

    // Equal halves: a single block-swap finishes the job.
    if (__k == __n - __k)
      {
        std::swap_ranges(__first, __middle, __middle);
        return __middle;
      }

    _RandomAccessIterator __p = __first;
    _RandomAccessIterator __ret = __first + (__last - __middle);

    for (;;)
      {
        if (__k < __n - __k)
          {
            // Left block is shorter.  For trivially-copyable values and
            // a single leftover element, rotate by one with a bulk move.
            if (__is_pod(_ValueType) && __k == 1)
              {
                _ValueType __t = std::move(*__p);
                std::move(__p + 1, __p + __n, __p);
                *(__p + __n - 1) = std::move(__t);
                return __ret;
              }
            _RandomAccessIterator __q = __p + __k;
            for (_Distance __i = 0; __i < __n - __k; ++ __i)
              {
                std::iter_swap(__p, __q);
                ++__p;
                ++__q;
              }
            __n %= __k;
            if (__n == 0)
              return __ret;
            std::swap(__n, __k);
            __k = __n - __k;
          }
        else
          {
            // Right block is shorter: mirror image of the branch above,
            // swapping backwards from the end.
            __k = __n - __k;
            if (__is_pod(_ValueType) && __k == 1)
              {
                _ValueType __t = std::move(*(__p + __n - 1));
                std::move_backward(__p, __p + __n - 1, __p + __n);
                *__p = std::move(__t);
                return __ret;
              }
            _RandomAccessIterator __q = __p + __n;
            __p = __q - __k;
            for (_Distance __i = 0; __i < __n - __k; ++ __i)
              {
                --__p;
                --__q;
                std::iter_swap(__p, __q);
              }
            __n %= __k;
            if (__n == 0)
              return __ret;
            std::swap(__n, __k);
          }
      }
  }
1398
1399 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1400 // DR 488. rotate throws away useful information
1401 /**
1402 * @brief Rotate the elements of a sequence.
1403 * @ingroup mutating_algorithms
1404 * @param __first A forward iterator.
1405 * @param __middle A forward iterator.
1406 * @param __last A forward iterator.
1407 * @return first + (last - middle).
1408 *
1409 * Rotates the elements of the range @p [__first,__last) by
1410 * @p (__middle - __first) positions so that the element at @p __middle
1411 * is moved to @p __first, the element at @p __middle+1 is moved to
1412 * @p __first+1 and so on for each element in the range
1413 * @p [__first,__last).
1414 *
1415 * This effectively swaps the ranges @p [__first,__middle) and
1416 * @p [__middle,__last).
1417 *
1418 * Performs
1419 * @p *(__first+(n+(__last-__middle))%(__last-__first))=*(__first+n)
1420 * for each @p n in the range @p [0,__last-__first).
1421 */
1422 template<typename _ForwardIterator>
1423 _GLIBCXX20_CONSTEXPR
1424 inline _ForwardIterator
1425 rotate(_ForwardIterator __first, _ForwardIterator __middle,
1426 _ForwardIterator __last)
1427 {
1428 // concept requirements
1429 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
1430 _ForwardIterator>)
1431 __glibcxx_requires_valid_range(__first, __middle);
1432 __glibcxx_requires_valid_range(__middle, __last);
1433
// Tag dispatch: picks the forward / bidirectional / random-access
// __rotate helper defined above based on the iterator category.
1434 return std::__rotate(__first, __middle, __last,
1435 std::__iterator_category(__first));
1436 }
1437
1438 } // namespace _V2
1439
/**
 * @brief Copy a sequence, rotating its elements.
 * @ingroup mutating_algorithms
 * @param __first   A forward iterator.
 * @param __middle  A forward iterator.
 * @param __last    A forward iterator.
 * @param __result  An output iterator.
 * @return An iterator designating the end of the resulting sequence.
 *
 * Copies the elements of the range @p [__first,__last) to the range
 * beginning at @p __result, rotated by @p (__middle-__first) positions:
 * the element at @p __middle lands at @p __result, and so on.  The
 * source and destination ranges must not overlap.
 */
template<typename _ForwardIterator, typename _OutputIterator>
  inline _OutputIterator
  rotate_copy(_ForwardIterator __first, _ForwardIterator __middle,
              _ForwardIterator __last, _OutputIterator __result)
  {
    // Two plain copies: first the tail [__middle,__last), then the
    // head [__first,__middle) appended after it.
    return std::copy(__first, __middle,
                     std::copy(__middle, __last, __result));
  }
1476
/// This is a helper function...
/// Forward-iterator partition: moves every element satisfying __pred
/// (called with element VALUES) before every element that does not,
/// using single-pass swaps.  Not stable.  Returns the partition point
/// (first element for which __pred is false).
template<typename _ForwardIterator, typename _Predicate>
  _ForwardIterator
  __partition(_ForwardIterator __first, _ForwardIterator __last,
              _Predicate __pred, std::forward_iterator_tag)
  {
    // Empty range: nothing to partition.
    if (__first == __last)
      return __first;

    // Skip the leading run that is already in place.
    while (__pred(*__first))
      if (++__first == __last)
        return __first;

    _ForwardIterator __next = __first;

    // __first tracks the partition point; every later pred-true element
    // is swapped back to it.
    while (++__next != __last)
      if (__pred(*__next))
        {
          std::iter_swap(__first, __next);
          ++__first;
        }

    return __first;
  }
1502
/// This is a helper function...
/// Bidirectional-iterator partition: scans forward for a pred-false
/// element and backward for a pred-true element, swapping the pair,
/// until the cursors meet.  __pred is called with element VALUES.
/// Not stable.  Returns the partition point.
template<typename _BidirectionalIterator, typename _Predicate>
  _BidirectionalIterator
  __partition(_BidirectionalIterator __first, _BidirectionalIterator __last,
              _Predicate __pred, std::bidirectional_iterator_tag)
  {
    while (true)
      {
        // Advance __first to the first misplaced (pred-false) element.
        while (true)
          if (__first == __last)
            return __first;
          else if (__pred(*__first))
            ++__first;
          else
            break;
        // Retreat __last to the last misplaced (pred-true) element.
        --__last;
        while (true)
          if (__first == __last)
            return __first;
          else if (!bool(__pred(*__last)))
            --__last;
          else
            break;
        // Both are misplaced: swap them and keep going.
        std::iter_swap(__first, __last);
        ++__first;
      }
  }
1531
1532 // partition
1533
1534 /// This is a helper function...
1535 /// Requires __first != __last and !__pred(__first)
1536 /// and __len == distance(__first, __last).
1537 ///
1538 /// !__pred(__first) allows us to guarantee that we don't
1539 /// move-assign an element onto itself.
// Buffer-assisted stable partition.  If the whole range fits in the
// scratch buffer, pred-true elements are compacted in place while
// pred-false elements go to the buffer, then the buffer contents are
// moved back after them (one __pred call per element, order preserved).
// Otherwise the range is split in half, each half partitioned
// recursively, and the two middles joined with std::rotate.
// NOTE: __pred here is an __ops-style wrapper invoked with an ITERATOR.
1540 template<typename _ForwardIterator, typename _Pointer, typename _Predicate,
1541 typename _Distance>
1542 _ForwardIterator
1543 __stable_partition_adaptive(_ForwardIterator __first,
1544 _ForwardIterator __last,
1545 _Predicate __pred, _Distance __len,
1546 _Pointer __buffer,
1547 _Distance __buffer_size)
1548 {
// A single element is trivially partitioned (and by precondition it is
// pred-false, so it IS the partition point).
1549 if (__len == 1)
1550 return __first;
1551
1552 if (__len <= __buffer_size)
1553 {
1554 _ForwardIterator __result1 = __first;
1555 _Pointer __result2 = __buffer;
1556
1557 // The precondition guarantees that !__pred(__first), so
1558 // move that element to the buffer before starting the loop.
1559 // This ensures that we only call __pred once per element.
1560 *__result2 = _GLIBCXX_MOVE(*__first)std::move(*__first);
1561 ++__result2;
1562 ++__first;
1563 for (; __first != __last; ++__first)
1564 if (__pred(__first))
1565 {
1566 *__result1 = _GLIBCXX_MOVE(*__first)std::move(*__first);
1567 ++__result1;
1568 }
1569 else
1570 {
1571 *__result2 = _GLIBCXX_MOVE(*__first)std::move(*__first);
1572 ++__result2;
1573 }
1574
// Move the buffered (pred-false) elements back after the compacted
// pred-true prefix; __result1 is the partition point.
1575 _GLIBCXX_MOVE3(__buffer, __result2, __result1)std::move(__buffer, __result2, __result1);
1576 return __result1;
1577 }
1578
// Buffer too small: divide and conquer.
1579 _ForwardIterator __middle = __first;
1580 std::advance(__middle, __len / 2);
1581 _ForwardIterator __left_split =
1582 std::__stable_partition_adaptive(__first, __middle, __pred,
1583 __len / 2, __buffer,
1584 __buffer_size);
1585
1586 // Advance past true-predicate values to satisfy this
1587 // function's preconditions.
1588 _Distance __right_len = __len - __len / 2;
1589 _ForwardIterator __right_split =
1590 std::__find_if_not_n(__middle, __right_len, __pred);
1591
1592 if (__right_len)
1593 __right_split =
1594 std::__stable_partition_adaptive(__right_split, __last, __pred,
1595 __right_len,
1596 __buffer, __buffer_size);
1597
// Rotate the left half's pred-false block past the right half's
// pred-true block to merge the two partitions.
1598 return std::rotate(__left_split, __middle, __right_split);
1599 }
1600
// Entry point for stable_partition: skips the already-true prefix, then
// partitions the remainder with __stable_partition_adaptive, using a
// _Temporary_buffer as scratch space (the adaptive helper degrades
// gracefully if the buffer allocation came up short).
1601 template<typename _ForwardIterator, typename _Predicate>
1602 _ForwardIterator
1603 __stable_partition(_ForwardIterator __first, _ForwardIterator __last,
1604 _Predicate __pred)
1605 {
// Establish the helper's precondition !__pred(__first).
1606 __first = std::__find_if_not(__first, __last, __pred);
1607
1608 if (__first == __last)
1609 return __first;
1610
1611 typedef typename iterator_traits<_ForwardIterator>::value_type
1612 _ValueType;
1613 typedef typename iterator_traits<_ForwardIterator>::difference_type
1614 _DistanceType;
1615
1616 _Temporary_buffer<_ForwardIterator, _ValueType>
1617 __buf(__first, std::distance(__first, __last));
1618 return
1619 std::__stable_partition_adaptive(__first, __last, __pred,
1620 _DistanceType(__buf.requested_size()),
1621 __buf.begin(),
1622 _DistanceType(__buf.size()));
1623 }
1624
1625 /**
1626 * @brief Move elements for which a predicate is true to the beginning
1627 * of a sequence, preserving relative ordering.
1628 * @ingroup mutating_algorithms
1629 * @param __first A forward iterator.
1630 * @param __last A forward iterator.
1631 * @param __pred A predicate functor.
1632 * @return An iterator @p middle such that @p __pred(i) is true for each
1633 * iterator @p i in the range @p [first,middle) and false for each @p i
1634 * in the range @p [middle,last).
1635 *
1636 * Performs the same function as @p partition() with the additional
1637 * guarantee that the relative ordering of elements in each group is
1638 * preserved, so any two elements @p x and @p y in the range
1639 * @p [__first,__last) such that @p __pred(x)==__pred(y) will have the same
1640 * relative ordering after calling @p stable_partition().
1641 */
1642 template<typename _ForwardIterator, typename _Predicate>
1643 inline _ForwardIterator
1644 stable_partition(_ForwardIterator __first, _ForwardIterator __last,
1645 _Predicate __pred)
1646 {
1647 // concept requirements
1648 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
1649 _ForwardIterator>)
1650 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
1651 typename iterator_traits<_ForwardIterator>::value_type>)
1652 __glibcxx_requires_valid_range(__first, __last);
1653
// Wrap the value-predicate into an iterator-predicate before handing
// off to the internal implementation.
1654 return std::__stable_partition(__first, __last,
1655 __gnu_cxx::__ops::__pred_iter(__pred));
1656 }
1657
1658 /// This is a helper function for the sort routines.
// Selects the (__middle - __first) smallest elements (per __comp, which
// is invoked with ITERATORS) into [__first,__middle): build a heap over
// the prefix, then for each remaining element that compares below the
// heap top, pop the top out into that position and sift the element in.
1659 template<typename _RandomAccessIterator, typename _Compare>
1660 _GLIBCXX20_CONSTEXPR
1661 void
1662 __heap_select(_RandomAccessIterator __first,
1663 _RandomAccessIterator __middle,
1664 _RandomAccessIterator __last, _Compare __comp)
1665 {
1666 std::__make_heap(__first, __middle, __comp);
1667 for (_RandomAccessIterator __i = __middle; __i < __last; ++__i)
1668 if (__comp(__i, __first))
1669 std::__pop_heap(__first, __middle, __i, __comp);
1670 }
1671
1672 // partial_sort
1673
// Internal engine for partial_sort_copy: fill the destination with the
// leading source elements, heapify it, then for each remaining source
// element that compares below the heap top (per __comp, invoked with
// ITERATORS), push it through the heap with __adjust_heap.  Finally
// sort the heap.  Returns one past the last element actually written.
1674 template<typename _InputIterator, typename _RandomAccessIterator,
1675 typename _Compare>
1676 _GLIBCXX20_CONSTEXPR
1677 _RandomAccessIterator
1678 __partial_sort_copy(_InputIterator __first, _InputIterator __last,
1679 _RandomAccessIterator __result_first,
1680 _RandomAccessIterator __result_last,
1681 _Compare __comp)
1682 {
1683 typedef typename iterator_traits<_InputIterator>::value_type
1684 _InputValueType;
1685 typedef iterator_traits<_RandomAccessIterator> _RItTraits;
1686 typedef typename _RItTraits::difference_type _DistanceType;
1687
// Empty destination: nothing can be copied.
1688 if (__result_first == __result_last)
1689 return __result_last;
1690 _RandomAccessIterator __result_real_last = __result_first;
1691 while (__first != __last && __result_real_last != __result_last)
1692 {
1693 *__result_real_last = *__first;
1694 ++__result_real_last;
1695 ++__first;
1696 }
1697
1698 std::__make_heap(__result_first, __result_real_last, __comp);
1699 while (__first != __last)
1700 {
1701 if (__comp(__first, __result_first))
1702 std::__adjust_heap(__result_first, _DistanceType(0),
1703 _DistanceType(__result_real_last
1704 - __result_first),
1705 _InputValueType(*__first), __comp);
1706 ++__first;
1707 }
1708 std::__sort_heap(__result_first, __result_real_last, __comp);
1709 return __result_real_last;
1710 }
1711
1712 /**
1713 * @brief Copy the smallest elements of a sequence.
1714 * @ingroup sorting_algorithms
1715 * @param __first An iterator.
1716 * @param __last Another iterator.
1717 * @param __result_first A random-access iterator.
1718 * @param __result_last Another random-access iterator.
1719 * @return An iterator indicating the end of the resulting sequence.
1720 *
1721 * Copies and sorts the smallest N values from the range @p [__first,__last)
1722 * to the range beginning at @p __result_first, where the number of
1723 * elements to be copied, @p N, is the smaller of @p (__last-__first) and
1724 * @p (__result_last-__result_first).
1725 * After the sort if @e i and @e j are iterators in the range
1726 * @p [__result_first,__result_first+N) such that i precedes j then
1727 * *j<*i is false.
1728 * The value returned is @p __result_first+N.
1729 */
1730 template<typename _InputIterator, typename _RandomAccessIterator>
1731 _GLIBCXX20_CONSTEXPR
1732 inline _RandomAccessIterator
1733 partial_sort_copy(_InputIterator __first, _InputIterator __last,
1734 _RandomAccessIterator __result_first,
1735 _RandomAccessIterator __result_last)
1736 {
1737#ifdef _GLIBCXX_CONCEPT_CHECKS
1738 typedef typename iterator_traits<_InputIterator>::value_type
1739 _InputValueType;
1740 typedef typename iterator_traits<_RandomAccessIterator>::value_type
1741 _OutputValueType;
1742#endif
1743
1744 // concept requirements
1745 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
1746 __glibcxx_function_requires(_ConvertibleConcept<_InputValueType,
1747 _OutputValueType>)
1748 __glibcxx_function_requires(_LessThanOpConcept<_InputValueType,
1749 _OutputValueType>)
1750 __glibcxx_function_requires(_LessThanComparableConcept<_OutputValueType>)
1751 __glibcxx_requires_valid_range(__first, __last);
1752 __glibcxx_requires_irreflexive(__first, __last);
1753 __glibcxx_requires_valid_range(__result_first, __result_last);
1754
// operator< variant: delegate with the iterator-level less-than functor.
1755 return std::__partial_sort_copy(__first, __last,
1756 __result_first, __result_last,
1757 __gnu_cxx::__ops::__iter_less_iter());
1758 }
1759
1760 /**
1761 * @brief Copy the smallest elements of a sequence using a predicate for
1762 * comparison.
1763 * @ingroup sorting_algorithms
1764 * @param __first An input iterator.
1765 * @param __last Another input iterator.
1766 * @param __result_first A random-access iterator.
1767 * @param __result_last Another random-access iterator.
1768 * @param __comp A comparison functor.
1769 * @return An iterator indicating the end of the resulting sequence.
1770 *
1771 * Copies and sorts the smallest N values from the range @p [__first,__last)
1772 * to the range beginning at @p result_first, where the number of
1773 * elements to be copied, @p N, is the smaller of @p (__last-__first) and
1774 * @p (__result_last-__result_first).
1775 * After the sort if @e i and @e j are iterators in the range
1776 * @p [__result_first,__result_first+N) such that i precedes j then
1777 * @p __comp(*j,*i) is false.
1778 * The value returned is @p __result_first+N.
1779 */
1780 template<typename _InputIterator, typename _RandomAccessIterator,
1781 typename _Compare>
1782 _GLIBCXX20_CONSTEXPR
1783 inline _RandomAccessIterator
1784 partial_sort_copy(_InputIterator __first, _InputIterator __last,
1785 _RandomAccessIterator __result_first,
1786 _RandomAccessIterator __result_last,
1787 _Compare __comp)
1788 {
1789#ifdef _GLIBCXX_CONCEPT_CHECKS
1790 typedef typename iterator_traits<_InputIterator>::value_type
1791 _InputValueType;
1792 typedef typename iterator_traits<_RandomAccessIterator>::value_type
1793 _OutputValueType;
1794#endif
1795
1796 // concept requirements
1797 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
1798 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
1799 _RandomAccessIterator>)
1800 __glibcxx_function_requires(_ConvertibleConcept<_InputValueType,
1801 _OutputValueType>)
1802 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
1803 _InputValueType, _OutputValueType>)
1804 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
1805 _OutputValueType, _OutputValueType>)
1806 __glibcxx_requires_valid_range(__first, __last);
1807 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
1808 __glibcxx_requires_valid_range(__result_first, __result_last);
1809
// User-comparator variant: wrap the value comparator into an
// iterator-level comparator before delegating.
1810 return std::__partial_sort_copy(__first, __last,
1811 __result_first, __result_last,
1812 __gnu_cxx::__ops::__iter_comp_iter(__comp));
1813 }
1814
/// This is a helper function for the sort routine.
/// Inserts the element at __last into the sorted run that precedes it,
/// shifting greater elements up by one.  "Unguarded": there is no
/// begin-of-range check, so the caller must guarantee that some earlier
/// element does NOT compare after the value (i.e. the scan terminates).
/// __comp is invoked as __comp(value, iterator).
template<typename _RandomAccessIterator, typename _Compare>
  void
  __unguarded_linear_insert(_RandomAccessIterator __last,
                            _Compare __comp)
  {
    // Hold the element being inserted while the hole shifts left.
    typename std::iterator_traits<_RandomAccessIterator>::value_type
      __val = std::move(*__last);
    _RandomAccessIterator __next = __last;
    --__next;
    while (__comp(__val, __next))
      {
        *__last = std::move(*__next);
        __last = __next;
        --__next;
      }
    *__last = std::move(__val);
  }
1834
1835 /// This is a helper function for the sort routine.
// Straight insertion sort over [__first,__last); __comp is invoked with
// ITERATORS.  If the new element belongs before *__first, the whole
// prefix is shifted in one bulk move (avoiding an unguarded scan that
// would run off the front); otherwise the unguarded insert is safe
// because *__first is a valid sentinel.
1836 template<typename _RandomAccessIterator, typename _Compare>
1837 _GLIBCXX20_CONSTEXPR
1838 void
1839 __insertion_sort(_RandomAccessIterator __first,
1840 _RandomAccessIterator __last, _Compare __comp)
1841 {
1842 if (__first == __last) return;
1843
1844 for (_RandomAccessIterator __i = __first + 1; __i != __last; ++__i)
1845 {
1846 if (__comp(__i, __first))
1847 {
1848 typename iterator_traits<_RandomAccessIterator>::value_type
1849 __val = _GLIBCXX_MOVE(*__i)std::move(*__i);
1850 _GLIBCXX_MOVE_BACKWARD3(__first, __i, __i + 1)std::move_backward(__first, __i, __i + 1);
1851 *__first = _GLIBCXX_MOVE(__val)std::move(__val);
1852 }
1853 else
1854 std::__unguarded_linear_insert(__i,
1855 __gnu_cxx::__ops::__val_comp_iter(__comp));
1856 }
1857 }
1858
1859 /// This is a helper function for the sort routine.
// Insertion sort with NO front-of-range guard: every element of
// [__first,__last) must already have a not-greater element somewhere
// before it (callers arrange this by pre-sorting a leading block), so
// each unguarded insert is guaranteed to terminate.
1860 template<typename _RandomAccessIterator, typename _Compare>
1861 _GLIBCXX20_CONSTEXPR
1862 inline void
1863 __unguarded_insertion_sort(_RandomAccessIterator __first,
1864 _RandomAccessIterator __last, _Compare __comp)
1865 {
1866 for (_RandomAccessIterator __i = __first; __i != __last; ++__i)
1867 std::__unguarded_linear_insert(__i,
1868 __gnu_cxx::__ops::__val_comp_iter(__comp));
1869 }
1870
1871 /**
1872 * @doctodo
1873 * This controls some aspect of the sort routines.
1874 */
// Segment-size cutoff used by __introsort_loop and
// __final_insertion_sort below: ranges of at most this many elements
// are left for (or handled by) insertion sort.
1875 enum { _S_threshold = 16 };
1876
1877 /// This is a helper function for the sort routine.
// Final pass after introsort: the range consists of segments of at most
// _S_threshold elements whose segment boundaries are already globally
// ordered.  Fully sort the first _S_threshold elements with a guarded
// insertion sort; the rest can then use the faster unguarded variant,
// since every element has a sentinel within the preceding block.
1878 template<typename _RandomAccessIterator, typename _Compare>
1879 _GLIBCXX20_CONSTEXPR
1880 void
1881 __final_insertion_sort(_RandomAccessIterator __first,
1882 _RandomAccessIterator __last, _Compare __comp)
1883 {
1884 if (__last - __first > int(_S_threshold))
1885 {
1886 std::__insertion_sort(__first, __first + int(_S_threshold), __comp);
1887 std::__unguarded_insertion_sort(__first + int(_S_threshold), __last,
1888 __comp);
1889 }
1890 else
1891 std::__insertion_sort(__first, __last, __comp);
1892 }
1893
/// This is a helper function...
/// Hoare-style partition of [__first,__last) around *__pivot; __comp is
/// invoked with two ITERATORS.  "Unguarded": neither inner scan checks
/// range bounds, so the caller must guarantee sentinel elements exist
/// on both sides (see __unguarded_partition_pivot, which arranges a
/// median at the front and uses __last - 1 as the right sentinel).
/// Returns the split point: elements before it do not compare after the
/// pivot.
template<typename _RandomAccessIterator, typename _Compare>
  _RandomAccessIterator
  __unguarded_partition(_RandomAccessIterator __first,
                        _RandomAccessIterator __last,
                        _RandomAccessIterator __pivot, _Compare __comp)
  {
    while (true)
      {
        // Scan right past elements strictly before the pivot...
        while (__comp(__first, __pivot))
          ++__first;
        --__last;
        // ...and left past elements strictly after it.
        while (__comp(__pivot, __last))
          --__last;
        if (!(__first < __last))
          return __first;
        std::iter_swap(__first, __last);
        ++__first;
      }
  }
1915
1916 /// This is a helper function...
// Median-of-three quicksort partition step: move the median of
// {second, middle, last-1} to *__first to act as the pivot, then run
// the unguarded partition over [__first+1,__last).  The median choice
// guarantees sentinels exist on both sides for the unguarded scans.
1917 template<typename _RandomAccessIterator, typename _Compare>
1918 _GLIBCXX20_CONSTEXPR
1919 inline _RandomAccessIterator
1920 __unguarded_partition_pivot(_RandomAccessIterator __first,
1921 _RandomAccessIterator __last, _Compare __comp)
1922 {
1923 _RandomAccessIterator __mid = __first + (__last - __first) / 2;
1924 std::__move_median_to_first(__first, __first + 1, __mid, __last - 1,
1925 __comp);
1926 return std::__unguarded_partition(__first + 1, __last, __first, __comp);
1927 }
1928
// Heap-based partial sort: select the smallest (__middle - __first)
// elements into the prefix, then sort that prefix.
1929 template<typename _RandomAccessIterator, typename _Compare>
1930 _GLIBCXX20_CONSTEXPR
1931 inline void
1932 __partial_sort(_RandomAccessIterator __first,
1933 _RandomAccessIterator __middle,
1934 _RandomAccessIterator __last,
1935 _Compare __comp)
1936 {
1937 std::__heap_select(__first, __middle, __last, __comp);
1938 std::__sort_heap(__first, __middle, __comp);
1939 }
1940
1941 /// This is a helper function for the sort routine.
// Introsort core: quicksort with a recursion-depth budget.  When the
// budget is exhausted (adversarial input), fall back to heapsort via
// __partial_sort over the whole segment.  Recurses on the right part
// and loops on the left to bound stack depth; segments of at most
// _S_threshold elements are left unsorted for a later insertion-sort
// pass (see __sort).
1942 template<typename _RandomAccessIterator, typename _Size, typename _Compare>
1943 _GLIBCXX20_CONSTEXPR
1944 void
1945 __introsort_loop(_RandomAccessIterator __first,
1946 _RandomAccessIterator __last,
1947 _Size __depth_limit, _Compare __comp)
1948 {
1949 while (__last - __first > int(_S_threshold))
1950 {
1951 if (__depth_limit == 0)
1952 {
1953 std::__partial_sort(__first, __last, __last, __comp);
1954 return;
1955 }
1956 --__depth_limit;
1957 _RandomAccessIterator __cut =
1958 std::__unguarded_partition_pivot(__first, __last, __comp);
1959 std::__introsort_loop(__cut, __last, __depth_limit, __comp);
1960 __last = __cut;
1961 }
1962 }
1963
1964 // sort
1965
// Top-level internal sort: introsort with a depth budget of
// 2*floor(log2(n)), followed by a single insertion-sort pass that
// finishes the small segments introsort left behind.
1966 template<typename _RandomAccessIterator, typename _Compare>
1967 _GLIBCXX20_CONSTEXPR
1968 inline void
1969 __sort(_RandomAccessIterator __first, _RandomAccessIterator __last,
1970 _Compare __comp)
1971 {
1972 if (__first != __last)
1973 {
1974 std::__introsort_loop(__first, __last,
1975 std::__lg(__last - __first) * 2,
1976 __comp);
1977 std::__final_insertion_sort(__first, __last, __comp);
1978 }
1979 }
1980
1981 template<typename _RandomAccessIterator, typename _Size, typename _Compare>
1982 _GLIBCXX20_CONSTEXPR
1983 void
1984 __introselect(_RandomAccessIterator __first, _RandomAccessIterator __nth,
1985 _RandomAccessIterator __last, _Size __depth_limit,
1986 _Compare __comp)
1987 {
1988 while (__last - __first > 3)
1989 {
1990 if (__depth_limit == 0)
1991 {
1992 std::__heap_select(__first, __nth + 1, __last, __comp);
1993 // Place the nth largest element in its final position.
1994 std::iter_swap(__first, __nth);
1995 return;
1996 }
1997 --__depth_limit;
1998 _RandomAccessIterator __cut =
1999 std::__unguarded_partition_pivot(__first, __last, __comp);
2000 if (__cut <= __nth)
2001 __first = __cut;
2002 else
2003 __last = __cut;
2004