Bug Summary

File: llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
Warning: line 1271, column 11
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name DeadStoreElimination.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/build-llvm/lib/Transforms/Scalar -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-20-210906-17489-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp

1//===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The code below implements dead store elimination using MemorySSA. It uses
10// the following general approach: given a MemoryDef, walk upwards to find
11// clobbering MemoryDefs that may be killed by the starting def. Then check
12// that there are no uses that may read the location of the original MemoryDef
13// in between both MemoryDefs. A bit more concretely:
14//
15// For all MemoryDefs StartDef:
16// 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking
17// upwards.
18// 2. Check that there are no reads between EarlierAccess and the StartDef by
19// checking all uses starting at EarlierAccess and walking until we see
20// StartDef.
21// 3. For each found CurrentDef, check that:
22// 1. There are no barrier instructions between CurrentDef and StartDef (like
23// throws or stores with ordering constraints).
24// 2. StartDef is executed whenever CurrentDef is executed.
25// 3. StartDef completely overwrites CurrentDef.
26// 4. Erase CurrentDef from the function and MemorySSA.
27//
28//===----------------------------------------------------------------------===//
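To make the approach above concrete, here is a minimal source-level sketch (hypothetical code, not part of this file) of the pattern DSE targets: a store that is completely overwritten by a later store with no intervening read.

// Minimal illustration of the kill pattern described above, assuming
// nothing else reads or writes *p in between: the first store is the
// dead EarlierAccess, the second is the killing StartDef.
void example(int *p) {
  *p = 1; // dead store, removable
  *p = 2; // killing store
}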
29
30#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31#include "llvm/ADT/APInt.h"
32#include "llvm/ADT/DenseMap.h"
33#include "llvm/ADT/MapVector.h"
34#include "llvm/ADT/PostOrderIterator.h"
35#include "llvm/ADT/SetVector.h"
36#include "llvm/ADT/SmallPtrSet.h"
37#include "llvm/ADT/SmallVector.h"
38#include "llvm/ADT/Statistic.h"
39#include "llvm/ADT/StringRef.h"
40#include "llvm/Analysis/AliasAnalysis.h"
41#include "llvm/Analysis/CaptureTracking.h"
42#include "llvm/Analysis/GlobalsModRef.h"
43#include "llvm/Analysis/MemoryBuiltins.h"
44#include "llvm/Analysis/MemoryLocation.h"
45#include "llvm/Analysis/MemorySSA.h"
46#include "llvm/Analysis/MemorySSAUpdater.h"
47#include "llvm/Analysis/PostDominators.h"
48#include "llvm/Analysis/TargetLibraryInfo.h"
49#include "llvm/Analysis/ValueTracking.h"
50#include "llvm/IR/Argument.h"
51#include "llvm/IR/BasicBlock.h"
52#include "llvm/IR/Constant.h"
53#include "llvm/IR/Constants.h"
54#include "llvm/IR/DataLayout.h"
55#include "llvm/IR/Dominators.h"
56#include "llvm/IR/Function.h"
57#include "llvm/IR/InstIterator.h"
58#include "llvm/IR/InstrTypes.h"
59#include "llvm/IR/Instruction.h"
60#include "llvm/IR/Instructions.h"
61#include "llvm/IR/IntrinsicInst.h"
62#include "llvm/IR/Intrinsics.h"
63#include "llvm/IR/LLVMContext.h"
64#include "llvm/IR/Module.h"
65#include "llvm/IR/PassManager.h"
66#include "llvm/IR/PatternMatch.h"
67#include "llvm/IR/Value.h"
68#include "llvm/InitializePasses.h"
69#include "llvm/Pass.h"
70#include "llvm/Support/Casting.h"
71#include "llvm/Support/CommandLine.h"
72#include "llvm/Support/Debug.h"
73#include "llvm/Support/DebugCounter.h"
74#include "llvm/Support/ErrorHandling.h"
75#include "llvm/Support/MathExtras.h"
76#include "llvm/Support/raw_ostream.h"
77#include "llvm/Transforms/Scalar.h"
78#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
79#include "llvm/Transforms/Utils/Local.h"
80#include <algorithm>
81#include <cassert>
82#include <cstddef>
83#include <cstdint>
84#include <iterator>
85#include <map>
86#include <utility>
87
88using namespace llvm;
89using namespace PatternMatch;
90
91#define DEBUG_TYPE "dse"
92
93STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
94STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
95STATISTIC(NumFastStores, "Number of stores deleted");
96STATISTIC(NumFastOther, "Number of other instrs removed");
97STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
98STATISTIC(NumModifiedStores, "Number of stores modified");
99STATISTIC(NumCFGChecks, "Number of stores modified");
100STATISTIC(NumCFGTries, "Number of stores modified");
101STATISTIC(NumCFGSuccess, "Number of stores modified");
102STATISTIC(NumGetDomMemoryDefPassed,
103 "Number of times a valid candidate is returned from getDomMemoryDef");
104STATISTIC(NumDomMemDefChecks,
105 "Number of iterations checking for reads in getDomMemoryDef");
106
107DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
108 "Controls which MemoryDefs are eliminated.");
109
110static cl::opt<bool>
111EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
112 cl::init(true), cl::Hidden,
113 cl::desc("Enable partial-overwrite tracking in DSE"));
114
115static cl::opt<bool>
116EnablePartialStoreMerging("enable-dse-partial-store-merging",
117 cl::init(true), cl::Hidden,
118 cl::desc("Enable partial store merging in DSE"));
119
120static cl::opt<unsigned>
121 MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
122 cl::desc("The number of memory instructions to scan for "
123 "dead store elimination (default = 100)"));
124static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
125 "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
126 cl::desc("The maximum number of steps while walking upwards to find "
127 "MemoryDefs that may be killed (default = 90)"));
128
129static cl::opt<unsigned> MemorySSAPartialStoreLimit(
130 "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
131 cl::desc("The maximum number candidates that only partially overwrite the "
132 "killing MemoryDef to consider"
133 " (default = 5)"));
134
135static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
136 "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
137 cl::desc("The number of MemoryDefs we consider as candidates to eliminated "
138 "other stores per basic block (default = 5000)"));
139
140static cl::opt<unsigned> MemorySSASameBBStepCost(
141 "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
142 cl::desc(
143 "The cost of a step in the same basic block as the killing MemoryDef"
144 "(default = 1)"));
145
146static cl::opt<unsigned>
147 MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
148 cl::Hidden,
149 cl::desc("The cost of a step in a different basic "
150 "block than the killing MemoryDef"
151 "(default = 5)"));
152
153static cl::opt<unsigned> MemorySSAPathCheckLimit(
154 "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
155 cl::desc("The maximum number of blocks to check when trying to prove that "
156 "all paths to an exit go through a killing block (default = 50)"));
157
158//===----------------------------------------------------------------------===//
159// Helper functions
160//===----------------------------------------------------------------------===//
161using OverlapIntervalsTy = std::map<int64_t, int64_t>;
162using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
163
164/// Does this instruction write some memory? This only returns true for things
165/// that we can analyze with other helpers below.
166static bool hasAnalyzableMemoryWrite(Instruction *I,
167 const TargetLibraryInfo &TLI) {
168 if (isa<StoreInst>(I))
77: 'I' is not a 'StoreInst'
78: Taking false branch
169 return true;
170 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
79: 'I' is not a 'IntrinsicInst'
79.1: 'II' is null
80: Taking false branch
171 switch (II->getIntrinsicID()) {
172 default:
173 return false;
174 case Intrinsic::memset:
175 case Intrinsic::memmove:
176 case Intrinsic::memcpy:
177 case Intrinsic::memcpy_inline:
178 case Intrinsic::memcpy_element_unordered_atomic:
179 case Intrinsic::memmove_element_unordered_atomic:
180 case Intrinsic::memset_element_unordered_atomic:
181 case Intrinsic::init_trampoline:
182 case Intrinsic::lifetime_end:
183 case Intrinsic::masked_store:
184 return true;
185 }
186 }
187 if (auto *CB = dyn_cast<CallBase>(I)) {
81: Assuming 'I' is a 'CallBase'
81.1: 'CB' is non-null
82: Taking true branch
188 LibFunc LF;
189 if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
83: Calling 'TargetLibraryInfo::getLibFunc'
88: Returning from 'TargetLibraryInfo::getLibFunc'
89: Assuming the condition is true
90: Calling 'TargetLibraryInfo::has'
99: Returning from 'TargetLibraryInfo::has'
100: Assuming the condition is true
101: Taking true branch
190 switch (LF) {
102: Control jumps to 'case LibFunc_strcpy:' at line 191
191 case LibFunc_strcpy:
192 case LibFunc_strncpy:
193 case LibFunc_strcat:
194 case LibFunc_strncat:
195 return true;
103: Returning the value 1, which participates in a condition later
196 default:
197 return false;
198 }
199 }
200 }
201 return false;
202}
203
204/// Return a Location stored to by the specified instruction. If isRemovable
205/// returns true, this function and getLocForRead completely describe the memory
206/// operations for this instruction.
207static MemoryLocation getLocForWrite(Instruction *Inst,
208 const TargetLibraryInfo &TLI) {
209 if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
210 return MemoryLocation::get(SI);
211
212 // memcpy/memmove/memset.
213 if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst))
214 return MemoryLocation::getForDest(MI);
215
216 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
217 switch (II->getIntrinsicID()) {
218 default:
219 return MemoryLocation(); // Unhandled intrinsic.
220 case Intrinsic::init_trampoline:
221 return MemoryLocation::getAfter(II->getArgOperand(0));
222 case Intrinsic::masked_store:
223 return MemoryLocation::getForArgument(II, 1, TLI);
224 case Intrinsic::lifetime_end: {
225 uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
226 return MemoryLocation(II->getArgOperand(1), Len);
227 }
228 }
229 }
230 if (auto *CB = dyn_cast<CallBase>(Inst))
231 // All the supported TLI functions so far happen to have dest as their
232 // first argument.
233 return MemoryLocation::getAfter(CB->getArgOperand(0));
234 return MemoryLocation();
235}
236
237/// If the value of this instruction and the memory it writes to is unused, may
238/// we delete this instruction?
239static bool isRemovable(Instruction *I) {
240 // Don't remove volatile/atomic stores.
241 if (StoreInst *SI = dyn_cast<StoreInst>(I))
242 return SI->isUnordered();
243
244 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
245 switch (II->getIntrinsicID()) {
246 default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
247 case Intrinsic::lifetime_end:
248 // Never remove dead lifetime_end's, e.g. because it is followed by a
249 // free.
250 return false;
251 case Intrinsic::init_trampoline:
252 // Always safe to remove init_trampoline.
253 return true;
254 case Intrinsic::memset:
255 case Intrinsic::memmove:
256 case Intrinsic::memcpy:
257 case Intrinsic::memcpy_inline:
258 // Don't remove volatile memory intrinsics.
259 return !cast<MemIntrinsic>(II)->isVolatile();
260 case Intrinsic::memcpy_element_unordered_atomic:
261 case Intrinsic::memmove_element_unordered_atomic:
262 case Intrinsic::memset_element_unordered_atomic:
263 case Intrinsic::masked_store:
264 return true;
265 }
266 }
267
268 // note: only get here for calls with analyzable writes - i.e. libcalls
269 if (auto *CB = dyn_cast<CallBase>(I))
270 return CB->use_empty();
271
272 return false;
273}
274
275/// Returns true if the end of this instruction can be safely shortened in
276/// length.
277static bool isShortenableAtTheEnd(Instruction *I) {
278 // Don't shorten stores for now
279 if (isa<StoreInst>(I))
280 return false;
281
282 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
283 switch (II->getIntrinsicID()) {
284 default: return false;
285 case Intrinsic::memset:
286 case Intrinsic::memcpy:
287 case Intrinsic::memcpy_element_unordered_atomic:
288 case Intrinsic::memset_element_unordered_atomic:
289 // Do shorten memory intrinsics.
290 // FIXME: Add memmove if it's also safe to transform.
291 return true;
292 }
293 }
294
295 // Don't shorten libcall calls for now.
296
297 return false;
298}
299
300/// Returns true if the beginning of this instruction can be safely shortened
301/// in length.
302static bool isShortenableAtTheBeginning(Instruction *I) {
303 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
304 // easily done by offsetting the source address.
305 return isa<AnyMemSetInst>(I);
306}
307
308static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
309 const TargetLibraryInfo &TLI,
310 const Function *F) {
311 uint64_t Size;
312 ObjectSizeOpts Opts;
313 Opts.NullIsUnknownSize = NullPointerIsDefined(F);
314
315 if (getObjectSize(V, Size, DL, &TLI, Opts))
316 return Size;
317 return MemoryLocation::UnknownSize;
318}
319
320namespace {
321
322enum OverwriteResult {
323 OW_Begin,
324 OW_Complete,
325 OW_End,
326 OW_PartialEarlierWithFullLater,
327 OW_MaybePartial,
328 OW_Unknown
329};
330
331} // end anonymous namespace
332
333/// Check if two instructions are masked stores that completely
334/// overwrite one another. More specifically, \p Later has to
335/// overwrite \p Earlier.
336static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later,
337 const Instruction *Earlier,
338 BatchAAResults &AA) {
339 const auto *IIL = dyn_cast<IntrinsicInst>(Later);
340 const auto *IIE = dyn_cast<IntrinsicInst>(Earlier);
341 if (IIL == nullptr || IIE == nullptr)
342 return OW_Unknown;
343 if (IIL->getIntrinsicID() != Intrinsic::masked_store ||
344 IIE->getIntrinsicID() != Intrinsic::masked_store)
345 return OW_Unknown;
346 // Pointers.
347 Value *LP = IIL->getArgOperand(1)->stripPointerCasts();
348 Value *EP = IIE->getArgOperand(1)->stripPointerCasts();
349 if (LP != EP && !AA.isMustAlias(LP, EP))
350 return OW_Unknown;
351 // Masks.
352 // TODO: check that Later's mask is a superset of the Earlier's mask.
353 if (IIL->getArgOperand(3) != IIE->getArgOperand(3))
354 return OW_Unknown;
355 return OW_Complete;
356}
357
358/// Return 'OW_Complete' if a store to the 'Later' location completely
359/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
360/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
361/// beginning of the 'Earlier' location is overwritten by 'Later'.
362/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
363/// overwritten by a later (smaller) store which doesn't write outside the big
364/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
365/// NOTE: This function must only be called if both \p Later and \p Earlier
366/// write to the same underlying object with valid \p EarlierOff and \p
367/// LaterOff.
368static OverwriteResult isPartialOverwrite(const MemoryLocation &Later,
369 const MemoryLocation &Earlier,
370 int64_t EarlierOff, int64_t LaterOff,
371 Instruction *DepWrite,
372 InstOverlapIntervalsTy &IOL) {
373 const uint64_t LaterSize = Later.Size.getValue();
374 const uint64_t EarlierSize = Earlier.Size.getValue();
375 // We may now overlap, although the overlap is not complete. There might also
376 // be other incomplete overlaps, and together, they might cover the complete
377 // earlier write.
378 // Note: The correctness of this logic depends on the fact that this function
379 // is not even called providing DepWrite when there are any intervening reads.
380 if (EnablePartialOverwriteTracking &&
381 LaterOff < int64_t(EarlierOff + EarlierSize) &&
382 int64_t(LaterOff + LaterSize) >= EarlierOff) {
383
384 // Insert our part of the overlap into the map.
385 auto &IM = IOL[DepWrite];
386 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
387 << ", " << int64_t(EarlierOff + EarlierSize)
388 << ") Later [" << LaterOff << ", "
389 << int64_t(LaterOff + LaterSize) << ")\n");
390
391 // Make sure that we only insert non-overlapping intervals and combine
392 // adjacent intervals. The intervals are stored in the map with the ending
393 // offset as the key (in the half-open sense) and the starting offset as
394 // the value.
395 int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
396
397 // Find any intervals ending at, or after, LaterIntStart which start
398 // before LaterIntEnd.
399 auto ILI = IM.lower_bound(LaterIntStart);
400 if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
401 // This existing interval is overlapped with the current store somewhere
402 // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
403 // intervals and adjusting our start and end.
404 LaterIntStart = std::min(LaterIntStart, ILI->second);
405 LaterIntEnd = std::max(LaterIntEnd, ILI->first);
406 ILI = IM.erase(ILI);
407
408 // Continue erasing and adjusting our end in case other previous
409 // intervals are also overlapped with the current store.
410 //
411 // |--- earlier 1 ---| |--- earlier 2 ---|
412 // |------- later---------|
413 //
414 while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
415 assert(ILI->second > LaterIntStart && "Unexpected interval");
416 LaterIntEnd = std::max(LaterIntEnd, ILI->first);
417 ILI = IM.erase(ILI);
418 }
419 }
420
421 IM[LaterIntEnd] = LaterIntStart;
422
423 ILI = IM.begin();
424 if (ILI->second <= EarlierOff &&
425 ILI->first >= int64_t(EarlierOff + EarlierSize)) {
426 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
427 << EarlierOff << ", "
428 << int64_t(EarlierOff + EarlierSize)
429 << ") Composite Later [" << ILI->second << ", "
430 << ILI->first << ")\n");
431 ++NumCompletePartials;
432 return OW_Complete;
433 }
434 }
435
436 // Check for an earlier store which writes to all the memory locations that
437 // the later store writes to.
438 if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
439 int64_t(EarlierOff + EarlierSize) > LaterOff &&
440 uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
441 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
442 << EarlierOff << ", "
443 << int64_t(EarlierOff + EarlierSize)
444 << ") by a later store [" << LaterOff << ", "
445 << int64_t(LaterOff + LaterSize) << ")\n");
446 // TODO: Maybe come up with a better name?
447 return OW_PartialEarlierWithFullLater;
448 }
449
450 // Another interesting case is if the later store overwrites the end of the
451 // earlier store.
452 //
453 // |--earlier--|
454 // |-- later --|
455 //
456 // In this case we may want to trim the size of earlier to avoid generating
457 // writes to addresses which will definitely be overwritten later
458 if (!EnablePartialOverwriteTracking &&
459 (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
460 int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
461 return OW_End;
462
463 // Finally, we also need to check if the later store overwrites the beginning
464 // of the earlier store.
465 //
466 // |--earlier--|
467 // |-- later --|
468 //
469 // In this case we may want to move the destination address and trim the size
470 // of earlier to avoid generating writes to addresses which will definitely
471 // be overwritten later.
472 if (!EnablePartialOverwriteTracking &&
473 (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
474 assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
475 "Expect to be handled as OW_Complete");
476 return OW_Begin;
477 }
478 // Otherwise, they don't completely overlap.
479 return OW_Unknown;
480}
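As a hedged, self-contained model of the interval bookkeeping in isPartialOverwrite above (a sketch that mirrors, but is not, the code: map keys are half-open interval ends, values are interval starts):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>

// Insert the half-open range [Start, End) into the map, coalescing any
// overlapping or adjacent entries, as the erase/extend loop above does.
static void insertInterval(std::map<int64_t, int64_t> &IM, int64_t Start,
                           int64_t End) {
  auto ILI = IM.lower_bound(Start);
  while (ILI != IM.end() && ILI->second <= End) {
    Start = std::min(Start, ILI->second);
    End = std::max(End, ILI->first);
    ILI = IM.erase(ILI);
  }
  IM[End] = Start;
}

int main() {
  std::map<int64_t, int64_t> IM;
  insertInterval(IM, 0, 4); // later store covers [0, 4)
  insertInterval(IM, 4, 8); // later store covers [4, 8); merged to [0, 8)
  // With EarlierOff = 0 and EarlierSize = 8 the earlier store is fully
  // covered by the composite interval, i.e. the OW_Complete case above.
  assert(IM.size() == 1 && IM.begin()->second == 0 && IM.begin()->first == 8);
}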
481
482/// Returns true if the memory which is accessed by the second instruction is not
483/// modified between the first and the second instruction.
484/// Precondition: Second instruction must be dominated by the first
485/// instruction.
486static bool
487memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
488 BatchAAResults &AA, const DataLayout &DL,
489 DominatorTree *DT) {
490 // Do a backwards scan through the CFG from SecondI to FirstI. Look for
491 // instructions which can modify the memory location accessed by SecondI.
492 //
493 // While doing the walk keep track of the address to check. It might be
494 // different in different basic blocks due to PHI translation.
495 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
496 SmallVector<BlockAddressPair, 16> WorkList;
497 // Keep track of the address we visited each block with. Bail out if we
498 // visit a block with different addresses.
499 DenseMap<BasicBlock *, Value *> Visited;
500
501 BasicBlock::iterator FirstBBI(FirstI);
502 ++FirstBBI;
503 BasicBlock::iterator SecondBBI(SecondI);
504 BasicBlock *FirstBB = FirstI->getParent();
505 BasicBlock *SecondBB = SecondI->getParent();
506 MemoryLocation MemLoc = MemoryLocation::get(SecondI);
507 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
508
509 // Start checking the SecondBB.
510 WorkList.push_back(
511 std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
512 bool isFirstBlock = true;
513
514 // Check all blocks going backward until we reach the FirstBB.
515 while (!WorkList.empty()) {
516 BlockAddressPair Current = WorkList.pop_back_val();
517 BasicBlock *B = Current.first;
518 PHITransAddr &Addr = Current.second;
519 Value *Ptr = Addr.getAddr();
520
521 // Ignore instructions before FirstI if this is the FirstBB.
522 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
523
524 BasicBlock::iterator EI;
525 if (isFirstBlock) {
526 // Ignore instructions after SecondI if this is the first visit of SecondBB.
527 assert(B == SecondBB && "first block is not the store block");
528 EI = SecondBBI;
529 isFirstBlock = false;
530 } else {
531 // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
532 // In this case we also have to look at instructions after SecondI.
533 EI = B->end();
534 }
535 for (; BI != EI; ++BI) {
536 Instruction *I = &*BI;
537 if (I->mayWriteToMemory() && I != SecondI)
538 if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
539 return false;
540 }
541 if (B != FirstBB) {
542 assert(B != &FirstBB->getParent()->getEntryBlock() &&
543 "Should not hit the entry block because SI must be dominated by LI");
544 for (BasicBlock *Pred : predecessors(B)) {
545 PHITransAddr PredAddr = Addr;
546 if (PredAddr.NeedsPHITranslationFromBlock(B)) {
547 if (!PredAddr.IsPotentiallyPHITranslatable())
548 return false;
549 if (PredAddr.PHITranslateValue(B, Pred, DT, false))
550 return false;
551 }
552 Value *TranslatedPtr = PredAddr.getAddr();
553 auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
554 if (!Inserted.second) {
555 // We already visited this block before. If it was with a different
556 // address - bail out!
557 if (TranslatedPtr != Inserted.first->second)
558 return false;
559 // ... otherwise just skip it.
560 continue;
561 }
562 WorkList.push_back(std::make_pair(Pred, PredAddr));
563 }
564 }
565 }
566 return true;
567}
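A source-level sketch of the question this backward walk answers (hypothetical code, not from this file):

// Between the two stores, opaque() may write memory, so the backward
// scan from the second store hits a potential clobber of the queried
// location and memoryIsNotModifiedBetween would return false.
void opaque();
void example(int *p) {
  *p = 1;   // FirstI
  opaque(); // mayWriteToMemory; a mod of *p is possible here
  *p = 2;   // SecondI; its MemoryLocation is what the walk checks
}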
568
569static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart,
570 uint64_t &EarlierSize, int64_t LaterStart,
571 uint64_t LaterSize, bool IsOverwriteEnd) {
572 auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
573 Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne();
574
575 // We assume that memset/memcpy operates in chunks of the "largest" native
576 // type size and aligned on the same value. That means the optimal start
577 // and size of memset/memcpy should be a multiple of the preferred alignment
578 // of that type, i.e. there is no sense in trying to reduce the store size
579 // any further, since any "extra" stores come for free anyway.
580 // On the other hand, maximum alignment we can achieve is limited by alignment
581 // of initial store.
582
583 // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
584 // "largest" native type.
585 // Note: What is the proper way to get that value?
586 // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
587 // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
588
589 int64_t ToRemoveStart = 0;
590 uint64_t ToRemoveSize = 0;
591 // Compute start and size of the region to remove. Make sure 'PrefAlign' is
592 // maintained on the remaining store.
593 if (IsOverwriteEnd) {
594 // Calculate the required adjustment for 'LaterStart' in order to keep the
595 // remaining store size aligned on 'PrefAlign'.
596 uint64_t Off =
597 offsetToAlignment(uint64_t(LaterStart - EarlierStart), PrefAlign);
598 ToRemoveStart = LaterStart + Off;
599 if (EarlierSize <= uint64_t(ToRemoveStart - EarlierStart))
600 return false;
601 ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart);
602 } else {
603 ToRemoveStart = EarlierStart;
604 assert(LaterSize >= uint64_t(EarlierStart - LaterStart) &&
605 "Not overlapping accesses?");
606 ToRemoveSize = LaterSize - uint64_t(EarlierStart - LaterStart);
607 // Calculate the required adjustment for 'ToRemoveSize' in order to keep
608 // the start of the remaining store aligned on 'PrefAlign'.
609 uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
610 if (Off != 0) {
611 if (ToRemoveSize <= (PrefAlign.value() - Off))
612 return false;
613 ToRemoveSize -= PrefAlign.value() - Off;
614 }
615 assert(isAligned(PrefAlign, ToRemoveSize) &&
616 "Should preserve selected alignment");
617 }
618
619 assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
620 assert(EarlierSize > ToRemoveSize && "Can't remove more than original size");
621
622 uint64_t NewSize = EarlierSize - ToRemoveSize;
623 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
624 // When shortening an atomic memory intrinsic, the newly shortened
625 // length must remain an integer multiple of the element size.
626 const uint32_t ElementSize = AMI->getElementSizeInBytes();
627 if (0 != NewSize % ElementSize)
628 return false;
629 }
630
631 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
632 << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
633 << *EarlierWrite << "\n KILLER [" << ToRemoveStart << ", "
634 << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
635
636 Value *EarlierWriteLength = EarlierIntrinsic->getLength();
637 Value *TrimmedLength =
638 ConstantInt::get(EarlierWriteLength->getType(), NewSize);
639 EarlierIntrinsic->setLength(TrimmedLength);
640 EarlierIntrinsic->setDestAlignment(PrefAlign);
641
642 if (!IsOverwriteEnd) {
643 Value *Indices[1] = {
644 ConstantInt::get(EarlierWriteLength->getType(), ToRemoveSize)};
645 GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
646 EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
647 EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
648 NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
649 EarlierIntrinsic->setDest(NewDestGEP);
650 }
651
652 // Finally update start and size of earlier access.
653 if (!IsOverwriteEnd)
654 EarlierStart += ToRemoveSize;
655 EarlierSize = NewSize;
656
657 return true;
658}
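A hedged numeric walk-through of the IsOverwriteEnd path above, with a small stand-in for llvm::offsetToAlignment (an assumption matching its documented behavior, not the LLVM helper itself):

#include <cassert>
#include <cstdint>

// Stand-in for llvm::offsetToAlignment: bytes needed to reach the next
// multiple of A.
static uint64_t offsetToAlign(uint64_t V, uint64_t A) {
  return (A - V % A) % A;
}

int main() {
  int64_t EarlierStart = 0;  // memset covers [0, 32), dest align 8
  uint64_t EarlierSize = 32;
  int64_t LaterStart = 20;   // later write overwrites [20, 40)
  uint64_t Off = offsetToAlign(uint64_t(LaterStart - EarlierStart), 8); // 4
  int64_t ToRemoveStart = LaterStart + Off;                             // 24
  uint64_t ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart);
  // The memset keeps [0, 24) and drops the 8-byte tail that the later
  // write definitely rewrites, preserving the 8-byte size alignment.
  assert(ToRemoveSize == 8 && EarlierSize - ToRemoveSize == 24);
}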
659
660static bool tryToShortenEnd(Instruction *EarlierWrite,
661 OverlapIntervalsTy &IntervalMap,
662 int64_t &EarlierStart, uint64_t &EarlierSize) {
663 if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
664 return false;
665
666 OverlapIntervalsTy::iterator OII = --IntervalMap.end();
667 int64_t LaterStart = OII->second;
668 uint64_t LaterSize = OII->first - LaterStart;
669
670 assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
671
672 if (LaterStart > EarlierStart &&
673 // Note: "LaterStart - EarlierStart" is known to be positive due to
674 // preceding check.
675 (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
676 // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
677 // be non negative due to preceding checks.
678 LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
679 if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
680 LaterSize, true)) {
681 IntervalMap.erase(OII);
682 return true;
683 }
684 }
685 return false;
686}
687
688static bool tryToShortenBegin(Instruction *EarlierWrite,
689 OverlapIntervalsTy &IntervalMap,
690 int64_t &EarlierStart, uint64_t &EarlierSize) {
691 if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
692 return false;
693
694 OverlapIntervalsTy::iterator OII = IntervalMap.begin();
695 int64_t LaterStart = OII->second;
696 uint64_t LaterSize = OII->first - LaterStart;
697
698 assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
699
700 if (LaterStart <= EarlierStart &&
701 // Note: "EarlierStart - LaterStart" is known to be non negative due to
702 // preceding check.
703 LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
704 // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
705 // positive due to preceding checks.
706 assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
707 "Should have been handled as OW_Complete");
708 if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
709 LaterSize, false)) {
710 IntervalMap.erase(OII);
711 return true;
712 }
713 }
714 return false;
715}
716
717static bool removePartiallyOverlappedStores(const DataLayout &DL,
718 InstOverlapIntervalsTy &IOL,
719 const TargetLibraryInfo &TLI) {
720 bool Changed = false;
721 for (auto OI : IOL) {
722 Instruction *EarlierWrite = OI.first;
723 MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
724 assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
725
726 const Value *Ptr = Loc.Ptr->stripPointerCasts();
727 int64_t EarlierStart = 0;
728 uint64_t EarlierSize = Loc.Size.getValue();
729 GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
730 OverlapIntervalsTy &IntervalMap = OI.second;
731 Changed |=
732 tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
733 if (IntervalMap.empty())
734 continue;
735 Changed |=
736 tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
737 }
738 return Changed;
739}
740
741static Constant *tryToMergePartialOverlappingStores(
742 StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset,
743 int64_t DepWriteOffset, const DataLayout &DL, BatchAAResults &AA,
744 DominatorTree *DT) {
745
746 if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
747 DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
748 Later && isa<ConstantInt>(Later->getValueOperand()) &&
749 DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
750 memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
751 // If the store we find is:
752 // a) partially overwritten by the store to 'Loc'
753 // b) the later store is fully contained in the earlier one and
754 // c) they both have a constant value
755 // d) neither of the two stores needs padding
756 // Merge the two stores, replacing the earlier store's value with a
757 // merge of both values.
758 // TODO: Deal with other constant types (vectors, etc), and probably
759 // some mem intrinsics (if needed)
760
761 APInt EarlierValue =
762 cast<ConstantInt>(Earlier->getValueOperand())->getValue();
763 APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
764 unsigned LaterBits = LaterValue.getBitWidth();
765 assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
766 LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
767
768 // Offset of the smaller store inside the larger store
769 unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
770 unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
771 BitOffsetDiff - LaterBits
772 : BitOffsetDiff;
773 APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
774 LShiftAmount + LaterBits);
775 // Clear the bits we'll be replacing, then OR with the smaller
776 // store, shifted appropriately.
777 APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
778 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *Earlier
779 << "\n Later: " << *Later
780 << "\n Merged Value: " << Merged << '\n');
781 return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
782 }
783 return nullptr;
784}
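For the bit arithmetic above, a small self-contained example of the little-endian case, using plain integers in place of APInt:

#include <cassert>
#include <cstdint>

int main() {
  // Earlier: 32-bit store of 0xAABBCCDD; Later: 8-bit store of 0x11 at
  // byte offset 1 inside it (InstWriteOffset - DepWriteOffset == 1).
  uint32_t EarlierValue = 0xAABBCCDD;
  uint32_t LaterValue = 0x11;            // zero-extended, as in the code
  unsigned LShiftAmount = 1 * 8;         // little-endian: BitOffsetDiff
  uint32_t Mask = 0xFFu << LShiftAmount; // the bits being replaced
  uint32_t Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
  assert(Merged == 0xAABB11DD);          // merged constant for the store
}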
785
786namespace {
787// Returns true if \p I is an intrinsic that does not read or write memory.
788bool isNoopIntrinsic(Instruction *I) {
789 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
27: Assuming 'I' is not a 'IntrinsicInst'
27.1: 'II' is null
28: Taking false branch
56: Assuming 'I' is not a 'IntrinsicInst'
56.1: 'II' is null
57: Taking false branch
790 switch (II->getIntrinsicID()) {
791 case Intrinsic::lifetime_start:
792 case Intrinsic::lifetime_end:
793 case Intrinsic::invariant_end:
794 case Intrinsic::launder_invariant_group:
795 case Intrinsic::assume:
796 return true;
797 case Intrinsic::dbg_addr:
798 case Intrinsic::dbg_declare:
799 case Intrinsic::dbg_label:
800 case Intrinsic::dbg_value:
801 llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
802 default:
803 return false;
804 }
805 }
806 return false;
29: Returning zero, which participates in a condition later
58: Returning zero, which participates in a condition later
807}
808
809// Check if we can ignore \p D for DSE.
810bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
811 Instruction *DI = D->getMemoryInst();
812 // Calls that only access inaccessible memory cannot read or write any memory
813 // locations we consider for elimination.
814 if (auto *CB = dyn_cast<CallBase>(DI))
21: Assuming 'DI' is not a 'CallBase'
21.1: 'CB' is null
22: Taking false branch
815 if (CB->onlyAccessesInaccessibleMemory())
816 return true;
817
818 // We can eliminate stores to locations not visible to the caller across
819 // throwing instructions.
820 if (DI->mayThrow() && !DefVisibleToCaller)
23: Assuming the condition is false
821 return true;
822
823 // We can remove the dead stores, irrespective of the fence and its ordering
824 // (release/acquire/seq_cst). Fences only constraints the ordering of
825 // already visible stores, it does not make a store visible to other
826 // threads. So, skipping over a fence does not change a store from being
827 // dead.
828 if (isa<FenceInst>(DI))
24: Assuming 'DI' is not a 'FenceInst'
25: Taking false branch
829 return true;
830
831 // Skip intrinsics that do not really read or modify memory.
832 if (isNoopIntrinsic(D->getMemoryInst()))
26: Calling 'isNoopIntrinsic'
30: Returning from 'isNoopIntrinsic'
31: Taking false branch
833 return true;
834
835 return false;
32: Returning zero, which participates in a condition later
836}
837
838struct DSEState {
839 Function &F;
840 AliasAnalysis &AA;
841
842 /// The single BatchAA instance that is used to cache AA queries. It will
843 /// not be invalidated over the whole run. This is safe, because:
844 /// 1. Only memory writes are removed, so the alias cache for memory
845 /// locations remains valid.
846 /// 2. No new instructions are added (only instructions removed), so cached
847 /// information for a deleted value cannot be accessed by a re-used new
848 /// value pointer.
849 BatchAAResults BatchAA;
850
851 MemorySSA &MSSA;
852 DominatorTree &DT;
853 PostDominatorTree &PDT;
854 const TargetLibraryInfo &TLI;
855 const DataLayout &DL;
856
857 // All MemoryDefs that potentially could kill other MemDefs.
858 SmallVector<MemoryDef *, 64> MemDefs;
859 // Any that should be skipped as they are already deleted
860 SmallPtrSet<MemoryAccess *, 4> SkipStores;
861 // Keep track of all of the objects that are invisible to the caller before
862 // the function returns.
863 // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
864 DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
865 // Keep track of all of the objects that are invisible to the caller after
866 // the function returns.
867 DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
868 // Keep track of blocks with throwing instructions not modeled in MemorySSA.
869 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
870 // Post-order numbers for each basic block. Used to figure out if memory
871 // accesses are executed before another access.
872 DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
873
874 /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
875 /// basic block.
876 DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
877
878 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
879 PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
880 : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI),
881 DL(F.getParent()->getDataLayout()) {}
882
883 static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
884 DominatorTree &DT, PostDominatorTree &PDT,
885 const TargetLibraryInfo &TLI) {
886 DSEState State(F, AA, MSSA, DT, PDT, TLI);
887 // Collect blocks with throwing instructions not modeled in MemorySSA and
888 // alloc-like objects.
889 unsigned PO = 0;
890 for (BasicBlock *BB : post_order(&F)) {
891 State.PostOrderNumbers[BB] = PO++;
892 for (Instruction &I : *BB) {
893 MemoryAccess *MA = MSSA.getMemoryAccess(&I);
894 if (I.mayThrow() && !MA)
895 State.ThrowingBlocks.insert(I.getParent());
896
897 auto *MD = dyn_cast_or_null<MemoryDef>(MA);
898 if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
899 (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
900 State.MemDefs.push_back(MD);
901 }
902 }
903
904 // Treat byval or inalloca arguments the same as Allocas: stores to them
905 // are dead at the end of the function.
906 for (Argument &AI : F.args())
907 if (AI.hasPassPointeeByValueCopyAttr()) {
908 // For byval, the caller doesn't know the address of the allocation.
909 if (AI.hasByValAttr())
910 State.InvisibleToCallerBeforeRet.insert({&AI, true});
911 State.InvisibleToCallerAfterRet.insert({&AI, true});
912 }
913
914 return State;
915 }
916
917 /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI
918 /// instruction) completely overwrites a store to the 'Earlier' location
919 /// (by \p EarlierI instruction).
920 /// Return OW_MaybePartial if \p Later does not completely overwrite
921 /// \p Earlier, but they both write to the same underlying object. In that
922 /// case, use isPartialOverwrite to check if \p Later partially overwrites
923 /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined.
924 OverwriteResult
925 isOverwrite(const Instruction *LaterI, const Instruction *EarlierI,
926 const MemoryLocation &Later, const MemoryLocation &Earlier,
927 int64_t &EarlierOff, int64_t &LaterOff) {
928 // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
929 // get imprecise values here, though (except for unknown sizes).
930 if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) {
931 // In case no constant size is known, try to use the IR values for the
932 // number of bytes written and check if they match.
933 const auto *LaterMemI = dyn_cast<MemIntrinsic>(LaterI);
934 const auto *EarlierMemI = dyn_cast<MemIntrinsic>(EarlierI);
935 if (LaterMemI && EarlierMemI) {
936 const Value *LaterV = LaterMemI->getLength();
937 const Value *EarlierV = EarlierMemI->getLength();
938 if (LaterV == EarlierV && BatchAA.isMustAlias(Earlier, Later))
939 return OW_Complete;
940 }
941
942 // Masked stores have imprecise locations, but we can reason about them
943 // to some extent.
944 return isMaskedStoreOverwrite(LaterI, EarlierI, BatchAA);
945 }
946
947 const uint64_t LaterSize = Later.Size.getValue();
948 const uint64_t EarlierSize = Earlier.Size.getValue();
949
950 // Query the alias information
951 AliasResult AAR = BatchAA.alias(Later, Earlier);
952
953 // If the start pointers are the same, we just have to compare sizes to see if
954 // the later store was larger than the earlier store.
955 if (AAR == AliasResult::MustAlias) {
956 // Make sure that the Later size is >= the Earlier size.
957 if (LaterSize >= EarlierSize)
958 return OW_Complete;
959 }
960
961 // If we hit a partial alias, we may have a full overwrite
962 if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
963 int32_t Off = AAR.getOffset();
964 if (Off >= 0 && (uint64_t)Off + EarlierSize <= LaterSize)
965 return OW_Complete;
966 }
967
968 // Check to see if the later store is to the entire object (either a global,
969 // an alloca, or a byval/inalloca argument). If so, then it clearly
970 // overwrites any other store to the same object.
971 const Value *P1 = Earlier.Ptr->stripPointerCasts();
972 const Value *P2 = Later.Ptr->stripPointerCasts();
973 const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2);
974
975 // If we can't resolve the same pointers to the same object, then we can't
976 // analyze them at all.
977 if (UO1 != UO2)
978 return OW_Unknown;
979
980 // If the "Later" store is to a recognizable object, get its size.
981 uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, &F);
982 if (ObjectSize != MemoryLocation::UnknownSize)
983 if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
984 return OW_Complete;
985
986 // Okay, we have stores to two completely different pointers. Try to
987 // decompose the pointer into a "base + constant_offset" form. If the base
988 // pointers are equal, then we can reason about the two stores.
989 EarlierOff = 0;
990 LaterOff = 0;
991 const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
992 const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
993
994 // If the base pointers still differ, we have two completely different stores.
995 if (BP1 != BP2)
996 return OW_Unknown;
997
998 // The later access completely overlaps the earlier store if and only if
999 // both the start and the end of the earlier one are "inside" the later one:
1000 // |<->|--earlier--|<->|
1001 // |-------later-------|
1002 // Accesses may overlap if and only if start of one of them is "inside"
1003 // another one:
1004 // |<->|--earlier--|<----->|
1005 // |-------later-------|
1006 // OR
1007 // |----- earlier -----|
1008 // |<->|---later---|<----->|
1009 //
1010 // We have to be careful here as *Off is signed while *.Size is unsigned.
1011
1012 // Check if the earlier access starts "not before" the later one.
1013 if (EarlierOff >= LaterOff) {
1014 // If the earlier access ends "not after" the later access then the earlier
1015 // one is completely overwritten by the later one.
1016 if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
1017 return OW_Complete;
1018 // If start of the earlier access is "before" end of the later access then
1019 // accesses overlap.
1020 else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize)
1021 return OW_MaybePartial;
1022 }
1023 // If start of the later access is "before" end of the earlier access then
1024 // accesses overlap.
1025 else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) {
1026 return OW_MaybePartial;
1027 }
1028
1029 // We can reach here only if the accesses are known not to overlap. There is
1030 // no dedicated code to indicate no overlap, so signal "unknown".
1031 return OW_Unknown;
1032 }
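  // An illustrative sketch (hypothetical offsets, not from an actual run):
  // after decomposing both pointers to the same base, an earlier 4-byte store
  // at offset 0 and a later 8-byte store at offset 0 give
  // EarlierOff == LaterOff and EarlierSize <= LaterSize, so the result is
  // OW_Complete. If the later store covered only bytes [0, 2), the ranges
  // would merely intersect and the result would be OW_MaybePartial.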
1033
1034 bool isInvisibleToCallerAfterRet(const Value *V) {
1035 if (isa<AllocaInst>(V))
1036 return true;
1037 auto I = InvisibleToCallerAfterRet.insert({V, false});
1038 if (I.second) {
1039 if (!isInvisibleToCallerBeforeRet(V)) {
1040 I.first->second = false;
1041 } else {
1042 auto *Inst = dyn_cast<Instruction>(V);
1043 if (Inst && isAllocLikeFn(Inst, &TLI))
1044 I.first->second = !PointerMayBeCaptured(V, true, false);
1045 }
1046 }
1047 return I.first->second;
1048 }
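  // E.g. (illustrative IR sketch): memory from an alloc-like call whose
  // pointer is never captured is invisible to the caller after the return:
  //   %p = call i8* @malloc(i64 4)   ; alloc-like, never escapes
  //   store i8 0, i8* %p             ; cannot be observed after the return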
1049
1050 bool isInvisibleToCallerBeforeRet(const Value *V) {
1051 if (isa<AllocaInst>(V))
1052 return true;
1053 auto I = InvisibleToCallerBeforeRet.insert({V, false});
1054 if (I.second) {
1055 auto *Inst = dyn_cast<Instruction>(V);
1056 if (Inst && isAllocLikeFn(Inst, &TLI))
1057 // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1058 // with the killing MemoryDef. But we refrain from doing so for now to
1059 // limit compile-time; in practice it does not change the number of
1060 // stores removed on a large test set.
1061 I.first->second = !PointerMayBeCaptured(V, false, true);
1062 }
1063 return I.first->second;
1064 }
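  // E.g. (illustrative IR sketch, with @publish a hypothetical function that
  // stores its argument to a global): once the pointer escapes, stores to it
  // may be observed before the return, so it is no longer invisible:
  //   %p = call i8* @malloc(i64 4)
  //   call void @publish(i8* %p)     ; capture
  //   store i8 0, i8* %p             ; may be read through the escaped pointer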
1065
1066 Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1067 if (!I->mayWriteToMemory())
1068 return None;
1069
1070 if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1071 return {MemoryLocation::getForDest(MTI)};
1072
1073 if (auto *CB = dyn_cast<CallBase>(I)) {
1074 // If the function may write to memory we do not know about, bail out.
1075 if (!CB->onlyAccessesArgMemory() &&
1076 !CB->onlyAccessesInaccessibleMemOrArgMem())
1077 return None;
1078
1079 LibFunc LF;
1080 if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1081 switch (LF) {
1082 case LibFunc_strcpy:
1083 case LibFunc_strncpy:
1084 case LibFunc_strcat:
1085 case LibFunc_strncat:
1086 return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1087 default:
1088 break;
1089 }
1090 }
1091 switch (CB->getIntrinsicID()) {
1092 case Intrinsic::init_trampoline:
1093 return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1094 case Intrinsic::masked_store:
1095 return {MemoryLocation::getForArgument(CB, 1, TLI)};
1096 default:
1097 break;
1098 }
1099 return None;
1100 }
1101
1102 return MemoryLocation::getOrNone(I);
1103 }
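  // E.g. for `call i8* @strcpy(i8* %dst, i8* %src)` this returns
  // MemoryLocation::getAfter(%dst): strcpy writes an unknown number of bytes
  // starting at %dst, so everything from %dst onwards is conservatively
  // treated as written.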
1104
1105 /// Returns true if \p UseInst completely overwrites \p DefLoc
1106 /// (stored by \p DefInst).
1107 bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1108 Instruction *UseInst) {
1109 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1110 // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1111 // MemoryDef.
1112 if (!UseInst->mayWriteToMemory())
1113 return false;
1114
1115 if (auto *CB = dyn_cast<CallBase>(UseInst))
1116 if (CB->onlyAccessesInaccessibleMemory())
1117 return false;
1118
1119 int64_t InstWriteOffset, DepWriteOffset;
1120 if (auto CC = getLocForWriteEx(UseInst))
1121 return isOverwrite(UseInst, DefInst, *CC, DefLoc, DepWriteOffset,
1122 InstWriteOffset) == OW_Complete;
1123 return false;
1124 }
1125
1126 /// Returns true if \p Def is not read before returning from the function.
1127 bool isWriteAtEndOfFunction(MemoryDef *Def) {
1128 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " ("
1129            << *Def->getMemoryInst()
1130            << ") is at the end of the function\n");
1131
1132 auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1133 if (!MaybeLoc) {
1134 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n");
1135 return false;
1136 }
1137
1138 SmallVector<MemoryAccess *, 4> WorkList;
1139 SmallPtrSet<MemoryAccess *, 8> Visited;
1140 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1141 if (!Visited.insert(Acc).second)
1142 return;
1143 for (Use &U : Acc->uses())
1144 WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1145 };
1146 PushMemUses(Def);
1147 for (unsigned I = 0; I < WorkList.size(); I++) {
1148 if (WorkList.size() >= MemorySSAScanLimit) {
1149 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n");
1150 return false;
1151 }
1152
1153 MemoryAccess *UseAccess = WorkList[I];
1154 // Simply adding the users of MemoryPhi to the worklist is not enough,
1155 // because we might miss read clobbers in different iterations of a loop,
1156 // for example.
1157 // TODO: Add support for phi translation to handle the loop case.
1158 if (isa<MemoryPhi>(UseAccess))
1159 return false;
1160
1161 // TODO: Checking for aliasing is expensive. Consider reducing the amount
1162 // of times this is called and/or caching it.
1163 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1164 if (isReadClobber(*MaybeLoc, UseInst)) {
1165 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n");
1166 return false;
1167 }
1168
1169 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1170 PushMemUses(UseDef);
1171 }
1172 return true;
1173 }
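  // A minimal sketch of a write at the end of the function:
  //   %a = alloca i32
  //   store i32 1, i32* %a
  //   ret void
  // No MemoryAccess reads the stored location before the return, so the walk
  // above finds no read clobber and returns true.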
1174
1175 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1176 /// pair with the MemoryLocation terminated by \p I and a boolean flag
1177 /// indicating whether \p I is a free-like call.
1178 Optional<std::pair<MemoryLocation, bool>>
1179 getLocForTerminator(Instruction *I) const {
1180 uint64_t Len;
1181 Value *Ptr;
1182 if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1183 m_Value(Ptr))))
1184 return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1185
1186 if (auto *CB = dyn_cast<CallBase>(I)) {
1187 if (isFreeCall(I, &TLI))
1188 return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
1189 true)};
1190 }
1191
1192 return None;
1193 }
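  // E.g. `call void @llvm.lifetime.end.p0i8(i64 8, i8* %p)` yields the pair
  // (MemoryLocation(%p, 8), false), while `call void @free(i8* %p)` yields
  // (MemoryLocation::getAfter(%p), true).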
1194
1195 /// Returns true if \p I is a memory terminator instruction like
1196 /// llvm.lifetime.end or free.
1197 bool isMemTerminatorInst(Instruction *I) const {
1198 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1199 return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1200 isFreeCall(I, &TLI);
1201 }
1202
1203 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1204 /// instruction \p AccessI.
1205 bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1206 Instruction *MaybeTerm) {
1207 Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1208 getLocForTerminator(MaybeTerm);
1209
1210 if (!MaybeTermLoc)
1211 return false;
1212
1213 // If the terminator is a free-like call, all accesses to the underlying
1214 // object can be considered terminated.
1215 if (getUnderlyingObject(Loc.Ptr) !=
1216 getUnderlyingObject(MaybeTermLoc->first.Ptr))
1217 return false;
1218
1219 auto TermLoc = MaybeTermLoc->first;
1220 if (MaybeTermLoc->second) {
1221 const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1222 return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1223 }
1224 int64_t InstWriteOffset, DepWriteOffset;
1225 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DepWriteOffset,
1226 InstWriteOffset) == OW_Complete;
1227 }
1228
1229 // Returns true if \p Use may read from \p DefLoc.
1230 bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1231 if (isNoopIntrinsic(UseInst))
55
Calling 'isNoopIntrinsic'
59
Returning from 'isNoopIntrinsic'
60
Taking false branch
1232 return false;
1233
1234 // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1235 // treated as read clobber.
1236 if (auto SI = dyn_cast<StoreInst>(UseInst))
61
Assuming 'UseInst' is not a 'StoreInst'
61.1
'SI' is null
62
Taking false branch
1237 return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1238
1239 if (!UseInst->mayReadFromMemory())
63
Assuming the condition is true
64
Taking true branch
1240 return false;
65
Returning zero, which participates in a condition later
1241
1242 if (auto *CB = dyn_cast<CallBase>(UseInst))
1243 if (CB->onlyAccessesInaccessibleMemory())
1244 return false;
1245
1246 // NOTE: For calls, the number of stores removed could be slightly improved
1247 // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that proved
1248 // too expensive compared to the benefits in practice. For now, avoid more
1249 // expensive analysis to limit compile-time.
1250 return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1251 }
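  // E.g. a `load i32, i32* %q` where %q may alias DefLoc is a read clobber,
  // while a monotonic (or weaker) atomic store to the same location is not:
  // it can be re-ordered and does not observe the stored value.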
1252
1253 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1254 /// loop. In particular, this guarantees that it only references a single
1255 /// MemoryLocation during execution of the containing function.
1256 bool IsGuaranteedLoopInvariant(Value *Ptr) {
1257 auto IsGuaranteedLoopInvariantBase = [this](Value *Ptr) {
1258 Ptr = Ptr->stripPointerCasts();
1259 if (auto *I = dyn_cast<Instruction>(Ptr)) {
1260 if (isa<AllocaInst>(Ptr))
1261 return true;
1262
1263 if (isAllocLikeFn(I, &TLI))
1264 return true;
1265
1266 return false;
1267 }
1268 return true;
1269 };
1270
1271 Ptr = Ptr->stripPointerCasts();
121
Called C++ object pointer is null
1272 if (auto *I = dyn_cast<Instruction>(Ptr)) {
1273 if (I->getParent()->isEntryBlock())
1274 return true;
1275 }
1276 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
1277 return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
1278 GEP->hasAllConstantIndices();
1279 }
1280 return IsGuaranteedLoopInvariantBase(Ptr);
1281 }
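  // E.g. (illustrative sketch): `getelementptr inbounds i32, i32* %a, i64 1`
  // with %a an alloca and all-constant indices always names the same location,
  // whereas a GEP whose index is a loop-carried phi may address a different
  // element on every iteration and is rejected.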
1282
1283 // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with
1284 // no read access between them or on any other path to a function exit block
1285 // if \p DefLoc is not accessible after the function returns. If there is no
1286 // such MemoryDef, return None. The returned value may not (completely)
1287 // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing
1288 // MemoryUse (read).
1289 Optional<MemoryAccess *>
1290 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1291 const MemoryLocation &DefLoc, const Value *DefUO,
1292 unsigned &ScanLimit, unsigned &WalkerStepLimit,
1293 bool IsMemTerm, unsigned &PartialLimit) {
1294 if (ScanLimit == 0 || WalkerStepLimit == 0) {
1
Assuming 'ScanLimit' is not equal to 0
2
Assuming 'WalkerStepLimit' is not equal to 0
3
Taking false branch
1295 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1296 return None;
1297 }
1298
1299 MemoryAccess *Current = StartAccess;
1300 Instruction *KillingI = KillingDef->getMemoryInst();
1301 bool StepAgain;
1302 LLVM_DEBUG(dbgs() << " trying to get dominating access\n");
4
Assuming 'DebugFlag' is false
5
Loop condition is false. Exiting loop
1303
1304 // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1305 Optional<MemoryLocation> CurrentLoc;
1306 do {
1307 StepAgain = false;
1308 LLVM_DEBUG({
6
Loop condition is false. Exiting loop
1309   dbgs() << " visiting " << *Current;
1310   if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1311     dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1312            << ")";
1313   dbgs() << "\n";
1314 });
1315
1316 // Reached TOP.
1317 if (MSSA.isLiveOnEntryDef(Current)) {
7
Calling 'MemorySSA::isLiveOnEntryDef'
10
Returning from 'MemorySSA::isLiveOnEntryDef'
11
Taking false branch
1318 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n");
1319 return None;
1320 }
1321
1322 // Cost of a step. Accesses in the same block are more likely to be valid
1323 // candidates for elimination, hence consider them cheaper.
1324 unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
12
Assuming the condition is false
13
'?' condition is false
1325 ? MemorySSASameBBStepCost
1326 : MemorySSAOtherBBStepCost;
1327 if (WalkerStepLimit <= StepCost) {
14
Assuming 'WalkerStepLimit' is > 'StepCost'
15
Taking false branch
1328 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n");
1329 return None;
1330 }
1331 WalkerStepLimit -= StepCost;
1332
1333 // Return for MemoryPhis. They cannot be eliminated directly and the
1334 // caller is responsible for traversing them.
1335 if (isa<MemoryPhi>(Current)) {
16
Assuming 'Current' is not a 'MemoryPhi'
17
Taking false branch
1336 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n");
1337 return Current;
1338 }
1339
1340 // Below, check if CurrentDef is a valid candidate to be eliminated by
1341 // KillingDef. If it is not, check the next candidate.
1342 MemoryDef *CurrentDef = cast<MemoryDef>(Current);
18
'Current' is a 'MemoryDef'
1343 Instruction *CurrentI = CurrentDef->getMemoryInst();
1344
1345 if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO))) {
19
Assuming the condition is false
20
Calling 'canSkipDef'
33
Returning from 'canSkipDef'
34
Taking false branch
1346 StepAgain = true;
1347 Current = CurrentDef->getDefiningAccess();
1348 continue;
1349 }
1350
1351 // Before we try to remove anything, check for any extra throwing
1352 // instructions that block us from DSEing
1353 if (mayThrowBetween(KillingI, CurrentI, DefUO)) {
35
Calling 'DSEState::mayThrowBetween'
44
Returning from 'DSEState::mayThrowBetween'
45
Taking false branch
1354 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n");
1355 return None;
1356 }
1357
1358 // Check for anything that looks like it will be a barrier to further
1359 // removal
1360 if (isDSEBarrier(DefUO, CurrentI)) {
46
Calling 'DSEState::isDSEBarrier'
51
Returning from 'DSEState::isDSEBarrier'
52
Taking false branch
1361 LLVM_DEBUG(dbgs() << " ... skip, barrier\n");
1362 return None;
1363 }
1364
1365 // If Current is known to be on path that reads DefLoc or is a read
1366 // clobber, bail out, as the path is not profitable. We skip this check
1367 // for intrinsic calls, because the code knows how to handle memcpy
1368 // intrinsics.
1369 if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(DefLoc, CurrentI))
53
Assuming 'CurrentI' is not a 'IntrinsicInst'
54
Calling 'DSEState::isReadClobber'
66
Returning from 'DSEState::isReadClobber'
67
Taking false branch
1370 return None;
1371
1372 // Quick check if there are direct uses that are read-clobbers.
1373 if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) {
68
Calling 'any_of<llvm::iterator_range<llvm::Value::use_iterator_impl<llvm::Use>>, (lambda at /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp:1373:35)>'
74
Returning from 'any_of<llvm::iterator_range<llvm::Value::use_iterator_impl<llvm::Use>>, (lambda at /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp:1373:35)>'
75
Taking false branch
1374 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1375 return !MSSA.dominates(StartAccess, UseOrDef) &&
1376 isReadClobber(DefLoc, UseOrDef->getMemoryInst());
1377 return false;
1378 })) {
1379 LLVM_DEBUG(dbgs() << " ... found a read clobber\n");
1380 return None;
1381 }
1382
1383 // If Current cannot be analyzed or is not removable, check the next
1384 // candidate.
1385 if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI)) {
76
Calling 'hasAnalyzableMemoryWrite'
104
Returning from 'hasAnalyzableMemoryWrite'
105
Assuming the condition is false
106
Taking false branch
1386 StepAgain = true;
1387 Current = CurrentDef->getDefiningAccess();
1388 continue;
1389 }
1390
1391 // If Current does not have an analyzable write location, skip it
1392 CurrentLoc = getLocForWriteEx(CurrentI);
107
Null pointer value stored to 'CurrentLoc.Storage..value.Ptr'
1393 if (!CurrentLoc) {
108
Calling 'Optional::operator bool'
116
Returning from 'Optional::operator bool'
117
Taking false branch
1394 StepAgain = true;
1395 Current = CurrentDef->getDefiningAccess();
1396 continue;
1397 }
1398
1399 // AliasAnalysis does not account for loops. Limit elimination to
1400 // candidates for which we can guarantee they always store to the same
1401 // memory location and not multiple locations in a loop.
1402 if (Current->getBlock() != KillingDef->getBlock() &&
118
Assuming the condition is true
1403 !IsGuaranteedLoopInvariant(const_cast<Value *>(CurrentLoc->Ptr))) {
119
Passing null pointer value via 1st parameter 'Ptr'
120
Calling 'DSEState::IsGuaranteedLoopInvariant'
1404 StepAgain = true;
1405 Current = CurrentDef->getDefiningAccess();
1406 WalkerStepLimit -= 1;
1407 continue;
1408 }
1409
1410 if (IsMemTerm) {
1411 // If the killing def is a memory terminator (e.g. lifetime.end), check
1412 // the next candidate if the current Current does not write the same
1413 // underlying object as the terminator.
1414 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
1415 StepAgain = true;
1416 Current = CurrentDef->getDefiningAccess();
1417 }
1418 continue;
1419 } else {
1420 int64_t InstWriteOffset, DepWriteOffset;
1421 auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc,
1422 DepWriteOffset, InstWriteOffset);
1423 // If Current does not write to the same object as KillingDef, check
1424 // the next candidate.
1425 if (OR == OW_Unknown) {
1426 StepAgain = true;
1427 Current = CurrentDef->getDefiningAccess();
1428 } else if (OR == OW_MaybePartial) {
1429 // If KillingDef only partially overwrites Current, check the next
1430 // candidate if the partial step limit is exceeded. This aggressively
1431 // limits the number of candidates for partial store elimination,
1432 // which are less likely to be removable in the end.
1433 if (PartialLimit <= 1) {
1434 StepAgain = true;
1435 Current = CurrentDef->getDefiningAccess();
1436 WalkerStepLimit -= 1;
1437 continue;
1438 }
1439 PartialLimit -= 1;
1440 }
1441 }
1442 } while (StepAgain);
1443
1444 // Accesses to objects accessible after the function returns can only be
1445 // eliminated if the access is killed along all paths to the exit. Collect
1446 // the blocks with killing (= completely overwriting) MemoryDefs and check if
1447 // they cover all paths from EarlierAccess to any function exit.
1448 SmallPtrSet<Instruction *, 16> KillingDefs;
1449 KillingDefs.insert(KillingDef->getMemoryInst());
1450 MemoryAccess *EarlierAccess = Current;
1451 Instruction *EarlierMemInst =
1452 cast<MemoryDef>(EarlierAccess)->getMemoryInst();
1453 LLVM_DEBUG(dbgs() << " Checking for reads of " << *EarlierAccess << " ("
1454            << *EarlierMemInst << ")\n");
1455
1456 SmallSetVector<MemoryAccess *, 32> WorkList;
1457 auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1458 for (Use &U : Acc->uses())
1459 WorkList.insert(cast<MemoryAccess>(U.getUser()));
1460 };
1461 PushMemUses(EarlierAccess);
1462
1463 // Optimistically collect all accesses for reads. If we do not find any
1464 // read clobbers, add them to the cache.
1465 SmallPtrSet<MemoryAccess *, 16> KnownNoReads;
1466 if (!EarlierMemInst->mayReadFromMemory())
1467 KnownNoReads.insert(EarlierAccess);
1468 // Check if EarlierDef may be read.
1469 for (unsigned I = 0; I < WorkList.size(); I++) {
1470 MemoryAccess *UseAccess = WorkList[I];
1471
1472 LLVM_DEBUG(dbgs() << " " << *UseAccess);
1473 // Bail out if the number of accesses to check exceeds the scan limit.
1474 if (ScanLimit < (WorkList.size() - I)) {
1475 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1476 return None;
1477 }
1478 --ScanLimit;
1479 NumDomMemDefChecks++;
1480 KnownNoReads.insert(UseAccess);
1481
1482 if (isa<MemoryPhi>(UseAccess)) {
1483 if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1484 return DT.properlyDominates(KI->getParent(),
1485 UseAccess->getBlock());
1486 })) {
1487 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1488 continue;
1489 }
1490 LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n");
1491 PushMemUses(UseAccess);
1492 continue;
1493 }
1494
1495 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1496 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1497
1498 if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
1499 return DT.dominates(KI, UseInst);
1500 })) {
1501 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
1502 continue;
1503 }
1504
1505 // A memory terminator kills all preceding MemoryDefs and all succeeding
1506 // MemoryAccesses. We do not have to check its users.
1507 if (isMemTerminator(*CurrentLoc, EarlierMemInst, UseInst)) {
1508 LLVM_DEBUG(
1509   dbgs()
1510   << " ... skipping, memterminator invalidates following accesses\n");
1511 continue;
1512 }
1513
1514 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1515 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n");
1516 PushMemUses(UseAccess);
1517 continue;
1518 }
1519
1520 if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) {
1521 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n");
1522 return None;
1523 }
1524
1525 // Uses which may read the original MemoryDef mean we cannot eliminate the
1526 // original MD. Stop walk.
1527 if (isReadClobber(*CurrentLoc, UseInst)) {
1528 LLVM_DEBUG(dbgs() << " ... found read clobber\n");
1529 return None;
1530 }
1531
1532 // For the KillingDef and EarlierAccess we only have to check if it reads
1533 // the memory location.
1534 // TODO: It would probably be better to check for self-reads before
1535 // calling the function.
1536 if (KillingDef == UseAccess || EarlierAccess == UseAccess) {
1537 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n");
1538 continue;
1539 }
1540
1541 // Check all uses of MemoryDefs, except for defs completely overwriting
1542 // the original location; those need no further checking. We do have to
1543 // check uses of *all* other MemoryDefs we discover, including non-aliasing
1544 // ones, since otherwise we might miss cases like the following
1545 // 1 = Def(LoE) ; <----- EarlierDef stores [0,1]
1546 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3]
1547 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3].
1548 // (The Use points to the *first* Def it may alias)
1549 // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias,
1550 // stores [0,1]
1551 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1552 if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) {
1553 if (!isInvisibleToCallerAfterRet(DefUO) &&
1554 UseAccess != EarlierAccess) {
1555 BasicBlock *MaybeKillingBlock = UseInst->getParent();
1556 if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1557 PostOrderNumbers.find(EarlierAccess->getBlock())->second) {
1558
1559 LLVM_DEBUG(dbgs()
1560   << " ... found killing def " << *UseInst << "\n");
1561 KillingDefs.insert(UseInst);
1562 }
1563 }
1564 } else
1565 PushMemUses(UseDef);
1566 }
1567 }
1568
1569 // For accesses to locations visible after the function returns, make sure
1570 // that the location is killed (=overwritten) along all paths from
1571 // EarlierAccess to the exit.
1572 if (!isInvisibleToCallerAfterRet(DefUO)) {
1573 SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1574 for (Instruction *KD : KillingDefs)
1575 KillingBlocks.insert(KD->getParent());
1576 assert(!KillingBlocks.empty() &&
1577        "Expected at least a single killing block");
1578
1579 // Find the common post-dominator of all killing blocks.
1580 BasicBlock *CommonPred = *KillingBlocks.begin();
1581 for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
1582 I != E; I++) {
1583 if (!CommonPred)
1584 break;
1585 CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
1586 }
1587
1588 // If CommonPred is in the set of killing blocks, just check if it
1589 // post-dominates EarlierAccess.
1590 if (KillingBlocks.count(CommonPred)) {
1591 if (PDT.dominates(CommonPred, EarlierAccess->getBlock()))
1592 return {EarlierAccess};
1593 return None;
1594 }
1595
1596 // If the common post-dominator does not post-dominate EarlierAccess,
1597 // there is a path from EarlierAccess to an exit not going through a
1598 // killing block.
1599 if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) {
1600 SetVector<BasicBlock *> WorkList;
1601
1602 // If CommonPred is null, there are multiple exits from the function.
1603 // They all have to be added to the worklist.
1604 if (CommonPred)
1605 WorkList.insert(CommonPred);
1606 else
1607 for (BasicBlock *R : PDT.roots())
1608 WorkList.insert(R);
1609
1610 NumCFGTries++;
1611 // Check if all paths starting from an exit node go through one of the
1612 // killing blocks before reaching EarlierAccess.
1613 for (unsigned I = 0; I < WorkList.size(); I++) {
1614 NumCFGChecks++;
1615 BasicBlock *Current = WorkList[I];
1616 if (KillingBlocks.count(Current))
1617 continue;
1618 if (Current == EarlierAccess->getBlock())
1619 return None;
1620
1621 // EarlierAccess is reachable from the entry, so we don't have to
1622 // explore unreachable blocks further.
1623 if (!DT.isReachableFromEntry(Current))
1624 continue;
1625
1626 for (BasicBlock *Pred : predecessors(Current))
1627 WorkList.insert(Pred);
1628
1629 if (WorkList.size() >= MemorySSAPathCheckLimit)
1630 return None;
1631 }
1632 NumCFGSuccess++;
1633 return {EarlierAccess};
1634 }
1635 return None;
1636 }
1637
1638 // No aliasing MemoryUses of EarlierAccess found; EarlierAccess is
1639 // potentially dead.
1640 return {EarlierAccess};
1641 }
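  // E.g. (illustrative sketch): for a store in the entry block of a diamond
  // that is overwritten in only one arm, the killing block does not
  // post-dominate the candidate's block, so the checks above return None and
  // the candidate store is kept.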
1642
1643 // Delete dead memory defs
1644 void deleteDeadInstruction(Instruction *SI) {
1645 MemorySSAUpdater Updater(&MSSA);
1646 SmallVector<Instruction *, 32> NowDeadInsts;
1647 NowDeadInsts.push_back(SI);
1648 --NumFastOther;
1649
1650 while (!NowDeadInsts.empty()) {
1651 Instruction *DeadInst = NowDeadInsts.pop_back_val();
1652 ++NumFastOther;
1653
1654 // Try to preserve debug information attached to the dead instruction.
1655 salvageDebugInfo(*DeadInst);
1656 salvageKnowledge(DeadInst);
1657
1658 // Remove the Instruction from MSSA.
1659 if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1660 if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1661 SkipStores.insert(MD);
1662 }
1663 Updater.removeMemoryAccess(MA);
1664 }
1665
1666 auto I = IOLs.find(DeadInst->getParent());
1667 if (I != IOLs.end())
1668 I->second.erase(DeadInst);
1669 // Remove its operands
1670 for (Use &O : DeadInst->operands())
1671 if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1672 O = nullptr;
1673 if (isInstructionTriviallyDead(OpI, &TLI))
1674 NowDeadInsts.push_back(OpI);
1675 }
1676
1677 DeadInst->eraseFromParent();
1678 }
1679 }
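  // E.g. deleting `store i32 %v, i32* %gep` may leave %gep without users; it
  // is then pushed onto NowDeadInsts and erased as well, so dead address
  // computations disappear transitively.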
1680
1681 // Check for any extra throws between SI and NI that block DSE. This only
1682 // checks extra may-throw instructions (those that aren't MemoryDefs).
1683 // MemoryDefs that may throw are handled during the walk from one def to the next.
1684 bool mayThrowBetween(Instruction *SI, Instruction *NI,
1685 const Value *SILocUnd) {
1686 // First see if we can ignore it by using the fact that SI is an
1687 // alloca/alloca like object that is not visible to the caller during
1688 // execution of the function.
1689 if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd))
36
Assuming 'SILocUnd' is null
1690 return false;
1691
1692 if (SI->getParent() == NI->getParent())
37
Assuming the condition is false
38
Taking false branch
1693 return ThrowingBlocks.count(SI->getParent());
1694 return !ThrowingBlocks.empty();
39
Calling 'SmallPtrSetImplBase::empty'
42
Returning from 'SmallPtrSetImplBase::empty'
43
Returning zero, which participates in a condition later
1695 }
1696
1697 // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
1698 // act as barriers:
1699 // * A memory instruction that may throw and \p SI accesses a non-stack
1700 // object.
1701 // * Atomic stores stronger than monotonic.
1702 bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) {
1703 // If NI may throw it acts as a barrier, unless the store is to an alloca or
1704 // alloca-like object that does not escape.
1705 if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd))
47
Assuming the condition is false
1706 return true;
1707
1708 // If NI is an atomic load/store stronger than monotonic, do not try to
1709 // eliminate/reorder it.
1710 if (NI->isAtomic()) {
48
Assuming the condition is false
49
Taking false branch
1711 if (auto *LI = dyn_cast<LoadInst>(NI))
1712 return isStrongerThanMonotonic(LI->getOrdering());
1713 if (auto *SI = dyn_cast<StoreInst>(NI))
1714 return isStrongerThanMonotonic(SI->getOrdering());
1715 if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI))
1716 return isStrongerThanMonotonic(ARMW->getOrdering());
1717 if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI))
1718 return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
1719 isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
1720 llvm_unreachable("other instructions should be skipped in MemorySSA")::llvm::llvm_unreachable_internal("other instructions should be skipped in MemorySSA"
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp"
, 1720)
;
1721 }
1722 return false;
50
Returning zero, which participates in a condition later
1723 }
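  // E.g. a `store atomic i32 0, i32* %p seq_cst` is a barrier and must not be
  // eliminated or re-ordered here, while a monotonic atomic store is not a
  // barrier.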
1724
1725 /// Eliminate writes to objects that are not visible in the caller and are not
1726 /// accessed before returning from the function.
1727 bool eliminateDeadWritesAtEndOfFunction() {
1728 bool MadeChange = false;
1729 LLVM_DEBUG(
1730   dbgs()
1731   << "Trying to eliminate MemoryDefs at the end of the function\n");
1732 for (int I = MemDefs.size() - 1; I >= 0; I--) {
1733 MemoryDef *Def = MemDefs[I];
1734 if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst()))
1735 continue;
1736
1737 Instruction *DefI = Def->getMemoryInst();
1738 SmallVector<const Value *, 4> Pointers;
1739 auto DefLoc = getLocForWriteEx(DefI);
1740 if (!DefLoc)
1741 continue;
1742
1743 // NOTE: Currently eliminating writes at the end of a function is limited
1744 // to MemoryDefs with a single underlying object, to save compile-time. In
1745 // practice it appears the case with multiple underlying objects is very
1746 // uncommon. If it turns out to be important, we can use
1747 // getUnderlyingObjects here instead.
1748 const Value *UO = getUnderlyingObject(DefLoc->Ptr);
1749 if (!UO || !isInvisibleToCallerAfterRet(UO))
1750 continue;
1751
1752 if (isWriteAtEndOfFunction(Def)) {
1753 // See through pointer-to-pointer bitcasts
1754 LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
1755            "of the function\n");
1756 deleteDeadInstruction(DefI);
1757 ++NumFastStores;
1758 MadeChange = true;
1759 }
1760 }
1761 return MadeChange;
1762 }
1763
1764 /// \returns true if \p Def is a no-op store, either because it
1765 /// directly stores back a loaded value or stores zero to a calloc'ed object.
1766 bool storeIsNoop(MemoryDef *Def, const MemoryLocation &DefLoc,
1767 const Value *DefUO) {
1768 StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
1769 MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst());
1770 Constant *StoredConstant = nullptr;
1771 if (Store)
1772 StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
1773 if (MemSet)
1774 StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1775
1776 if (StoredConstant && StoredConstant->isNullValue()) {
1777 auto *DefUOInst = dyn_cast<Instruction>(DefUO);
1778 if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) {
1779 auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
1780 // If UnderlyingDef is the clobbering access of Def, no instructions
1781 // between them can modify the memory location.
1782 auto *ClobberDef =
1783 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
1784 return UnderlyingDef == ClobberDef;
1785 }
1786 }
1787
1788 if (!Store)
1789 return false;
1790
1791 if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
1792 if (LoadI->getPointerOperand() == Store->getOperand(1)) {
1793 // Get the defining access for the load.
1794 auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
1795 // Fast path: the defining accesses are the same.
1796 if (LoadAccess == Def->getDefiningAccess())
1797 return true;
1798
1799 // Look through phi accesses. Recursively scan all phi accesses by
1800 // adding them to a worklist. Bail when we run into a memory def that
1801 // does not match LoadAccess.
1802 SetVector<MemoryAccess *> ToCheck;
1803 MemoryAccess *Current =
1804 MSSA.getWalker()->getClobberingMemoryAccess(Def);
1805 // We don't want to bail when we run into the store memory def. But the
1806 // phi access may point to it, so pretend that we have already
1807 // checked it.
1808 ToCheck.insert(Def);
1809 ToCheck.insert(Current);
1810 // Start at current (1) to simulate already having checked Def.
1811 for (unsigned I = 1; I < ToCheck.size(); ++I) {
1812 Current = ToCheck[I];
1813 if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
1814 // Check all the operands.
1815 for (auto &Use : PhiAccess->incoming_values())
1816 ToCheck.insert(cast<MemoryAccess>(&Use));
1817 continue;
1818 }
1819
1820 // If we found a memory def, bail. This happens when we have an
1821 // unrelated write in between an otherwise noop store.
1822 assert(isa<MemoryDef>(Current) &&
1823        "Only MemoryDefs should reach here.");
1824 // TODO: Skip no alias MemoryDefs that have no aliasing reads.
1825 // We are searching for the definition of the store's destination.
1826 // So, if that is the same definition as the load, then this is a
1827 // noop. Otherwise, fail.
1828 if (LoadAccess != Current)
1829 return false;
1830 }
1831 return true;
1832 }
1833 }
1834
1835 return false;
1836 }
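  // Two minimal sketches of no-op stores:
  //   %v = load i32, i32* %p
  //   store i32 %v, i32* %p          ; stores back the value just loaded
  // and, for the calloc case:
  //   %m = call i8* @calloc(i64 1, i64 8)
  //   call void @llvm.memset.p0i8.i64(i8* %m, i8 0, i64 8, i1 false)
  //   ; writes zero to memory that calloc already zeroed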
1837};
1838
1839bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1840 DominatorTree &DT, PostDominatorTree &PDT,
1841 const TargetLibraryInfo &TLI) {
1842 bool MadeChange = false;
1843
1844 DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI);
1845 // For each store:
1846 for (unsigned I = 0; I < State.MemDefs.size(); I++) {
1847 MemoryDef *KillingDef = State.MemDefs[I];
1848 if (State.SkipStores.count(KillingDef))
1849 continue;
1850 Instruction *SI = KillingDef->getMemoryInst();
1851
1852 Optional<MemoryLocation> MaybeSILoc;
1853 if (State.isMemTerminatorInst(SI))
1854 MaybeSILoc = State.getLocForTerminator(SI).map(
1855 [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
1856 else
1857 MaybeSILoc = State.getLocForWriteEx(SI);
1858
1859 if (!MaybeSILoc) {
1860 LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
1861            << *SI << "\n");
1862 continue;
1863 }
1864 MemoryLocation SILoc = *MaybeSILoc;
1865 assert(SILoc.Ptr && "SILoc should not be null");
1866 const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);
1867
1868 MemoryAccess *Current = KillingDef;
1869 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
1870            << *Current << " (" << *SI << ")\n");
1871
1872 unsigned ScanLimit = MemorySSAScanLimit;
1873 unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
1874 unsigned PartialLimit = MemorySSAPartialStoreLimit;
1875 // Worklist of MemoryAccesses that may be killed by KillingDef.
1876 SetVector<MemoryAccess *> ToCheck;
1877
1878 if (SILocUnd)
1879 ToCheck.insert(KillingDef->getDefiningAccess());
1880
1881 bool Shortend = false;
1882 bool IsMemTerm = State.isMemTerminatorInst(SI);
1883 // Check if MemoryAccesses in the worklist are killed by KillingDef.
1884 for (unsigned I = 0; I < ToCheck.size(); I++) {
1885 Current = ToCheck[I];
1886 if (State.SkipStores.count(Current))
1887 continue;
1888
1889 Optional<MemoryAccess *> Next = State.getDomMemoryDef(
1890 KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit,
1891 IsMemTerm, PartialLimit);
1892
1893 if (!Next) {
1894 LLVM_DEBUG(dbgs() << " finished walk\n");
1895 continue;
1896 }
1897
1898 MemoryAccess *EarlierAccess = *Next;
1899 LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess);
1900 if (isa<MemoryPhi>(EarlierAccess)) {
1901 LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n");
1902 for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) {
1903 MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
1904 BasicBlock *IncomingBlock = IncomingAccess->getBlock();
1905 BasicBlock *PhiBlock = EarlierAccess->getBlock();
1906
1907 // We only consider incoming MemoryAccesses that come before the
1908 // MemoryPhi. Otherwise we could discover candidates that do not
1909 // strictly dominate our starting def.
1910 if (State.PostOrderNumbers[IncomingBlock] >
1911 State.PostOrderNumbers[PhiBlock])
1912 ToCheck.insert(IncomingAccess);
1913 }
1914 continue;
1915 }
1916 auto *NextDef = cast<MemoryDef>(EarlierAccess);
1917 Instruction *NI = NextDef->getMemoryInst();
1918 LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
1919 ToCheck.insert(NextDef->getDefiningAccess());
1920 NumGetDomMemoryDefPassed++;
1921
1922 if (!DebugCounter::shouldExecute(MemorySSACounter))
1923 continue;
1924
1925 MemoryLocation NILoc = *State.getLocForWriteEx(NI);
1926
1927 if (IsMemTerm) {
1928 const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
1929 if (SILocUnd != NIUnd)
1930 continue;
1931 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
1932            << "\n KILLER: " << *SI << '\n');
1933 State.deleteDeadInstruction(NI);
1934 ++NumFastStores;
1935 MadeChange = true;
1936 } else {
1937 // Check if NI overwrites SI.
1938 int64_t InstWriteOffset, DepWriteOffset;
1939 OverwriteResult OR = State.isOverwrite(SI, NI, SILoc, NILoc,
1940 DepWriteOffset, InstWriteOffset);
1941 if (OR == OW_MaybePartial) {
1942 auto Iter = State.IOLs.insert(
1943 std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
1944 NI->getParent(), InstOverlapIntervalsTy()));
1945 auto &IOL = Iter.first->second;
1946 OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset,
1947 NI, IOL);
1948 }
1949
1950 if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
1951 auto *Earlier = dyn_cast<StoreInst>(NI);
1952 auto *Later = dyn_cast<StoreInst>(SI);
1953 // We are re-using tryToMergePartialOverlappingStores, which requires
1954 // Earlier to dominate Later.
1955 // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
1956 if (Earlier && Later && DT.dominates(Earlier, Later)) {
1957 if (Constant *Merged = tryToMergePartialOverlappingStores(
1958 Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL,
1959 State.BatchAA, &DT)) {
1960
1961 // Update stored value of earlier store to merged constant.
1962 Earlier->setOperand(0, Merged);
1963 ++NumModifiedStores;
1964 MadeChange = true;
1965
1966 Shortend = true;
1967 // Remove later store and remove any outstanding overlap intervals
1968 // for the updated store.
1969 State.deleteDeadInstruction(Later);
1970 auto I = State.IOLs.find(Earlier->getParent());
1971 if (I != State.IOLs.end())
1972 I->second.erase(Earlier);
1973 break;
1974 }
1975 }
1976 }
1977
1978 if (OR == OW_Complete) {
1979 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
1980            << "\n KILLER: " << *SI << '\n');
1981 State.deleteDeadInstruction(NI);
1982 ++NumFastStores;
1983 MadeChange = true;
1984 }
1985 }
1986 }
1987
1988 // Check if the store is a no-op.
1989 if (!Shortend && isRemovable(SI) &&
1990 State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
1991 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *SI << '\n');
1992 State.deleteDeadInstruction(SI);
1993 NumRedundantStores++;
1994 MadeChange = true;
1995 continue;
1996 }
1997 }
1998
1999 if (EnablePartialOverwriteTracking)
2000 for (auto &KV : State.IOLs)
2001 MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI);
2002
2003 MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2004 return MadeChange;
2005}
2006} // end anonymous namespace
2007
2008//===----------------------------------------------------------------------===//
2009// DSE Pass
2010//===----------------------------------------------------------------------===//
2011PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2012 AliasAnalysis &AA = AM.getResult<AAManager>(F);
2013 const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2014 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2015 MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2016 PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2017
2018 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI);
2019
2020 #ifdef LLVM_ENABLE_STATS
2021 if (AreStatisticsEnabled())
2022 for (auto &I : instructions(F))
2023 NumRemainingStores += isa<StoreInst>(&I);
2024#endif
2025
2026 if (!Changed)
2027 return PreservedAnalyses::all();
2028
2029 PreservedAnalyses PA;
2030 PA.preserveSet<CFGAnalyses>();
2031 PA.preserve<MemorySSAAnalysis>();
2032 return PA;
2033}
2034
2035namespace {
2036
2037/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
2038class DSELegacyPass : public FunctionPass {
2039public:
2040 static char ID; // Pass identification, replacement for typeid
2041
2042 DSELegacyPass() : FunctionPass(ID) {
2043 initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
2044 }
2045
2046 bool runOnFunction(Function &F) override {
2047 if (skipFunction(F))
2048 return false;
2049
2050 AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2051 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2052 const TargetLibraryInfo &TLI =
2053 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2054 MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2055 PostDominatorTree &PDT =
2056 getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2057
2058 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI);
2059
2060 #ifdef LLVM_ENABLE_STATS
2061 if (AreStatisticsEnabled())
2062 for (auto &I : instructions(F))
2063 NumRemainingStores += isa<StoreInst>(&I);
2064#endif
2065
2066 return Changed;
2067 }
2068
2069 void getAnalysisUsage(AnalysisUsage &AU) const override {
2070 AU.setPreservesCFG();
2071 AU.addRequired<AAResultsWrapperPass>();
2072 AU.addRequired<TargetLibraryInfoWrapperPass>();
2073 AU.addPreserved<GlobalsAAWrapperPass>();
2074 AU.addRequired<DominatorTreeWrapperPass>();
2075 AU.addPreserved<DominatorTreeWrapperPass>();
2076 AU.addRequired<PostDominatorTreeWrapperPass>();
2077 AU.addRequired<MemorySSAWrapperPass>();
2078 AU.addPreserved<PostDominatorTreeWrapperPass>();
2079 AU.addPreserved<MemorySSAWrapperPass>();
2080 }
2081};
2082
2083} // end anonymous namespace
2084
2085char DSELegacyPass::ID = 0;
2086
2087INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2088                      false)
2089INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2090INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2091INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2092INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2093INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2094INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2095INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2096INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2097                    false)
2098
2099FunctionPass *llvm::createDeadStoreEliminationPass() {
2100 return new DSELegacyPass();
2101}
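
For comparison, a hedged usage sketch of the legacy entry point above; legacy::PassManager schedules the analyses declared in getAnalysisUsage() automatically. The helper name runLegacyDSE is illustrative.

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Scalar.h"

// Illustrative only: run legacy DSE over every function in module `M`.
static void runLegacyDSE(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createDeadStoreEliminationPass());
  PM.run(M);
}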

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/Analysis/MemorySSA.h

1//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file exposes an interface to building/using memory SSA to
11/// walk memory instructions using a use/def graph.
12///
13/// Memory SSA class builds an SSA form that links together memory access
14/// instructions such as loads, stores, atomics, and calls. Additionally, it
15/// does a trivial form of "heap versioning". Every time the memory state changes
16/// in the program, we generate a new heap version. It generates
17/// MemoryDef/Uses/Phis that are overlayed on top of the existing instructions.
18///
19/// As a trivial example,
20/// define i32 @main() #0 {
21/// entry:
22/// %call = call noalias i8* @_Znwm(i64 4) #2
23/// %0 = bitcast i8* %call to i32*
24/// %call1 = call noalias i8* @_Znwm(i64 4) #2
25/// %1 = bitcast i8* %call1 to i32*
26/// store i32 5, i32* %0, align 4
27/// store i32 7, i32* %1, align 4
28/// %2 = load i32* %0, align 4
29/// %3 = load i32* %1, align 4
30/// %add = add nsw i32 %2, %3
31/// ret i32 %add
32/// }
33///
34/// Will become
35/// define i32 @main() #0 {
36/// entry:
37/// ; 1 = MemoryDef(0)
38/// %call = call noalias i8* @_Znwm(i64 4) #3
39/// %2 = bitcast i8* %call to i32*
40/// ; 2 = MemoryDef(1)
41/// %call1 = call noalias i8* @_Znwm(i64 4) #3
42/// %4 = bitcast i8* %call1 to i32*
43/// ; 3 = MemoryDef(2)
44/// store i32 5, i32* %2, align 4
45/// ; 4 = MemoryDef(3)
46/// store i32 7, i32* %4, align 4
47/// ; MemoryUse(3)
48/// %7 = load i32* %2, align 4
49/// ; MemoryUse(4)
50/// %8 = load i32* %4, align 4
51/// %add = add nsw i32 %7, %8
52/// ret i32 %add
53/// }
54///
55/// Given this form, all the stores that could ever affect the load at %8 can
56/// be found by taking the MemoryUse associated with it and walking from use
57/// to def until you hit the top of the function.
58///
59/// Each def also has a list of users associated with it, so you can walk from
60/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
61/// but not the RHS of MemoryDefs. You can see this above at %7, which would
62/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
63/// store, all the MemoryUses on its use lists are may-aliases of that store
64/// (but the MemoryDefs on its use list may not be).
65///
66/// MemoryDefs are not disambiguated because it would require multiple reaching
67/// definitions, which would require multiple phis, and multiple memoryaccesses
68/// per instruction.
69//
70//===----------------------------------------------------------------------===//
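
A small, hedged sketch of the walk described above: starting from a load's MemoryUse and hopping from use to def until the live-on-entry def. It assumes an already-built MemorySSA and stops at MemoryPhis, which would need per-path handling; names are illustrative.

// Sketch only; `MSSA` and `Load` are assumed inputs, not defined here.
static void walkToEntry(llvm::MemorySSA &MSSA, llvm::LoadInst *Load) {
  llvm::MemoryAccess *Cur = MSSA.getMemoryAccess(Load);
  while (Cur && !MSSA.isLiveOnEntryDef(Cur)) {
    auto *UOD = llvm::dyn_cast<llvm::MemoryUseOrDef>(Cur);
    if (!UOD)
      break; // Reached a MemoryPhi; a real walk picks an incoming value.
    Cur = UOD->getDefiningAccess(); // Hop one step up the def chain.
  }
}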
71
72#ifndef LLVM_ANALYSIS_MEMORYSSA_H
73#define LLVM_ANALYSIS_MEMORYSSA_H
74
75#include "llvm/ADT/DenseMap.h"
76#include "llvm/ADT/GraphTraits.h"
77#include "llvm/ADT/SmallPtrSet.h"
78#include "llvm/ADT/SmallVector.h"
79#include "llvm/ADT/ilist.h"
80#include "llvm/ADT/ilist_node.h"
81#include "llvm/ADT/iterator.h"
82#include "llvm/ADT/iterator_range.h"
83#include "llvm/ADT/simple_ilist.h"
84#include "llvm/Analysis/AliasAnalysis.h"
85#include "llvm/Analysis/MemoryLocation.h"
86#include "llvm/Analysis/PHITransAddr.h"
87#include "llvm/IR/BasicBlock.h"
88#include "llvm/IR/DerivedUser.h"
89#include "llvm/IR/Dominators.h"
90#include "llvm/IR/Module.h"
91#include "llvm/IR/Operator.h"
92#include "llvm/IR/Type.h"
93#include "llvm/IR/Use.h"
94#include "llvm/IR/User.h"
95#include "llvm/IR/Value.h"
96#include "llvm/IR/ValueHandle.h"
97#include "llvm/Pass.h"
98#include "llvm/Support/Casting.h"
99#include "llvm/Support/CommandLine.h"
100#include <algorithm>
101#include <cassert>
102#include <cstddef>
103#include <iterator>
104#include <memory>
105#include <utility>
106
107namespace llvm {
108
109/// Enables MemorySSA as a dependency for loop passes.
110extern cl::opt<bool> EnableMSSALoopDependency;
111
112class AllocaInst;
113class Function;
114class Instruction;
115class MemoryAccess;
116class MemorySSAWalker;
117class LLVMContext;
118class raw_ostream;
119
120namespace MSSAHelpers {
121
122struct AllAccessTag {};
123struct DefsOnlyTag {};
124
125} // end namespace MSSAHelpers
126
127enum : unsigned {
128 // Used to signify what the default invalid ID is for MemoryAccess's
129 // getID()
130 INVALID_MEMORYACCESS_ID = -1U
131};
132
133template <class T> class memoryaccess_def_iterator_base;
134using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
135using const_memoryaccess_def_iterator =
136 memoryaccess_def_iterator_base<const MemoryAccess>;
137
138// The base for all memory accesses. All memory accesses in a block are
139// linked together using an intrusive list.
140class MemoryAccess
141 : public DerivedUser,
142 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
143 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
144public:
145 using AllAccessType =
146 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
147 using DefsOnlyType =
148 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
149
150 MemoryAccess(const MemoryAccess &) = delete;
151 MemoryAccess &operator=(const MemoryAccess &) = delete;
152
153 void *operator new(size_t) = delete;
154
155 // Methods for support type inquiry through isa, cast, and
156 // dyn_cast
157 static bool classof(const Value *V) {
158 unsigned ID = V->getValueID();
159 return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
160 }
161
162 BasicBlock *getBlock() const { return Block; }
163
164 void print(raw_ostream &OS) const;
165 void dump() const;
166
167 /// The user iterators for a memory access
168 using iterator = user_iterator;
169 using const_iterator = const_user_iterator;
170
171 /// This iterator walks over all of the defs in a given
172 /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
173 /// MemoryUse/MemoryDef, this walks the defining access.
174 memoryaccess_def_iterator defs_begin();
175 const_memoryaccess_def_iterator defs_begin() const;
176 memoryaccess_def_iterator defs_end();
177 const_memoryaccess_def_iterator defs_end() const;
178
179 /// Get the iterators for the all access list and the defs only list
180 /// We default to the all access list.
181 AllAccessType::self_iterator getIterator() {
182 return this->AllAccessType::getIterator();
183 }
184 AllAccessType::const_self_iterator getIterator() const {
185 return this->AllAccessType::getIterator();
186 }
187 AllAccessType::reverse_self_iterator getReverseIterator() {
188 return this->AllAccessType::getReverseIterator();
189 }
190 AllAccessType::const_reverse_self_iterator getReverseIterator() const {
191 return this->AllAccessType::getReverseIterator();
192 }
193 DefsOnlyType::self_iterator getDefsIterator() {
194 return this->DefsOnlyType::getIterator();
195 }
196 DefsOnlyType::const_self_iterator getDefsIterator() const {
197 return this->DefsOnlyType::getIterator();
198 }
199 DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
200 return this->DefsOnlyType::getReverseIterator();
201 }
202 DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
203 return this->DefsOnlyType::getReverseIterator();
204 }
205
206protected:
207 friend class MemoryDef;
208 friend class MemoryPhi;
209 friend class MemorySSA;
210 friend class MemoryUse;
211 friend class MemoryUseOrDef;
212
213 /// Used by MemorySSA to change the block of a MemoryAccess when it is
214 /// moved.
215 void setBlock(BasicBlock *BB) { Block = BB; }
216
217 /// Used for debugging and tracking things about MemoryAccesses.
218 /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
219 inline unsigned getID() const;
220
221 MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
222 BasicBlock *BB, unsigned NumOperands)
223 : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
224 Block(BB) {}
225
226 // Use deleteValue() to delete a generic MemoryAccess.
227 ~MemoryAccess() = default;
228
229private:
230 BasicBlock *Block;
231};
232
233template <>
234struct ilist_alloc_traits<MemoryAccess> {
235 static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
236};
237
238inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
239 MA.print(OS);
240 return OS;
241}
242
243/// Class that has the common methods + fields of memory uses/defs. It's
244/// a little awkward to have, but there are many cases where we want either a
245/// use or def, and there are many cases where uses are needed (defs aren't
246/// acceptable), and vice-versa.
247///
248/// This class should never be instantiated directly; make a MemoryUse or
249/// MemoryDef instead.
250class MemoryUseOrDef : public MemoryAccess {
251public:
252 void *operator new(size_t) = delete;
253
254  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
255
256 /// Get the instruction that this MemoryUse represents.
257 Instruction *getMemoryInst() const { return MemoryInstruction; }
258
259 /// Get the access that produces the memory state used by this Use.
260 MemoryAccess *getDefiningAccess() const { return getOperand(0); }
261
262 static bool classof(const Value *MA) {
263 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
264 }
265
266 // Sadly, these have to be public because they are needed in some of the
267 // iterators.
268 inline bool isOptimized() const;
269 inline MemoryAccess *getOptimized() const;
270 inline void setOptimized(MemoryAccess *);
271
272 // Retrieve AliasResult type of the optimized access. Ideally this would be
273 // returned by the caching walker and may go away in the future.
274 Optional<AliasResult> getOptimizedAccessType() const {
275 return isOptimized() ? OptimizedAccessAlias : None;
276 }
277
278 /// Reset the ID of what this MemoryUse was optimized to, causing it to
279 /// be rewalked by the walker if necessary.
280 /// This really should only be called by tests.
281 inline void resetOptimized();
282
283protected:
284 friend class MemorySSA;
285 friend class MemorySSAUpdater;
286
287 MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
288 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
289 unsigned NumOperands)
290 : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
291 MemoryInstruction(MI), OptimizedAccessAlias(AliasResult::MayAlias) {
292 setDefiningAccess(DMA);
293 }
294
295 // Use deleteValue() to delete a generic MemoryUseOrDef.
296 ~MemoryUseOrDef() = default;
297
298 void setOptimizedAccessType(Optional<AliasResult> AR) {
299 OptimizedAccessAlias = AR;
300 }
301
302 void setDefiningAccess(
303 MemoryAccess *DMA, bool Optimized = false,
304 Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias)) {
305 if (!Optimized) {
306 setOperand(0, DMA);
307 return;
308 }
309 setOptimized(DMA);
310 setOptimizedAccessType(AR);
311 }
312
313private:
314 Instruction *MemoryInstruction;
315 Optional<AliasResult> OptimizedAccessAlias;
316};
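
A one-line sketch of how the optimized/defining split above is typically consumed: prefer the cached optimized clobber when it is valid, otherwise fall back to the plain defining access. The helper name is illustrative.

// Sketch: pick the best-known clobber for a use-or-def.
static llvm::MemoryAccess *bestKnownClobber(llvm::MemoryUseOrDef *MUD) {
  return MUD->isOptimized() ? MUD->getOptimized() : MUD->getDefiningAccess();
}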
317
318/// Represents read-only accesses to memory
319///
320/// In particular, the set of Instructions that will be represented by
321/// MemoryUse's is exactly the set of Instructions for which
322/// AliasAnalysis::getModRefInfo returns "Ref".
323class MemoryUse final : public MemoryUseOrDef {
324public:
325  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
326
327 MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
328 : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
329 /*NumOperands=*/1) {}
330
331 // allocate space for exactly one operand
332 void *operator new(size_t s) { return User::operator new(s, 1); }
333
334 static bool classof(const Value *MA) {
335 return MA->getValueID() == MemoryUseVal;
336 }
337
338 void print(raw_ostream &OS) const;
339
340 void setOptimized(MemoryAccess *DMA) {
341 OptimizedID = DMA->getID();
342 setOperand(0, DMA);
343 }
344
345 bool isOptimized() const {
346 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
347 }
348
349 MemoryAccess *getOptimized() const {
350 return getDefiningAccess();
351 }
352
353 void resetOptimized() {
354 OptimizedID = INVALID_MEMORYACCESS_ID;
355 }
356
357protected:
358 friend class MemorySSA;
359
360private:
361 static void deleteMe(DerivedUser *Self);
362
363 unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
364};
365
366template <>
367struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
368DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
369
370/// Represents a read-write access to memory, whether it is a must-alias,
371/// or a may-alias.
372///
373/// In particular, the set of Instructions that will be represented by
374/// MemoryDef's is exactly the set of Instructions for which
375/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
376/// Note that, in order to provide def-def chains, all defs also have a use
377/// associated with them. This use points to the nearest reaching
378/// MemoryDef/MemoryPhi.
379class MemoryDef final : public MemoryUseOrDef {
380public:
381 friend class MemorySSA;
382
383  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
384
385 MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
386 unsigned Ver)
387 : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
388 /*NumOperands=*/2),
389 ID(Ver) {}
390
391 // allocate space for exactly two operands
392 void *operator new(size_t s) { return User::operator new(s, 2); }
393
394 static bool classof(const Value *MA) {
395 return MA->getValueID() == MemoryDefVal;
396 }
397
398 void setOptimized(MemoryAccess *MA) {
399 setOperand(1, MA);
400 OptimizedID = MA->getID();
401 }
402
403 MemoryAccess *getOptimized() const {
404 return cast_or_null<MemoryAccess>(getOperand(1));
405 }
406
407 bool isOptimized() const {
408 return getOptimized() && OptimizedID == getOptimized()->getID();
409 }
410
411 void resetOptimized() {
412 OptimizedID = INVALID_MEMORYACCESS_ID;
413 setOperand(1, nullptr);
414 }
415
416 void print(raw_ostream &OS) const;
417
418 unsigned getID() const { return ID; }
419
420private:
421 static void deleteMe(DerivedUser *Self);
422
423 const unsigned ID;
424 unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
425};
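
A hedged sketch of the def-def chain noted in the class comment above: operand 0 of a MemoryDef is its own reaching def, so defs can be chained upward like uses. The helper and its simplifying assumption (no phi on the path) are illustrative.

// Sketch: count the defs between MD and liveOnEntry; a phi ends the walk.
static unsigned defDepth(llvm::MemorySSA &MSSA, llvm::MemoryDef *MD) {
  unsigned Depth = 0;
  llvm::MemoryAccess *Cur = MD->getDefiningAccess();
  while (Cur && !MSSA.isLiveOnEntryDef(Cur)) {
    auto *D = llvm::dyn_cast<llvm::MemoryDef>(Cur);
    if (!D)
      break; // MemoryPhi: stop the simple walk.
    ++Depth;
    Cur = D->getDefiningAccess();
  }
  return Depth;
}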
426
427template <>
428struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
429DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
430
431template <>
432struct OperandTraits<MemoryUseOrDef> {
433 static Use *op_begin(MemoryUseOrDef *MUD) {
434 if (auto *MU = dyn_cast<MemoryUse>(MUD))
435 return OperandTraits<MemoryUse>::op_begin(MU);
436 return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
437 }
438
439 static Use *op_end(MemoryUseOrDef *MUD) {
440 if (auto *MU = dyn_cast<MemoryUse>(MUD))
441 return OperandTraits<MemoryUse>::op_end(MU);
442 return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
443 }
444
445 static unsigned operands(const MemoryUseOrDef *MUD) {
446 if (const auto *MU = dyn_cast<MemoryUse>(MUD))
447 return OperandTraits<MemoryUse>::operands(MU);
448 return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
449 }
450};
451DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
452
453/// Represents phi nodes for memory accesses.
454///
455/// These have the same semantic as regular phi nodes, with the exception that
456/// only one phi will ever exist in a given basic block.
457/// Guaranteeing one phi per block means guaranteeing there is only ever one
458/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
459/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
460/// a MemoryPhi's operands.
461/// That is, given
462/// if (a) {
463/// store %a
464/// store %b
465/// }
466/// it *must* be transformed into
467/// if (a) {
468/// 1 = MemoryDef(liveOnEntry)
469/// store %a
470/// 2 = MemoryDef(1)
471/// store %b
472/// }
473/// and *not*
474/// if (a) {
475/// 1 = MemoryDef(liveOnEntry)
476/// store %a
477/// 2 = MemoryDef(liveOnEntry)
478/// store %b
479/// }
480/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
481/// end of the branch, and if there are not two phi nodes, one will be
482/// disconnected completely from the SSA graph below that point.
483/// Because MemoryUse's do not generate new definitions, they do not have this
484/// issue.
485class MemoryPhi final : public MemoryAccess {
486 // allocate space for exactly zero operands
487 void *operator new(size_t s) { return User::operator new(s); }
488
489public:
490 /// Provide fast operand accessors
491  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
492
493 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
494 : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
495 ReservedSpace(NumPreds) {
496 allocHungoffUses(ReservedSpace);
497 }
498
499 // Block iterator interface. This provides access to the list of incoming
500 // basic blocks, which parallels the list of incoming values.
501 using block_iterator = BasicBlock **;
502 using const_block_iterator = BasicBlock *const *;
503
504 block_iterator block_begin() {
505 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
506 }
507
508 const_block_iterator block_begin() const {
509 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
510 }
511
512 block_iterator block_end() { return block_begin() + getNumOperands(); }
513
514 const_block_iterator block_end() const {
515 return block_begin() + getNumOperands();
516 }
517
518 iterator_range<block_iterator> blocks() {
519 return make_range(block_begin(), block_end());
520 }
521
522 iterator_range<const_block_iterator> blocks() const {
523 return make_range(block_begin(), block_end());
524 }
525
526 op_range incoming_values() { return operands(); }
527
528 const_op_range incoming_values() const { return operands(); }
529
530 /// Return the number of incoming edges
531 unsigned getNumIncomingValues() const { return getNumOperands(); }
532
533 /// Return incoming value number x
534 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
535 void setIncomingValue(unsigned I, MemoryAccess *V) {
536    assert(V && "PHI node got a null value!");
537 setOperand(I, V);
538 }
539
540 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
541 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
542
543 /// Return incoming basic block number @p i.
544 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
545
546 /// Return incoming basic block corresponding
547 /// to an operand of the PHI.
548 BasicBlock *getIncomingBlock(const Use &U) const {
549    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
550 return getIncomingBlock(unsigned(&U - op_begin()));
551 }
552
553 /// Return incoming basic block corresponding
554 /// to value use iterator.
555 BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
556 return getIncomingBlock(I.getUse());
557 }
558
559 void setIncomingBlock(unsigned I, BasicBlock *BB) {
560    assert(BB && "PHI node got a null basic block!");
561 block_begin()[I] = BB;
562 }
563
564 /// Add an incoming value to the end of the PHI list
565 void addIncoming(MemoryAccess *V, BasicBlock *BB) {
566 if (getNumOperands() == ReservedSpace)
567 growOperands(); // Get more space!
568 // Initialize some new operands.
569 setNumHungOffUseOperands(getNumOperands() + 1);
570 setIncomingValue(getNumOperands() - 1, V);
571 setIncomingBlock(getNumOperands() - 1, BB);
572 }
573
574 /// Return the first index of the specified basic
575 /// block in the value list for this PHI. Returns -1 if no instance.
576 int getBasicBlockIndex(const BasicBlock *BB) const {
577 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
578 if (block_begin()[I] == BB)
579 return I;
580 return -1;
581 }
582
583 MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
584 int Idx = getBasicBlockIndex(BB);
585    assert(Idx >= 0 && "Invalid basic block argument!");
586 return getIncomingValue(Idx);
587 }
588
589 // After deleting incoming position I, the order of incoming may be changed.
590 void unorderedDeleteIncoming(unsigned I) {
591 unsigned E = getNumOperands();
592    assert(I < E && "Cannot remove out of bounds Phi entry.");
593 // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
594 // itself should be deleted.
595    assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with "
596                     "at least 2 values.");
597 setIncomingValue(I, getIncomingValue(E - 1));
598 setIncomingBlock(I, block_begin()[E - 1]);
599 setOperand(E - 1, nullptr);
600 block_begin()[E - 1] = nullptr;
601 setNumHungOffUseOperands(getNumOperands() - 1);
602 }
603
604 // After deleting entries that satisfy Pred, remaining entries may have
605 // changed order.
606 template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
607 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
608 if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
609 unorderedDeleteIncoming(I);
610 E = getNumOperands();
611 --I;
612 }
613    assert(getNumOperands() >= 1 &&
614           "Cannot remove all incoming blocks in a MemoryPhi.");
615 }
616
617 // After deleting incoming block BB, the incoming blocks order may be changed.
618 void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
619 unorderedDeleteIncomingIf(
620 [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
621 }
622
623 // After deleting incoming memory access MA, the incoming accesses order may
624 // be changed.
625 void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
626 unorderedDeleteIncomingIf(
627 [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
628 }
629
630 static bool classof(const Value *V) {
631 return V->getValueID() == MemoryPhiVal;
632 }
633
634 void print(raw_ostream &OS) const;
635
636 unsigned getID() const { return ID; }
637
638protected:
639 friend class MemorySSA;
640
641 /// this is more complicated than the generic
642 /// User::allocHungoffUses, because we have to allocate Uses for the incoming
643 /// values and pointers to the incoming blocks, all in one allocation.
644 void allocHungoffUses(unsigned N) {
645 User::allocHungoffUses(N, /* IsPhi */ true);
646 }
647
648private:
649 // For debugging only
650 const unsigned ID;
651 unsigned ReservedSpace;
652
653 /// This grows the operand list in response to a push_back style of
654 /// operation. This grows the number of ops by 1.5 times.
655 void growOperands() {
656 unsigned E = getNumOperands();
657 // 2 op PHI nodes are VERY common, so reserve at least enough for that.
658 ReservedSpace = std::max(E + E / 2, 2u);
659 growHungoffUses(ReservedSpace, /* IsPhi */ true);
660 }
661
662 static void deleteMe(DerivedUser *Self);
663};
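
A hedged sketch of reading a MemoryPhi with the accessors above; the incoming values and incoming blocks are parallel lists. The helper name is illustrative.

// Illustrative dump of each (incoming block, incoming access) pair of a phi.
static void dumpPhiIncoming(const llvm::MemoryPhi *Phi) {
  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
    llvm::errs() << Phi->getIncomingBlock(I)->getName() << " -> ";
    Phi->getIncomingValue(I)->print(llvm::errs());
    llvm::errs() << '\n';
  }
}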
664
665inline unsigned MemoryAccess::getID() const {
666  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
667         "only memory defs and phis have ids");
668 if (const auto *MD = dyn_cast<MemoryDef>(this))
669 return MD->getID();
670 return cast<MemoryPhi>(this)->getID();
671}
672
673inline bool MemoryUseOrDef::isOptimized() const {
674 if (const auto *MD = dyn_cast<MemoryDef>(this))
675 return MD->isOptimized();
676 return cast<MemoryUse>(this)->isOptimized();
677}
678
679inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
680 if (const auto *MD = dyn_cast<MemoryDef>(this))
681 return MD->getOptimized();
682 return cast<MemoryUse>(this)->getOptimized();
683}
684
685inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
686 if (auto *MD = dyn_cast<MemoryDef>(this))
687 MD->setOptimized(MA);
688 else
689 cast<MemoryUse>(this)->setOptimized(MA);
690}
691
692inline void MemoryUseOrDef::resetOptimized() {
693 if (auto *MD = dyn_cast<MemoryDef>(this))
694 MD->resetOptimized();
695 else
696 cast<MemoryUse>(this)->resetOptimized();
697}
698
699template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
700DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
701
702/// Encapsulates MemorySSA, including all data associated with memory
703/// accesses.
704class MemorySSA {
705public:
706 MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
707
708 // MemorySSA must remain where it's constructed; Walkers it creates store
709 // pointers to it.
710 MemorySSA(MemorySSA &&) = delete;
711
712 ~MemorySSA();
713
714 MemorySSAWalker *getWalker();
715 MemorySSAWalker *getSkipSelfWalker();
716
717 /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
718 /// access associated with it. If passed a basic block gets the memory phi
719 /// node that exists for that block, if there is one. Otherwise, this will get
720 /// a MemoryUseOrDef.
721 MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
722 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
723 }
724
725 MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
726 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
727 }
728
729 DominatorTree &getDomTree() const { return *DT; }
730
731 void dump() const;
732 void print(raw_ostream &) const;
733
734 /// Return true if \p MA represents the live on entry value
735 ///
736 /// Loads and stores from pointer arguments and other global values may be
737 /// defined by memory operations that do not occur in the current function, so
738 /// they may be live on entry to the function. MemorySSA represents such
739 /// memory state by the live on entry definition, which is guaranteed to occur
740 /// before any other memory access in the function.
741 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
742 return MA == LiveOnEntryDef.get();
    8: Assuming the condition is false
    9: Returning zero, which participates in a condition later
743 }
744
745 inline MemoryAccess *getLiveOnEntryDef() const {
746 return LiveOnEntryDef.get();
747 }
748
749  // Sadly, iplists, by default, own and delete pointers added to the
750 // list. It's not currently possible to have two iplists for the same type,
751 // where one owns the pointers, and one does not. This is because the traits
752 // are per-type, not per-tag. If this ever changes, we should make the
753 // DefList an iplist.
754 using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
755 using DefsList =
756 simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
757
758 /// Return the list of MemoryAccess's for a given basic block.
759 ///
760 /// This list is not modifiable by the user.
761 const AccessList *getBlockAccesses(const BasicBlock *BB) const {
762 return getWritableBlockAccesses(BB);
763 }
764
765 /// Return the list of MemoryDef's and MemoryPhi's for a given basic
766 /// block.
767 ///
768 /// This list is not modifiable by the user.
769 const DefsList *getBlockDefs(const BasicBlock *BB) const {
770 return getWritableBlockDefs(BB);
771 }
772
773 /// Given two memory accesses in the same basic block, determine
774 /// whether MemoryAccess \p A dominates MemoryAccess \p B.
775 bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
776
777 /// Given two memory accesses in potentially different blocks,
778 /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
779 bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
780
781 /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
782 /// dominates Use \p B.
783 bool dominates(const MemoryAccess *A, const Use &B) const;
784
785 /// Verify that MemorySSA is self consistent (IE definitions dominate
786 /// all uses, uses appear in the right places). This is used by unit tests.
787 void verifyMemorySSA() const;
788
789 /// Used in various insertion functions to specify whether we are talking
790 /// about the beginning or end of a block.
791 enum InsertionPlace { Beginning, End, BeforeTerminator };
792
793protected:
794  // Used by the MemorySSA annotator, dumpers, and the wrapper pass
795 friend class MemorySSAAnnotatedWriter;
796 friend class MemorySSAPrinterLegacyPass;
797 friend class MemorySSAUpdater;
798
799 void verifyOrderingDominationAndDefUses(Function &F) const;
800 void verifyDominationNumbers(const Function &F) const;
801 void verifyPrevDefInPhis(Function &F) const;
802
803 // This is used by the use optimizer and updater.
804 AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
805 auto It = PerBlockAccesses.find(BB);
806 return It == PerBlockAccesses.end() ? nullptr : It->second.get();
807 }
808
809 // This is used by the use optimizer and updater.
810 DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
811 auto It = PerBlockDefs.find(BB);
812 return It == PerBlockDefs.end() ? nullptr : It->second.get();
813 }
814
815  // These are used by the updater to perform various internal MemorySSA
816  // machinations. They do not always leave the IR in a correct state, and
817  // rely on the updater to fix up what they break, so they are not public.
818
819 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
820 void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
821
822 // Rename the dominator tree branch rooted at BB.
823 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
824 SmallPtrSetImpl<BasicBlock *> &Visited) {
825 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
826 }
827
828 void removeFromLookups(MemoryAccess *);
829 void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
830 void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
831 InsertionPlace);
832 void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
833 AccessList::iterator);
834 MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
835 const MemoryUseOrDef *Template = nullptr,
836 bool CreationMustSucceed = true);
837
838private:
839 template <class AliasAnalysisType> class ClobberWalkerBase;
840 template <class AliasAnalysisType> class CachingWalker;
841 template <class AliasAnalysisType> class SkipSelfWalker;
842 class OptimizeUses;
843
844 CachingWalker<AliasAnalysis> *getWalkerImpl();
845 void buildMemorySSA(BatchAAResults &BAA);
846
847 void prepareForMoveTo(MemoryAccess *, BasicBlock *);
848 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
849
850 using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
851 using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
852
853 void markUnreachableAsLiveOnEntry(BasicBlock *BB);
854 MemoryPhi *createMemoryPhi(BasicBlock *BB);
855 template <typename AliasAnalysisType>
856 MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
857 const MemoryUseOrDef *Template = nullptr);
858 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
859 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
860 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
861 void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
862 SmallPtrSetImpl<BasicBlock *> &Visited,
863 bool SkipVisited = false, bool RenameAllUses = false);
864 AccessList *getOrCreateAccessList(const BasicBlock *);
865 DefsList *getOrCreateDefsList(const BasicBlock *);
866 void renumberBlock(const BasicBlock *) const;
867 AliasAnalysis *AA;
868 DominatorTree *DT;
869 Function &F;
870
871 // Memory SSA mappings
872 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
873
874 // These two mappings contain the main block to access/def mappings for
875 // MemorySSA. The list contained in PerBlockAccesses really owns all the
876 // MemoryAccesses.
877 // Both maps maintain the invariant that if a block is found in them, the
878 // corresponding list is not empty, and if a block is not found in them, the
879 // corresponding list is empty.
880 AccessMap PerBlockAccesses;
881 DefsMap PerBlockDefs;
882 std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
883
884 // Domination mappings
885 // Note that the numbering is local to a block, even though the map is
886 // global.
887 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
888 mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
889
890 // Memory SSA building info
891 std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
892 std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
893 std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
894 unsigned NextID;
895};
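
A hedged sketch pulling together pieces of the MemorySSA interface above: look up a block's access list, test a def against the live-on-entry def. `MSSA` is an already-built MemorySSA; the helper name is illustrative.

// Sketch: true if the first MemoryDef in BB is defined by function entry.
static bool firstDefIsEntry(const llvm::MemorySSA &MSSA,
                            const llvm::BasicBlock *BB) {
  const llvm::MemorySSA::AccessList *Accesses = MSSA.getBlockAccesses(BB);
  if (!Accesses)
    return false; // No accesses recorded for this block.
  for (const llvm::MemoryAccess &MA : *Accesses)
    if (const auto *Def = llvm::dyn_cast<llvm::MemoryDef>(&MA))
      return MSSA.isLiveOnEntryDef(Def->getDefiningAccess());
  return false;
}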
896
897// Internal MemorySSA utils, for use by MemorySSA classes and walkers
898class MemorySSAUtil {
899protected:
900 friend class GVNHoist;
901 friend class MemorySSAWalker;
902
903 // This function should not be used by new passes.
904 static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
905 AliasAnalysis &AA);
906};
907
908// This pass does eager building and then printing of MemorySSA. It is used by
909// the tests to be able to build, dump, and verify Memory SSA.
910class MemorySSAPrinterLegacyPass : public FunctionPass {
911public:
912 MemorySSAPrinterLegacyPass();
913
914 bool runOnFunction(Function &) override;
915 void getAnalysisUsage(AnalysisUsage &AU) const override;
916
917 static char ID;
918};
919
920/// An analysis that produces \c MemorySSA for a function.
921///
922class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
923 friend AnalysisInfoMixin<MemorySSAAnalysis>;
924
925 static AnalysisKey Key;
926
927public:
928 // Wrap MemorySSA result to ensure address stability of internal MemorySSA
929 // pointers after construction. Use a wrapper class instead of plain
930 // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
931 struct Result {
932 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
933
934 MemorySSA &getMSSA() { return *MSSA.get(); }
935
936 std::unique_ptr<MemorySSA> MSSA;
937
938 bool invalidate(Function &F, const PreservedAnalyses &PA,
939 FunctionAnalysisManager::Invalidator &Inv);
940 };
941
942 Result run(Function &F, FunctionAnalysisManager &AM);
943};
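
Usage sketch inside a hypothetical new-PM pass body: the Result wrapper above is what getResult returns, and getMSSA() unwraps it. The function name is illustrative.

// Illustrative pass body that merely verifies MemorySSA.
llvm::PreservedAnalyses runVerifyOnly(llvm::Function &F,
                                      llvm::FunctionAnalysisManager &AM) {
  llvm::MemorySSA &MSSA = AM.getResult<llvm::MemorySSAAnalysis>(F).getMSSA();
  MSSA.verifyMemorySSA();
  return llvm::PreservedAnalyses::all();
}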
944
945/// Printer pass for \c MemorySSA.
946class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
947 raw_ostream &OS;
948
949public:
950 explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
951
952 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
953};
954
955/// Verifier pass for \c MemorySSA.
956struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
957 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
958};
959
960/// Legacy analysis pass which computes \c MemorySSA.
961class MemorySSAWrapperPass : public FunctionPass {
962public:
963 MemorySSAWrapperPass();
964
965 static char ID;
966
967 bool runOnFunction(Function &) override;
968 void releaseMemory() override;
969 MemorySSA &getMSSA() { return *MSSA; }
970 const MemorySSA &getMSSA() const { return *MSSA; }
971
972 void getAnalysisUsage(AnalysisUsage &AU) const override;
973
974 void verifyAnalysis() const override;
975 void print(raw_ostream &OS, const Module *M = nullptr) const override;
976
977private:
978 std::unique_ptr<MemorySSA> MSSA;
979};
980
981/// This is the generic walker interface for walkers of MemorySSA.
982/// Walkers are used to further disambiguate the def-use chains MemorySSA
983/// gives you, or otherwise to produce better info than MemorySSA gives
984/// you.
985/// In particular, while the def-use chains provide basic information, and are
986/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
987/// MemoryUse as AliasAnalysis considers it, a user may want better or other
988/// information. In particular, they may want to use SCEV info to further
989/// disambiguate memory accesses, or they may want the nearest dominating
990/// may-aliasing MemoryDef for a call or a store. This API enables a
991/// standardized interface to getting and using that info.
992class MemorySSAWalker {
993public:
994 MemorySSAWalker(MemorySSA *);
995 virtual ~MemorySSAWalker() = default;
996
997 using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
998
999 /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
1000 /// will give you the nearest dominating MemoryAccess that Mod's the location
1001 /// the instruction accesses (by skipping any def which AA can prove does not
1002 /// alias the location(s) accessed by the instruction given).
1003 ///
1004 /// Note that this will return a single access, and it must dominate the
1005 /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
1006 /// this will return the MemoryPhi, not the operand. This means that
1007 /// given:
1008 /// if (a) {
1009 /// 1 = MemoryDef(liveOnEntry)
1010 /// store %a
1011 /// } else {
1012 /// 2 = MemoryDef(liveOnEntry)
1013 /// store %b
1014 /// }
1015 /// 3 = MemoryPhi(2, 1)
1016 /// MemoryUse(3)
1017 /// load %a
1018 ///
1019 /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
1020 /// in the if (a) branch.
1021 MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
1022 MemoryAccess *MA = MSSA->getMemoryAccess(I);
1023    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
1024 return getClobberingMemoryAccess(MA);
1025 }
1026
1027 /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
1028 /// but takes a MemoryAccess instead of an Instruction.
1029 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
1030
1031 /// Given a potentially clobbering memory access and a new location,
1032 /// calling this will give you the nearest dominating clobbering MemoryAccess
1033 /// (by skipping non-aliasing def links).
1034 ///
1035 /// This version of the function is mainly used to disambiguate phi translated
1036 /// pointers, where the value of a pointer may have changed from the initial
1037 /// memory access. Note that this expects to be handed either a MemoryUse,
1038 /// or an already potentially clobbering access. Unlike the above API, if
1039 /// given a MemoryDef that clobbers the pointer as the starting access, it
1040 /// will return that MemoryDef, whereas the above would return the clobber
1041 /// starting from the use side of the memory def.
1042 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1043 const MemoryLocation &) = 0;
1044
1045 /// Given a memory access, invalidate anything this walker knows about
1046 /// that access.
1047 /// This API is used by walkers that store information to perform basic cache
1048 /// invalidation. This will be called by MemorySSA at appropriate times for
1049 /// the walker it uses or returns.
1050 virtual void invalidateInfo(MemoryAccess *) {}
1051
1052protected:
1053 friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
1054 // constructor.
1055 MemorySSA *MSSA;
1056};
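
A hedged sketch of the walker contract documented above: given a store, ask for the nearest dominating clobber, skipping defs that AA proves non-aliasing. `MSSA` is built for the store's function; the helper name is illustrative.

// Illustrative query; the result may be a MemoryPhi (see the doc comment).
static llvm::MemoryAccess *nearestClobber(llvm::MemorySSA &MSSA,
                                          llvm::StoreInst *SI) {
  llvm::MemorySSAWalker *Walker = MSSA.getWalker();
  return Walker->getClobberingMemoryAccess(SI);
}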
1057
1058/// A MemorySSAWalker that does no alias queries, or anything else. It
1059/// simply returns the links as they were constructed by the builder.
1060class DoNothingMemorySSAWalker final : public MemorySSAWalker {
1061public:
1062 // Keep the overrides below from hiding the Instruction overload of
1063 // getClobberingMemoryAccess.
1064 using MemorySSAWalker::getClobberingMemoryAccess;
1065
1066 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
1067 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1068 const MemoryLocation &) override;
1069};
1070
1071using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
1072using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
1073
1074/// Iterator base class used to implement const and non-const iterators
1075/// over the defining accesses of a MemoryAccess.
1076template <class T>
1077class memoryaccess_def_iterator_base
1078 : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
1079 std::forward_iterator_tag, T, ptrdiff_t, T *,
1080 T *> {
1081 using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
1082
1083public:
1084 memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
1085 memoryaccess_def_iterator_base() = default;
1086
1087 bool operator==(const memoryaccess_def_iterator_base &Other) const {
1088 return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
1089 }
1090
1091 // This is a bit ugly, but for MemoryPhis, unlike PHINodes, you can't get the
1092 // block from the operand in constant time (in a PHINode, the uselist has
1093 // both, so it's just subtraction). We provide it as part of the
1094 // iterator to avoid callers having to do a linear walk to get the block.
1095 // If the operation becomes constant time on MemoryPhis, this bit of
1096 // abstraction breaking should be removed.
1097 BasicBlock *getPhiArgBlock() const {
1098 MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
1099 assert(MP && "Tried to get phi arg block when not iterating over a PHI");
1100 return MP->getIncomingBlock(ArgNo);
1101 }
1102
1103 typename std::iterator_traits<BaseT>::pointer operator*() const {
1104 assert(Access && "Tried to access past the end of our iterator");
1105 // Go to the first argument for phis, and the defining access for everything
1106 // else.
1107 if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
1108 return MP->getIncomingValue(ArgNo);
1109 return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
1110 }
1111
1112 using BaseT::operator++;
1113 memoryaccess_def_iterator_base &operator++() {
1114 assert(Access && "Hit end of iterator");
1115 if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
1116 if (++ArgNo >= MP->getNumIncomingValues()) {
1117 ArgNo = 0;
1118 Access = nullptr;
1119 }
1120 } else {
1121 Access = nullptr;
1122 }
1123 return *this;
1124 }
1125
1126private:
1127 T *Access = nullptr;
1128 unsigned ArgNo = 0;
1129};
1130
1131inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
1132 return memoryaccess_def_iterator(this);
1133}
1134
1135inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
1136 return const_memoryaccess_def_iterator(this);
1137}
1138
1139inline memoryaccess_def_iterator MemoryAccess::defs_end() {
1140 return memoryaccess_def_iterator();
1141}
1142
1143inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
1144 return const_memoryaccess_def_iterator();
1145}
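A sketch of iterating the one-step defining accesses through the iterator above (MA is an assumed non-null MemoryAccess*):

    // For a MemoryPhi this visits one access per incoming edge; for a
    // MemoryUse/MemoryDef it visits the single defining access.
    for (auto DI = MA->defs_begin(), DE = MA->defs_end(); DI != DE; ++DI) {
      MemoryAccess *Def = *DI;
      (void)Def; // when MA is a phi, DI.getPhiArgBlock() gives the edge's block
    }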
1146
1147/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
1148/// and uses in the inverse case.
1149template <> struct GraphTraits<MemoryAccess *> {
1150 using NodeRef = MemoryAccess *;
1151 using ChildIteratorType = memoryaccess_def_iterator;
1152
1153 static NodeRef getEntryNode(NodeRef N) { return N; }
1154 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
1155 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
1156};
1157
1158template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1159 using NodeRef = MemoryAccess *;
1160 using ChildIteratorType = MemoryAccess::iterator;
1161
1162 static NodeRef getEntryNode(NodeRef N) { return N; }
1163 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1164 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1165};
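Because of these GraphTraits specializations, the generic graph utilities apply to the def graph. A sketch, assuming llvm/ADT/DepthFirstIterator.h is included and Root is a hypothetical MemoryAccess*:

    // Visit Root and everything reachable from it through defining accesses.
    for (MemoryAccess *MA : depth_first(Root))
      (void)MA;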
1166
1167/// Provide an iterator that walks defs, giving both the memory access,
1168/// and the current pointer location, updating the pointer location as it
1169/// changes due to phi node translation.
1170///
1171/// This iterator, while somewhat specialized, is what most clients actually
1172/// want when walking upwards through MemorySSA def chains. It takes a pair of
1173/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
1174/// memory location through phi nodes for the user.
1175class upward_defs_iterator
1176 : public iterator_facade_base<upward_defs_iterator,
1177 std::forward_iterator_tag,
1178 const MemoryAccessPair> {
1179 using BaseT = upward_defs_iterator::iterator_facade_base;
1180
1181public:
1182 upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT,
1183 bool *PerformedPhiTranslation = nullptr)
1184 : DefIterator(Info.first), Location(Info.second),
1185 OriginalAccess(Info.first), DT(DT),
1186 PerformedPhiTranslation(PerformedPhiTranslation) {
1187 CurrentPair.first = nullptr;
1188
1189 WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1190 fillInCurrentPair();
1191 }
1192
1193 upward_defs_iterator() { CurrentPair.first = nullptr; }
1194
1195 bool operator==(const upward_defs_iterator &Other) const {
1196 return DefIterator == Other.DefIterator;
1197 }
1198
1199 typename std::iterator_traits<BaseT>::reference operator*() const {
1200 assert(DefIterator != OriginalAccess->defs_end() &&
1201        "Tried to access past the end of our iterator");
1202 return CurrentPair;
1203 }
1204
1205 using BaseT::operator++;
1206 upward_defs_iterator &operator++() {
1207 assert(DefIterator != OriginalAccess->defs_end() &&
1208        "Tried to access past the end of the iterator");
1209 ++DefIterator;
1210 if (DefIterator != OriginalAccess->defs_end())
1211 fillInCurrentPair();
1212 return *this;
1213 }
1214
1215 BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1216
1217private:
1218 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1219 /// loop. In particular, this guarantees that it only references a single
1220 /// MemoryLocation during execution of the containing function.
1221 bool IsGuaranteedLoopInvariant(Value *Ptr) const;
1222
1223 void fillInCurrentPair() {
1224 CurrentPair.first = *DefIterator;
1225 CurrentPair.second = Location;
1226 if (WalkingPhi && Location.Ptr) {
1227 // Mark size as unknown, if the location is not guaranteed to be
1228 // loop-invariant for any possible loop in the function. Setting the size
1229 // to unknown guarantees that any memory accesses that access locations
1230 // after the pointer are considered as clobbers, which is important to
1231 // catch loop carried dependences.
1232 if (Location.Ptr &&
1233 !IsGuaranteedLoopInvariant(const_cast<Value *>(Location.Ptr)))
1234 CurrentPair.second =
1235 Location.getWithNewSize(LocationSize::beforeOrAfterPointer());
1236 PHITransAddr Translator(
1237 const_cast<Value *>(Location.Ptr),
1238 OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
1239
1240 if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1241 DefIterator.getPhiArgBlock(), DT,
1242 true)) {
1243 Value *TransAddr = Translator.getAddr();
1244 if (TransAddr != Location.Ptr) {
1245 CurrentPair.second = CurrentPair.second.getWithNewPtr(TransAddr);
1246
1247 if (TransAddr &&
1248 !IsGuaranteedLoopInvariant(const_cast<Value *>(TransAddr)))
1249 CurrentPair.second = CurrentPair.second.getWithNewSize(
1250 LocationSize::beforeOrAfterPointer());
1251
1252 if (PerformedPhiTranslation)
1253 *PerformedPhiTranslation = true;
1254 }
1255 }
1256 }
1257 }
1258
1259 MemoryAccessPair CurrentPair;
1260 memoryaccess_def_iterator DefIterator;
1261 MemoryLocation Location;
1262 MemoryAccess *OriginalAccess = nullptr;
1263 DominatorTree *DT = nullptr;
1264 bool WalkingPhi = false;
1265 bool *PerformedPhiTranslation = nullptr;
1266};
1267
1268inline upward_defs_iterator
1269upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT,
1270 bool *PerformedPhiTranslation = nullptr) {
1271 return upward_defs_iterator(Pair, &DT, PerformedPhiTranslation);
1272}
1273
1274inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
1275
1276inline iterator_range<upward_defs_iterator>
1277upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
1278 return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
1279}
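A sketch of the intended use (hypothetical context: MSSA is a MemorySSA result, SI a StoreInst*, DT a DominatorTree):

    MemoryAccess *Start = MSSA.getMemoryAccess(SI);  // the store's MemoryDef
    MemoryLocation Loc = MemoryLocation::get(SI);
    for (const MemoryAccessPair &P : upward_defs({Start, Loc}, DT)) {
      // P.first is a defining access reachable in one step from Start;
      // P.second is the location to query against it, already phi-translated
      // where necessary.
    }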
1280
1281/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1282/// has no defining access (e.g. a MemoryPhi or liveOnEntry). Note that, when
1283/// comparing against a null def_chain_iterator, this will compare equal only
1284/// after walking said Phi/liveOnEntry.
1285///
1286/// The UseOptimizedChain flag specifies whether to walk the clobbering
1287/// access chain, or all the accesses.
1288///
1289/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
1290/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1291/// a phi node. The optimized chain walks the clobbering access of a store.
1292/// So if you are just trying to find, given a store, what the next
1293/// thing that would clobber the same memory is, you want the optimized chain.
1294template <class T, bool UseOptimizedChain = false>
1295struct def_chain_iterator
1296 : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1297 std::forward_iterator_tag, MemoryAccess *> {
1298 def_chain_iterator() : MA(nullptr) {}
1299 def_chain_iterator(T MA) : MA(MA) {}
1300
1301 T operator*() const { return MA; }
1302
1303 def_chain_iterator &operator++() {
1304 // N.B. liveOnEntry has a null defining access.
1305 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1306 if (UseOptimizedChain && MUD->isOptimized())
1307 MA = MUD->getOptimized();
1308 else
1309 MA = MUD->getDefiningAccess();
1310 } else {
1311 MA = nullptr;
1312 }
1313
1314 return *this;
1315 }
1316
1317 bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1318
1319private:
1320 T MA;
1321};
1322
1323template <class T>
1324inline iterator_range<def_chain_iterator<T>>
1325def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1326#ifdef EXPENSIVE_CHECKS
1327 assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1328        "UpTo isn't in the def chain!");
1329#endif
1330 return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
1331}
1332
1333template <class T>
1334inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
1335 return make_range(def_chain_iterator<T, true>(MA),
1336 def_chain_iterator<T, true>(nullptr));
1337}
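A sketch of enumerating a def chain (MA is an assumed MemoryAccess*):

    // Yields MA, then each access above it, ending with the MemoryPhi or
    // liveOnEntry node that terminates the chain.
    for (MemoryAccess *A : def_chain(MA))
      (void)A;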
1338
1339} // end namespace llvm
1340
1341#endif // LLVM_ANALYSIS_MEMORYSSA_H

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/SmallPtrSet.h

1//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallPtrSet class. See the doxygen comment for
10// SmallPtrSetImplBase for more details on the algorithm used.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_SMALLPTRSET_H
15#define LLVM_ADT_SMALLPTRSET_H
16
17#include "llvm/ADT/EpochTracker.h"
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/ReverseIteration.h"
20#include "llvm/Support/type_traits.h"
21#include <cassert>
22#include <cstddef>
23#include <cstdlib>
24#include <cstring>
25#include <initializer_list>
26#include <iterator>
27#include <utility>
28
29namespace llvm {
30
31/// SmallPtrSetImplBase - This is the common code shared among all the
32/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
33/// for small and one for large sets.
34///
35/// Small sets use an array of pointers allocated in the SmallPtrSet object,
36/// which is treated as a simple array of pointers. When a pointer is added to
37/// the set, the array is scanned to see if the element already exists; if not,
38/// the element is 'pushed back' onto the array. If we run out of space in the
39/// array, we grow into the 'large set' case. SmallSet should be used when the
40/// sets are often small. In this case, no memory allocation is used, and only
41/// light-weight and cache-efficient scanning is used.
42///
43/// Large sets use a classic exponentially-probed hash table. Empty buckets are
44/// represented with an illegal pointer value (-1) to allow null pointers to be
45/// inserted. Tombstones are represented with another illegal pointer value
46/// (-2), to allow deletion. The hash table is resized when the table is 3/4 or
47/// more full. When this happens, the table is doubled in size.
48///
49class SmallPtrSetImplBase : public DebugEpochBase {
50 friend class SmallPtrSetIteratorImpl;
51
52protected:
53 /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
54 const void **SmallArray;
55 /// CurArray - This is the current set of buckets. If equal to SmallArray,
56 /// then the set is in 'small mode'.
57 const void **CurArray;
58 /// CurArraySize - The allocated size of CurArray, always a power of two.
59 unsigned CurArraySize;
60
61 /// Number of elements in CurArray that contain a value or are a tombstone.
62 /// If small, all these elements are at the beginning of CurArray and the rest
63 /// is uninitialized.
64 unsigned NumNonEmpty;
65 /// Number of tombstones in CurArray.
66 unsigned NumTombstones;
67
68 // Helpers to copy and move construct a SmallPtrSet.
69 SmallPtrSetImplBase(const void **SmallStorage,
70 const SmallPtrSetImplBase &that);
71 SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
72 SmallPtrSetImplBase &&that);
73
74 explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize)
75 : SmallArray(SmallStorage), CurArray(SmallStorage),
76 CurArraySize(SmallSize), NumNonEmpty(0), NumTombstones(0) {
77 assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
78        "Initial size must be a power of two!");
79 }
80
81 ~SmallPtrSetImplBase() {
82 if (!isSmall())
83 free(CurArray);
84 }
85
86public:
87 using size_type = unsigned;
88
89 SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete;
90
91 LLVM_NODISCARD bool empty() const { return size() == 0; }
40. Assuming the condition is true
41. Returning the value 1, which participates in a condition later
92 size_type size() const { return NumNonEmpty - NumTombstones; }
93
94 void clear() {
95 incrementEpoch();
96 // If the capacity of the array is huge, and the # elements used is small,
97 // shrink the array.
98 if (!isSmall()) {
99 if (size() * 4 < CurArraySize && CurArraySize > 32)
100 return shrink_and_clear();
101 // Fill the array with empty markers.
102 memset(CurArray, -1, CurArraySize * sizeof(void *));
103 }
104
105 NumNonEmpty = 0;
106 NumTombstones = 0;
107 }
108
109protected:
110 static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
111
112 static void *getEmptyMarker() {
113 // Note that -1 is chosen to make clear() efficiently implementable with
114 // memset and because it's not a valid pointer value.
115 return reinterpret_cast<void*>(-1);
116 }
117
118 const void **EndPointer() const {
119 return isSmall() ? CurArray + NumNonEmpty : CurArray + CurArraySize;
120 }
121
122 /// insert_imp - This returns true if the pointer was new to the set, false if
123 /// it was already in the set. This is hidden from the client so that the
124 /// derived class can check that the right type of pointer is passed in.
125 std::pair<const void *const *, bool> insert_imp(const void *Ptr) {
126 if (isSmall()) {
127 // Check to see if it is already in the set.
128 const void **LastTombstone = nullptr;
129 for (const void **APtr = SmallArray, **E = SmallArray + NumNonEmpty;
130 APtr != E; ++APtr) {
131 const void *Value = *APtr;
132 if (Value == Ptr)
133 return std::make_pair(APtr, false);
134 if (Value == getTombstoneMarker())
135 LastTombstone = APtr;
136 }
137
138 // Did we find any tombstone marker?
139 if (LastTombstone != nullptr) {
140 *LastTombstone = Ptr;
141 --NumTombstones;
142 incrementEpoch();
143 return std::make_pair(LastTombstone, true);
144 }
145
146 // Nope, there isn't. If we stay small, just 'pushback' now.
147 if (NumNonEmpty < CurArraySize) {
148 SmallArray[NumNonEmpty++] = Ptr;
149 incrementEpoch();
150 return std::make_pair(SmallArray + (NumNonEmpty - 1), true);
151 }
152 // Otherwise, hit the big set case, which will call grow.
153 }
154 return insert_imp_big(Ptr);
155 }
156
157 /// erase_imp - If the set contains the specified pointer, remove it and
158 /// return true, otherwise return false. This is hidden from the client so
159 /// that the derived class can check that the right type of pointer is passed
160 /// in.
161 bool erase_imp(const void * Ptr) {
162 const void *const *P = find_imp(Ptr);
163 if (P == EndPointer())
164 return false;
165
166 const void **Loc = const_cast<const void **>(P);
167 assert(*Loc == Ptr && "broken find!");
168 *Loc = getTombstoneMarker();
169 NumTombstones++;
170 return true;
171 }
172
173 /// Returns the raw pointer needed to construct an iterator. If element not
174 /// found, this will be EndPointer. Otherwise, it will be a pointer to the
175 /// slot which stores Ptr;
176 const void *const * find_imp(const void * Ptr) const {
177 if (isSmall()) {
178 // Linear search for the item.
179 for (const void *const *APtr = SmallArray,
180 *const *E = SmallArray + NumNonEmpty; APtr != E; ++APtr)
181 if (*APtr == Ptr)
182 return APtr;
183 return EndPointer();
184 }
185
186 // Big set case.
187 auto *Bucket = FindBucketFor(Ptr);
188 if (*Bucket == Ptr)
189 return Bucket;
190 return EndPointer();
191 }
192
193private:
194 bool isSmall() const { return CurArray == SmallArray; }
195
196 std::pair<const void *const *, bool> insert_imp_big(const void *Ptr);
197
198 const void * const *FindBucketFor(const void *Ptr) const;
199 void shrink_and_clear();
200
201 /// Grow - Allocate a larger backing store for the buckets and move it over.
202 void Grow(unsigned NewSize);
203
204protected:
205 /// swap - Swaps the elements of two sets.
206 /// Note: This method assumes that both sets have the same small size.
207 void swap(SmallPtrSetImplBase &RHS);
208
209 void CopyFrom(const SmallPtrSetImplBase &RHS);
210 void MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
211
212private:
213 /// Code shared by MoveFrom() and move constructor.
214 void MoveHelper(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
215 /// Code shared by CopyFrom() and copy constructor.
216 void CopyHelper(const SmallPtrSetImplBase &RHS);
217};
218
219/// SmallPtrSetIteratorImpl - This is the common base class shared between all
220/// instances of SmallPtrSetIterator.
221class SmallPtrSetIteratorImpl {
222protected:
223 const void *const *Bucket;
224 const void *const *End;
225
226public:
227 explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
228 : Bucket(BP), End(E) {
229 if (shouldReverseIterate()) {
230 RetreatIfNotValid();
231 return;
232 }
233 AdvanceIfNotValid();
234 }
235
236 bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
237 return Bucket == RHS.Bucket;
238 }
239 bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
240 return Bucket != RHS.Bucket;
241 }
242
243protected:
244 /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
245 /// that is. This is guaranteed to stop because the end() bucket is marked
246 /// valid.
247 void AdvanceIfNotValid() {
248 assert(Bucket <= End);
249 while (Bucket != End &&
250 (*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
251 *Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
252 ++Bucket;
253 }
254 void RetreatIfNotValid() {
255 assert(Bucket >= End);
256 while (Bucket != End &&
257 (Bucket[-1] == SmallPtrSetImplBase::getEmptyMarker() ||
258 Bucket[-1] == SmallPtrSetImplBase::getTombstoneMarker())) {
259 --Bucket;
260 }
261 }
262};
263
264/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
265template <typename PtrTy>
266class SmallPtrSetIterator : public SmallPtrSetIteratorImpl,
267 DebugEpochBase::HandleBase {
268 using PtrTraits = PointerLikeTypeTraits<PtrTy>;
269
270public:
271 using value_type = PtrTy;
272 using reference = PtrTy;
273 using pointer = PtrTy;
274 using difference_type = std::ptrdiff_t;
275 using iterator_category = std::forward_iterator_tag;
276
277 explicit SmallPtrSetIterator(const void *const *BP, const void *const *E,
278 const DebugEpochBase &Epoch)
279 : SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {}
280
281 // Most methods are provided by the base class.
282
283 const PtrTy operator*() const {
284 assert(isHandleInSync() && "invalid iterator access!");
285 if (shouldReverseIterate()) {
286 assert(Bucket > End);
287 return PtrTraits::getFromVoidPointer(const_cast<void *>(Bucket[-1]));
288 }
289 assert(Bucket < End);
290 return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
291 }
292
293 inline SmallPtrSetIterator& operator++() { // Preincrement
294 assert(isHandleInSync() && "invalid iterator access!");
295 if (shouldReverseIterate()) {
296 --Bucket;
297 RetreatIfNotValid();
298 return *this;
299 }
300 ++Bucket;
301 AdvanceIfNotValid();
302 return *this;
303 }
304
305 SmallPtrSetIterator operator++(int) { // Postincrement
306 SmallPtrSetIterator tmp = *this;
307 ++*this;
308 return tmp;
309 }
310};
311
312/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
313/// power of two (which means N itself if N is already a power of two).
314template<unsigned N>
315struct RoundUpToPowerOfTwo;
316
317/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it. This is a
318/// helper template used to implement RoundUpToPowerOfTwo.
319template<unsigned N, bool isPowerTwo>
320struct RoundUpToPowerOfTwoH {
321 enum { Val = N };
322};
323template<unsigned N>
324struct RoundUpToPowerOfTwoH<N, false> {
325 enum {
326 // We could just use NextVal = N+1, but this converges faster. N|(N-1) sets
327 // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
328 Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
329 };
330};
331
332template<unsigned N>
333struct RoundUpToPowerOfTwo {
334 enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
335};
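A compile-time illustration of the rounding behaviour (these asserts are an example, not part of the header):

    static_assert(RoundUpToPowerOfTwo<1>::Val == 1, "already a power of two");
    static_assert(RoundUpToPowerOfTwo<5>::Val == 8, "rounded up to 8");
    static_assert(RoundUpToPowerOfTwo<8>::Val == 8, "unchanged");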
336
337/// A templated base class for \c SmallPtrSet which provides the
338/// typesafe interface that is common across all small sizes.
339///
340/// This is particularly useful for passing around between interface boundaries
341/// to avoid encoding a particular small size in the interface boundary.
342template <typename PtrType>
343class SmallPtrSetImpl : public SmallPtrSetImplBase {
344 using ConstPtrType = typename add_const_past_pointer<PtrType>::type;
345 using PtrTraits = PointerLikeTypeTraits<PtrType>;
346 using ConstPtrTraits = PointerLikeTypeTraits<ConstPtrType>;
347
348protected:
349 // Forward constructors to the base.
350 using SmallPtrSetImplBase::SmallPtrSetImplBase;
351
352public:
353 using iterator = SmallPtrSetIterator<PtrType>;
354 using const_iterator = SmallPtrSetIterator<PtrType>;
355 using key_type = ConstPtrType;
356 using value_type = PtrType;
357
358 SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;
359
360 /// Inserts Ptr if and only if there is no element in the container equal to
361 /// Ptr. The bool component of the returned pair is true if and only if the
362 /// insertion takes place, and the iterator component of the pair points to
363 /// the element equal to Ptr.
364 std::pair<iterator, bool> insert(PtrType Ptr) {
365 auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
366 return std::make_pair(makeIterator(p.first), p.second);
367 }
368
369 /// Insert the given pointer with an iterator hint that is ignored. This is
370 /// identical to calling insert(Ptr), but allows SmallPtrSet to be used by
371 /// std::insert_iterator and std::inserter().
372 iterator insert(iterator, PtrType Ptr) {
373 return insert(Ptr).first;
374 }
375
376 /// erase - If the set contains the specified pointer, remove it and return
377 /// true, otherwise return false.
378 bool erase(PtrType Ptr) {
379 return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
380 }
381 /// count - Return 1 if the specified pointer is in the set, 0 otherwise.
382 size_type count(ConstPtrType Ptr) const {
383 return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
384 }
385 iterator find(ConstPtrType Ptr) const {
386 return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)));
387 }
388 bool contains(ConstPtrType Ptr) const {
389 return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
390 }
391
392 template <typename IterT>
393 void insert(IterT I, IterT E) {
394 for (; I != E; ++I)
395 insert(*I);
396 }
397
398 void insert(std::initializer_list<PtrType> IL) {
399 insert(IL.begin(), IL.end());
400 }
401
402 iterator begin() const {
403 if (shouldReverseIterate())
404 return makeIterator(EndPointer() - 1);
405 return makeIterator(CurArray);
406 }
407 iterator end() const { return makeIterator(EndPointer()); }
408
409private:
410 /// Create an iterator that dereferences to same place as the given pointer.
411 iterator makeIterator(const void *const *P) const {
412 if (shouldReverseIterate())
413 return iterator(P == EndPointer() ? CurArray : P + 1, CurArray, *this);
414 return iterator(P, EndPointer(), *this);
415 }
416};
417
418/// Equality comparison for SmallPtrSet.
419///
420/// Iterates over elements of LHS confirming that each value from LHS is also in
421/// RHS, and that no additional values are in RHS.
422template <typename PtrType>
423bool operator==(const SmallPtrSetImpl<PtrType> &LHS,
424 const SmallPtrSetImpl<PtrType> &RHS) {
425 if (LHS.size() != RHS.size())
426 return false;
427
428 for (const auto *KV : LHS)
429 if (!RHS.count(KV))
430 return false;
431
432 return true;
433}
434
435/// Inequality comparison for SmallPtrSet.
436///
437/// Equivalent to !(LHS == RHS).
438template <typename PtrType>
439bool operator!=(const SmallPtrSetImpl<PtrType> &LHS,
440 const SmallPtrSetImpl<PtrType> &RHS) {
441 return !(LHS == RHS);
442}
443
444/// SmallPtrSet - This class implements a set which is optimized for holding
445/// SmallSize or less elements. This internally rounds up SmallSize to the next
446/// power of two if it is not already a power of two. See the comments above
447/// SmallPtrSetImplBase for details of the algorithm.
448template<class PtrType, unsigned SmallSize>
449class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
450 // In small mode SmallPtrSet uses linear search for the elements, so it is
451 // not a good idea to choose this value too high. You may consider using a
452 // DenseSet<> instead if you expect many elements in the set.
453 static_assert(SmallSize <= 32, "SmallSize should be small");
454
455 using BaseT = SmallPtrSetImpl<PtrType>;
456
457 // Make sure that SmallSize is a power of two, round up if not.
458 enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
459 /// SmallStorage - Fixed size storage used in 'small mode'.
460 const void *SmallStorage[SmallSizePowTwo];
461
462public:
463 SmallPtrSet() : BaseT(SmallStorage, SmallSizePowTwo) {}
464 SmallPtrSet(const SmallPtrSet &that) : BaseT(SmallStorage, that) {}
465 SmallPtrSet(SmallPtrSet &&that)
466 : BaseT(SmallStorage, SmallSizePowTwo, std::move(that)) {}
467
468 template<typename It>
469 SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) {
470 this->insert(I, E);
471 }
472
473 SmallPtrSet(std::initializer_list<PtrType> IL)
474 : BaseT(SmallStorage, SmallSizePowTwo) {
475 this->insert(IL.begin(), IL.end());
476 }
477
478 SmallPtrSet<PtrType, SmallSize> &
479 operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
480 if (&RHS != this)
481 this->CopyFrom(RHS);
482 return *this;
483 }
484
485 SmallPtrSet<PtrType, SmallSize> &
486 operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
487 if (&RHS != this)
488 this->MoveFrom(SmallSizePowTwo, std::move(RHS));
489 return *this;
490 }
491
492 SmallPtrSet<PtrType, SmallSize> &
493 operator=(std::initializer_list<PtrType> IL) {
494 this->clear();
495 this->insert(IL.begin(), IL.end());
496 return *this;
497 }
498
499 /// swap - Swaps the elements of two sets.
500 void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
501 SmallPtrSetImplBase::swap(RHS);
502 }
503};
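Typical usage, as a sketch (V is an assumed llvm::Value*):

    // Inline storage for 8 pointers; no heap allocation occurs until a 9th
    // distinct pointer is inserted.
    SmallPtrSet<const Value *, 8> Visited;
    if (Visited.insert(V).second) {
      // First time V has been seen.
    }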
504
505} // end namespace llvm
506
507namespace std {
508
509 /// Implement std::swap in terms of SmallPtrSet swap.
510 template<class T, unsigned N>
511 inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
512 LHS.swap(RHS);
513 }
514
515} // end namespace std
516
517#endif // LLVM_ADT_SMALLPTRSET_H

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/STLExtras.h

1//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some templates that are useful if you are working with the
10// STL at all.
11//
12// No library is required when using these functions.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_ADT_STLEXTRAS_H
17#define LLVM_ADT_STLEXTRAS_H
18
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLForwardCompat.h"
21#include "llvm/ADT/iterator.h"
22#include "llvm/ADT/iterator_range.h"
23#include "llvm/Config/abi-breaking.h"
24#include "llvm/Support/ErrorHandling.h"
25#include <algorithm>
26#include <cassert>
27#include <cstddef>
28#include <cstdint>
29#include <cstdlib>
30#include <functional>
31#include <initializer_list>
32#include <iterator>
33#include <limits>
34#include <memory>
35#include <tuple>
36#include <type_traits>
37#include <utility>
38
39#ifdef EXPENSIVE_CHECKS
40#include <random> // for std::mt19937
41#endif
42
43namespace llvm {
44
45// Only used by compiler if both template types are the same. Useful when
46// using SFINAE to test for the existence of member functions.
47template <typename T, T> struct SameType;
48
49namespace detail {
50
51template <typename RangeT>
52using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
53
54template <typename RangeT>
55using ValueOfRange = typename std::remove_reference<decltype(
56 *std::begin(std::declval<RangeT &>()))>::type;
57
58} // end namespace detail
59
60//===----------------------------------------------------------------------===//
61// Extra additions to <type_traits>
62//===----------------------------------------------------------------------===//
63
64template <typename T> struct make_const_ptr {
65 using type =
66 typename std::add_pointer<typename std::add_const<T>::type>::type;
67};
68
69template <typename T> struct make_const_ref {
70 using type = typename std::add_lvalue_reference<
71 typename std::add_const<T>::type>::type;
72};
73
74namespace detail {
75template <typename...> using void_t = void;
76template <class, template <class...> class Op, class... Args> struct detector {
77 using value_t = std::false_type;
78};
79template <template <class...> class Op, class... Args>
80struct detector<void_t<Op<Args...>>, Op, Args...> {
81 using value_t = std::true_type;
82};
83} // end namespace detail
84
85/// Detects if a given trait holds for some set of arguments 'Args'.
86/// For example, the given trait could be used to detect if a given type
87/// has a copy assignment operator:
88/// template<class T>
89/// using has_copy_assign_t = decltype(std::declval<T&>()
90/// = std::declval<const T&>());
91/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
92template <template <class...> class Op, class... Args>
93using is_detected = typename detail::detector<void, Op, Args...>::value_t;
94
95namespace detail {
96template <typename Callable, typename... Args>
97using is_invocable =
98 decltype(std::declval<Callable &>()(std::declval<Args>()...));
99} // namespace detail
100
101/// Check if a Callable type can be invoked with the given set of arg types.
102template <typename Callable, typename... Args>
103using is_invocable = is_detected<detail::is_invocable, Callable, Args...>;
104
105/// This class provides various trait information about a callable object.
106/// * To access the number of arguments: Traits::num_args
107/// * To access the type of an argument: Traits::arg_t<Index>
108/// * To access the type of the result: Traits::result_t
109template <typename T, bool isClass = std::is_class<T>::value>
110struct function_traits : public function_traits<decltype(&T::operator())> {};
111
112/// Overload for class function types.
113template <typename ClassType, typename ReturnType, typename... Args>
114struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
115 /// The number of arguments to this function.
116 enum { num_args = sizeof...(Args) };
117
118 /// The result type of this function.
119 using result_t = ReturnType;
120
121 /// The type of an argument to this function.
122 template <size_t Index>
123 using arg_t = typename std::tuple_element<Index, std::tuple<Args...>>::type;
124};
125/// Overload for class function types.
126template <typename ClassType, typename ReturnType, typename... Args>
127struct function_traits<ReturnType (ClassType::*)(Args...), false>
128 : function_traits<ReturnType (ClassType::*)(Args...) const> {};
129/// Overload for non-class function types.
130template <typename ReturnType, typename... Args>
131struct function_traits<ReturnType (*)(Args...), false> {
132 /// The number of arguments to this function.
133 enum { num_args = sizeof...(Args) };
134
135 /// The result type of this function.
136 using result_t = ReturnType;
137
138 /// The type of an argument to this function.
139 template <size_t i>
140 using arg_t = typename std::tuple_element<i, std::tuple<Args...>>::type;
141};
142/// Overload for non-class function type references.
143template <typename ReturnType, typename... Args>
144struct function_traits<ReturnType (&)(Args...), false>
145 : public function_traits<ReturnType (*)(Args...)> {};
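A sketch of what these traits expose, using a hypothetical free function:

    static int sampleFn(double, char) { return 0; }
    using Traits = function_traits<decltype(&sampleFn)>;
    static_assert(Traits::num_args == 2, "two parameters");
    static_assert(std::is_same<Traits::result_t, int>::value, "returns int");
    static_assert(std::is_same<Traits::arg_t<0>, double>::value, "first arg");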
146
147//===----------------------------------------------------------------------===//
148// Extra additions to <functional>
149//===----------------------------------------------------------------------===//
150
151template <class Ty> struct identity {
152 using argument_type = Ty;
153
154 Ty &operator()(Ty &self) const {
155 return self;
156 }
157 const Ty &operator()(const Ty &self) const {
158 return self;
159 }
160};
161
162/// An efficient, type-erasing, non-owning reference to a callable. This is
163/// intended for use as the type of a function parameter that is not used
164/// after the function in question returns.
165///
166/// This class does not own the callable, so it is not in general safe to store
167/// a function_ref.
168template<typename Fn> class function_ref;
169
170template<typename Ret, typename ...Params>
171class function_ref<Ret(Params...)> {
172 Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
173 intptr_t callable;
174
175 template<typename Callable>
176 static Ret callback_fn(intptr_t callable, Params ...params) {
177 return (*reinterpret_cast<Callable*>(callable))(
178 std::forward<Params>(params)...);
179 }
180
181public:
182 function_ref() = default;
183 function_ref(std::nullptr_t) {}
184
185 template <typename Callable>
186 function_ref(
187 Callable &&callable,
188 // This is not the copy-constructor.
189 std::enable_if_t<!std::is_same<remove_cvref_t<Callable>,
190 function_ref>::value> * = nullptr,
191 // Functor must be callable and return a suitable type.
192 std::enable_if_t<std::is_void<Ret>::value ||
193 std::is_convertible<decltype(std::declval<Callable>()(
194 std::declval<Params>()...)),
195 Ret>::value> * = nullptr)
196 : callback(callback_fn<typename std::remove_reference<Callable>::type>),
197 callable(reinterpret_cast<intptr_t>(&callable)) {}
198
199 Ret operator()(Params ...params) const {
200 return callback(callable, std::forward<Params>(params)...);
201 }
202
203 explicit operator bool() const { return callback; }
204};
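A sketch of the intended pattern: pass a callable down without owning it (applyTwice is a hypothetical helper):

    // The callee must not store F; the lambda below lives only for the call.
    static int applyTwice(function_ref<int(int)> F, int X) { return F(F(X)); }
    // applyTwice([](int V) { return V + 1; }, 3) evaluates to 5.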
205
206//===----------------------------------------------------------------------===//
207// Extra additions to <iterator>
208//===----------------------------------------------------------------------===//
209
210namespace adl_detail {
211
212using std::begin;
213
214template <typename ContainerTy>
215decltype(auto) adl_begin(ContainerTy &&container) {
216 return begin(std::forward<ContainerTy>(container));
217}
218
219using std::end;
220
221template <typename ContainerTy>
222decltype(auto) adl_end(ContainerTy &&container) {
223 return end(std::forward<ContainerTy>(container));
224}
225
226using std::swap;
227
228template <typename T>
229void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
230 std::declval<T>()))) {
231 swap(std::forward<T>(lhs), std::forward<T>(rhs));
232}
233
234} // end namespace adl_detail
235
236template <typename ContainerTy>
237decltype(auto) adl_begin(ContainerTy &&container) {
238 return adl_detail::adl_begin(std::forward<ContainerTy>(container));
239}
240
241template <typename ContainerTy>
242decltype(auto) adl_end(ContainerTy &&container) {
243 return adl_detail::adl_end(std::forward<ContainerTy>(container));
244}
245
246template <typename T>
247void adl_swap(T &&lhs, T &&rhs) noexcept(
248 noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
249 adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
250}
251
252/// Test whether \p RangeOrContainer is empty. Similar to C++17 std::empty.
253template <typename T>
254constexpr bool empty(const T &RangeOrContainer) {
255 return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer);
256}
257
258/// Returns true if the given container only contains a single element.
259template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
260 auto B = std::begin(C), E = std::end(C);
261 return B != E && std::next(B) == E;
262}
263
264/// Return a range covering \p RangeOrContainer with the first N elements
265/// excluded.
266template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N = 1) {
267 return make_range(std::next(adl_begin(RangeOrContainer), N),
268 adl_end(RangeOrContainer));
269}
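A quick sketch (assumes <vector>):

    std::vector<int> V = {1, 2, 3};
    for (int X : drop_begin(V)) // visits 2, then 3
      (void)X;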
270
271// mapped_iterator - This is a simple iterator adapter that causes a function to
272// be applied whenever operator* is invoked on the iterator.
273
274template <typename ItTy, typename FuncTy,
275 typename FuncReturnTy =
276 decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
277class mapped_iterator
278 : public iterator_adaptor_base<
279 mapped_iterator<ItTy, FuncTy>, ItTy,
280 typename std::iterator_traits<ItTy>::iterator_category,
281 typename std::remove_reference<FuncReturnTy>::type> {
282public:
283 mapped_iterator(ItTy U, FuncTy F)
284 : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
285
286 ItTy getCurrent() { return this->I; }
287
288 FuncReturnTy operator*() const { return F(*this->I); }
289
290private:
291 FuncTy F;
292};
293
294// map_iterator - Provide a convenient way to create mapped_iterators, just like
295// make_pair is useful for creating pairs...
296template <class ItTy, class FuncTy>
297inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
298 return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
299}
300
301template <class ContainerTy, class FuncTy>
302auto map_range(ContainerTy &&C, FuncTy F) {
303 return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F));
304}
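A sketch of a lazily transformed view (assumes <vector>):

    std::vector<int> V = {1, 2, 3};
    // The lambda is applied at each dereference; nothing is materialized.
    for (int Doubled : map_range(V, [](int X) { return 2 * X; }))
      (void)Doubled; // visits 2, 4, 6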
305
306/// Helper to determine if type T has a member called rbegin().
307template <typename Ty> class has_rbegin_impl {
308 using yes = char[1];
309 using no = char[2];
310
311 template <typename Inner>
312 static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
313
314 template <typename>
315 static no& test(...);
316
317public:
318 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
319};
320
321/// Metafunction to determine if T& or T has a member called rbegin().
322template <typename Ty>
323struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
324};
325
326// Returns an iterator_range over the given container which iterates in reverse.
327// Note that the container must have rbegin()/rend() methods for this to work.
328template <typename ContainerTy>
329auto reverse(ContainerTy &&C,
330 std::enable_if_t<has_rbegin<ContainerTy>::value> * = nullptr) {
331 return make_range(C.rbegin(), C.rend());
332}
333
334// Returns a std::reverse_iterator wrapped around the given iterator.
335template <typename IteratorTy>
336std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
337 return std::reverse_iterator<IteratorTy>(It);
338}
339
340// Returns an iterator_range over the given container which iterates in reverse.
341// Note that the container must have begin()/end() methods which return
342// bidirectional iterators for this to work.
343template <typename ContainerTy>
344auto reverse(ContainerTy &&C,
345 std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) {
346 return make_range(llvm::make_reverse_iterator(std::end(C)),
347 llvm::make_reverse_iterator(std::begin(C)));
348}
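Both overloads are reached through the same spelling; a sketch (assumes <vector>):

    std::vector<int> V = {1, 2, 3};
    for (int X : llvm::reverse(V)) // visits 3, 2, 1 via the rbegin()/rend() overload
      (void)X;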
349
350/// An iterator adaptor that filters the elements of given inner iterators.
351///
352/// The predicate parameter should be a callable object that accepts the wrapped
353/// iterator's reference type and returns a bool. When incrementing or
354/// decrementing the iterator, it will call the predicate on each element and
355/// skip any where it returns false.
356///
357/// \code
358/// int A[] = { 1, 2, 3, 4 };
359/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
360/// // R contains { 1, 3 }.
361/// \endcode
362///
363/// Note: filter_iterator_base implements support for forward iteration.
364/// filter_iterator_impl exists to provide support for bidirectional iteration,
365/// conditional on whether the wrapped iterator supports it.
366template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
367class filter_iterator_base
368 : public iterator_adaptor_base<
369 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
370 WrappedIteratorT,
371 typename std::common_type<
372 IterTag, typename std::iterator_traits<
373 WrappedIteratorT>::iterator_category>::type> {
374 using BaseT = iterator_adaptor_base<
375 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
376 WrappedIteratorT,
377 typename std::common_type<
378 IterTag, typename std::iterator_traits<
379 WrappedIteratorT>::iterator_category>::type>;
380
381protected:
382 WrappedIteratorT End;
383 PredicateT Pred;
384
385 void findNextValid() {
386 while (this->I != End && !Pred(*this->I))
387 BaseT::operator++();
388 }
389
390 // Construct the iterator. The begin iterator needs to know where the end
391 // is, so that it can properly stop when it gets there. The end iterator only
392 // needs the predicate to support bidirectional iteration.
393 filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
394 PredicateT Pred)
395 : BaseT(Begin), End(End), Pred(Pred) {
396 findNextValid();
397 }
398
399public:
400 using BaseT::operator++;
401
402 filter_iterator_base &operator++() {
403 BaseT::operator++();
404 findNextValid();
405 return *this;
406 }
407};
408
409/// Specialization of filter_iterator_base for forward iteration only.
410template <typename WrappedIteratorT, typename PredicateT,
411 typename IterTag = std::forward_iterator_tag>
412class filter_iterator_impl
413 : public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
414 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>;
415
416public:
417 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
418 PredicateT Pred)
419 : BaseT(Begin, End, Pred) {}
420};
421
422/// Specialization of filter_iterator_base for bidirectional iteration.
423template <typename WrappedIteratorT, typename PredicateT>
424class filter_iterator_impl<WrappedIteratorT, PredicateT,
425 std::bidirectional_iterator_tag>
426 : public filter_iterator_base<WrappedIteratorT, PredicateT,
427 std::bidirectional_iterator_tag> {
428 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT,
429 std::bidirectional_iterator_tag>;
430 void findPrevValid() {
431 while (!this->Pred(*this->I))
432 BaseT::operator--();
433 }
434
435public:
436 using BaseT::operator--;
437
438 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
439 PredicateT Pred)
440 : BaseT(Begin, End, Pred) {}
441
442 filter_iterator_impl &operator--() {
443 BaseT::operator--();
444 findPrevValid();
445 return *this;
446 }
447};
448
449namespace detail {
450
451template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
452 using type = std::forward_iterator_tag;
453};
454
455template <> struct fwd_or_bidi_tag_impl<true> {
456 using type = std::bidirectional_iterator_tag;
457};
458
459/// Helper which sets its type member to forward_iterator_tag if the category
460/// of \p IterT does not derive from bidirectional_iterator_tag, and to
461/// bidirectional_iterator_tag otherwise.
462template <typename IterT> struct fwd_or_bidi_tag {
463 using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
464 std::bidirectional_iterator_tag,
465 typename std::iterator_traits<IterT>::iterator_category>::value>::type;
466};
467
468} // namespace detail
469
470/// Defines filter_iterator to a suitable specialization of
471/// filter_iterator_impl, based on the underlying iterator's category.
472template <typename WrappedIteratorT, typename PredicateT>
473using filter_iterator = filter_iterator_impl<
474 WrappedIteratorT, PredicateT,
475 typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;
476
477/// Convenience function that takes a range of elements and a predicate,
478/// and return a new filter_iterator range.
479///
480/// FIXME: Currently if RangeT && is an rvalue reference to a temporary, the
481/// lifetime of that temporary is not kept by the returned range object, and the
482/// temporary is going to be dropped on the floor after the make_iterator_range
483/// full expression that contains this function call.
484template <typename RangeT, typename PredicateT>
485iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
486make_filter_range(RangeT &&Range, PredicateT Pred) {
487 using FilterIteratorT =
488 filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
489 return make_range(
490 FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
491 std::end(std::forward<RangeT>(Range)), Pred),
492 FilterIteratorT(std::end(std::forward<RangeT>(Range)),
493 std::end(std::forward<RangeT>(Range)), Pred));
494}
495
496/// A pseudo-iterator adaptor that is designed to implement "early increment"
497/// style loops.
498///
499/// This is *not a normal iterator* and should almost never be used directly. It
500/// is intended primarily to be used with range based for loops and some range
501/// algorithms.
502///
503/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
504/// somewhere between them. The constraints of these iterators are:
505///
506/// - On construction or after being incremented, it is comparable and
507/// dereferencable. It is *not* incrementable.
508/// - After being dereferenced, it is neither comparable nor dereferencable, it
509/// is only incrementable.
510///
511/// This means you can only dereference the iterator once, and you can only
512/// increment it once between dereferences.
513template <typename WrappedIteratorT>
514class early_inc_iterator_impl
515 : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
516 WrappedIteratorT, std::input_iterator_tag> {
517 using BaseT =
518 iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
519 WrappedIteratorT, std::input_iterator_tag>;
520
521 using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;
522
523protected:
524#if LLVM_ENABLE_ABI_BREAKING_CHECKS
525 bool IsEarlyIncremented = false;
526#endif
527
528public:
529 early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
530
531 using BaseT::operator*;
532 decltype(*std::declval<WrappedIteratorT>()) operator*() {
533#if LLVM_ENABLE_ABI_BREAKING_CHECKS
534 assert(!IsEarlyIncremented && "Cannot dereference twice!");
535 IsEarlyIncremented = true;
536#endif
537 return *(this->I)++;
538 }
539
540 using BaseT::operator++;
541 early_inc_iterator_impl &operator++() {
542#if LLVM_ENABLE_ABI_BREAKING_CHECKS
543 assert(IsEarlyIncremented && "Cannot increment before dereferencing!");
544 IsEarlyIncremented = false;
545#endif
546 return *this;
547 }
548
549 friend bool operator==(const early_inc_iterator_impl &LHS,
550 const early_inc_iterator_impl &RHS) {
551#if LLVM_ENABLE_ABI_BREAKING_CHECKS
552 assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!");
553#endif
554 return (const BaseT &)LHS == (const BaseT &)RHS;
555 }
556};
557
558/// Make a range that does early increment to allow mutation of the underlying
559/// range without disrupting iteration.
560///
561/// The underlying iterator will be incremented immediately after it is
562/// dereferenced, allowing deletion of the current node or insertion of nodes to
563/// not disrupt iteration provided they do not invalidate the *next* iterator --
564/// the current iterator can be invalidated.
565///
566/// This requires a very exact pattern of use that is only really suitable to
567/// range based for loops and other range algorithms that explicitly guarantee
568/// to dereference exactly once each element, and to increment exactly once each
569/// element.
570template <typename RangeT>
571iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
572make_early_inc_range(RangeT &&Range) {
573 using EarlyIncIteratorT =
574 early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
575 return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
576 EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
577}
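A sketch of the canonical pattern (hypothetical context: BB is a BasicBlock*; isInstructionTriviallyDead comes from llvm/Transforms/Utils/Local.h):

    // Each iterator is advanced at the dereference, so erasing the current
    // instruction cannot invalidate the iteration state.
    for (Instruction &I : make_early_inc_range(*BB))
      if (isInstructionTriviallyDead(&I))
        I.eraseFromParent();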
578
579// forward declarations required by zip_shortest/zip_first/zip_longest
580template <typename R, typename UnaryPredicate>
581bool all_of(R &&range, UnaryPredicate P);
582template <typename R, typename UnaryPredicate>
583bool any_of(R &&range, UnaryPredicate P);
584
585namespace detail {
586
587using std::declval;
588
589// We have to alias this since inlining the actual type at the usage site
590// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
591template<typename... Iters> struct ZipTupleType {
592 using type = std::tuple<decltype(*declval<Iters>())...>;
593};
594
595template <typename ZipType, typename... Iters>
596using zip_traits = iterator_facade_base<
597 ZipType, typename std::common_type<std::bidirectional_iterator_tag,
598 typename std::iterator_traits<
599 Iters>::iterator_category...>::type,
600 // ^ TODO: Implement random access methods.
601 typename ZipTupleType<Iters...>::type,
602 typename std::iterator_traits<typename std::tuple_element<
603 0, std::tuple<Iters...>>::type>::difference_type,
604 // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
605 // inner iterators have the same difference_type. It would fail if, for
606 // instance, the second field's difference_type were non-numeric while the
607 // first is.
608 typename ZipTupleType<Iters...>::type *,
609 typename ZipTupleType<Iters...>::type>;
610
611template <typename ZipType, typename... Iters>
612struct zip_common : public zip_traits<ZipType, Iters...> {
613 using Base = zip_traits<ZipType, Iters...>;
614 using value_type = typename Base::value_type;
615
616 std::tuple<Iters...> iterators;
617
618protected:
619 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
620 return value_type(*std::get<Ns>(iterators)...);
621 }
622
623 template <size_t... Ns>
624 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
625 return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
626 }
627
628 template <size_t... Ns>
629 decltype(iterators) tup_dec(std::index_sequence<Ns...>) const {
630 return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
631 }
632
633public:
634 zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
635
636 value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
637
638 const value_type operator*() const {
639 return deref(std::index_sequence_for<Iters...>{});
640 }
641
642 ZipType &operator++() {
643 iterators = tup_inc(std::index_sequence_for<Iters...>{});
644 return *reinterpret_cast<ZipType *>(this);
645 }
646
647 ZipType &operator--() {
648 static_assert(Base::IsBidirectional,
649 "All inner iterators must be at least bidirectional.");
650 iterators = tup_dec(std::index_sequence_for<Iters...>{});
651 return *reinterpret_cast<ZipType *>(this);
652 }
653};
654
655template <typename... Iters>
656struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
657 using Base = zip_common<zip_first<Iters...>, Iters...>;
658
659 bool operator==(const zip_first<Iters...> &other) const {
660 return std::get<0>(this->iterators) == std::get<0>(other.iterators);
661 }
662
663 zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
664};
665
666template <typename... Iters>
667class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
668 template <size_t... Ns>
669 bool test(const zip_shortest<Iters...> &other,
670 std::index_sequence<Ns...>) const {
671 return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
672 std::get<Ns>(other.iterators)...},
673 identity<bool>{});
674 }
675
676public:
677 using Base = zip_common<zip_shortest<Iters...>, Iters...>;
678
679 zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
680
681 bool operator==(const zip_shortest<Iters...> &other) const {
682 return !test(other, std::index_sequence_for<Iters...>{});
683 }
684};
685
686template <template <typename...> class ItType, typename... Args> class zippy {
687public:
688 using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
689 using iterator_category = typename iterator::iterator_category;
690 using value_type = typename iterator::value_type;
691 using difference_type = typename iterator::difference_type;
692 using pointer = typename iterator::pointer;
693 using reference = typename iterator::reference;
694
695private:
696 std::tuple<Args...> ts;
697
698 template <size_t... Ns>
699 iterator begin_impl(std::index_sequence<Ns...>) const {
700 return iterator(std::begin(std::get<Ns>(ts))...);
701 }
702 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
703 return iterator(std::end(std::get<Ns>(ts))...);
704 }
705
706public:
707 zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
708
709 iterator begin() const {
710 return begin_impl(std::index_sequence_for<Args...>{});
711 }
712 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
713};
714
715} // end namespace detail
716
717/// zip iterator for two or more iterable types.
718template <typename T, typename U, typename... Args>
719detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
720 Args &&... args) {
721 return detail::zippy<detail::zip_shortest, T, U, Args...>(
722 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
723}
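A small illustrative sketch (C++14, hence std::get rather than structured bindings; variable names invented):

#include "llvm/ADT/STLExtras.h"
#include <cstdio>
#include <vector>

void printTagged() {
  std::vector<int> Nums = {1, 2, 3};
  std::vector<char> Tags = {'a', 'b', 'c', 'd'};
  // zip uses zip_shortest, so iteration stops with Nums, the shorter range.
  for (auto T : llvm::zip(Nums, Tags))
    std::printf("%d -> %c\n", std::get<0>(T), std::get<1>(T));
}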
724
725/// zip iterator that, for the sake of efficiency, assumes the first iteratee to
726/// be the shortest.
727template <typename T, typename U, typename... Args>
728detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
729 Args &&... args) {
730 return detail::zippy<detail::zip_first, T, U, Args...>(
731 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
732}
733
734namespace detail {
735template <typename Iter>
736Iter next_or_end(const Iter &I, const Iter &End) {
737 if (I == End)
738 return End;
739 return std::next(I);
740}
741
742template <typename Iter>
743auto deref_or_none(const Iter &I, const Iter &End) -> llvm::Optional<
744 std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
745 if (I == End)
746 return None;
747 return *I;
748}
749
750template <typename Iter> struct ZipLongestItemType {
751 using type =
752 llvm::Optional<typename std::remove_const<typename std::remove_reference<
753 decltype(*std::declval<Iter>())>::type>::type>;
754};
755
756template <typename... Iters> struct ZipLongestTupleType {
757 using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
758};
759
760template <typename... Iters>
761class zip_longest_iterator
762 : public iterator_facade_base<
763 zip_longest_iterator<Iters...>,
764 typename std::common_type<
765 std::forward_iterator_tag,
766 typename std::iterator_traits<Iters>::iterator_category...>::type,
767 typename ZipLongestTupleType<Iters...>::type,
768 typename std::iterator_traits<typename std::tuple_element<
769 0, std::tuple<Iters...>>::type>::difference_type,
770 typename ZipLongestTupleType<Iters...>::type *,
771 typename ZipLongestTupleType<Iters...>::type> {
772public:
773 using value_type = typename ZipLongestTupleType<Iters...>::type;
774
775private:
776 std::tuple<Iters...> iterators;
777 std::tuple<Iters...> end_iterators;
778
779 template <size_t... Ns>
780 bool test(const zip_longest_iterator<Iters...> &other,
781 std::index_sequence<Ns...>) const {
782 return llvm::any_of(
783 std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
784 std::get<Ns>(other.iterators)...},
785 identity<bool>{});
786 }
787
788 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
789 return value_type(
790 deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
791 }
792
793 template <size_t... Ns>
794 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
795 return std::tuple<Iters...>(
796 next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
797 }
798
799public:
800 zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
801 : iterators(std::forward<Iters>(ts.first)...),
802 end_iterators(std::forward<Iters>(ts.second)...) {}
803
804 value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
805
806 value_type operator*() const {
807 return deref(std::index_sequence_for<Iters...>{});
808 }
809
810 zip_longest_iterator<Iters...> &operator++() {
811 iterators = tup_inc(std::index_sequence_for<Iters...>{});
812 return *this;
813 }
814
815 bool operator==(const zip_longest_iterator<Iters...> &other) const {
816 return !test(other, std::index_sequence_for<Iters...>{});
817 }
818};
819
820template <typename... Args> class zip_longest_range {
821public:
822 using iterator =
823 zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
824 using iterator_category = typename iterator::iterator_category;
825 using value_type = typename iterator::value_type;
826 using difference_type = typename iterator::difference_type;
827 using pointer = typename iterator::pointer;
828 using reference = typename iterator::reference;
829
830private:
831 std::tuple<Args...> ts;
832
833 template <size_t... Ns>
834 iterator begin_impl(std::index_sequence<Ns...>) const {
835 return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
836 adl_end(std::get<Ns>(ts)))...);
837 }
838
839 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
840 return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
841 adl_end(std::get<Ns>(ts)))...);
842 }
843
844public:
845 zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
846
847 iterator begin() const {
848 return begin_impl(std::index_sequence_for<Args...>{});
849 }
850 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
851};
852} // namespace detail
853
854/// Iterate over two or more iterators at the same time. Iteration continues
855/// until all iterators reach the end. The llvm::Optional only contains a value
856/// if the iterator has not reached the end.
857template <typename T, typename U, typename... Args>
858detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
859 Args &&... args) {
860 return detail::zip_longest_range<T, U, Args...>(
861 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
862}
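An illustrative sketch of the padding behavior (values invented):

#include "llvm/ADT/STLExtras.h"
#include <vector>

int sumWithPadding() {
  std::vector<int> A = {1, 2, 3};
  std::vector<int> B = {10, 20};
  int Sum = 0;
  for (auto T : llvm::zip_longest(A, B))
    // Once B is exhausted its slot yields None; treat missing values as 0.
    Sum += std::get<0>(T).getValueOr(0) + std::get<1>(T).getValueOr(0);
  return Sum; // 36
}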
863
864/// Iterator wrapper that concatenates sequences together.
865///
866/// This can concatenate different iterators, even with different types, into
867/// a single iterator provided the value types of all the concatenated
868/// iterators expose `reference` and `pointer` types that can be converted to
869/// `ValueT &` and `ValueT *` respectively. It doesn't support more
870/// interesting/customized pointer or reference types.
871///
872/// Currently this only supports forward or higher iterator categories as
873/// inputs and always exposes a forward iterator interface.
874template <typename ValueT, typename... IterTs>
875class concat_iterator
876 : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
877 std::forward_iterator_tag, ValueT> {
878 using BaseT = typename concat_iterator::iterator_facade_base;
879
880 /// We store both the current and end iterators for each concatenated
881 /// sequence in a tuple of pairs.
882 ///
883 /// Note that something like iterator_range seems nice at first here, but the
884 /// range properties are of little benefit and end up getting in the way
885 /// because we need to do mutation on the current iterators.
886 std::tuple<IterTs...> Begins;
887 std::tuple<IterTs...> Ends;
888
889 /// Attempts to increment a specific iterator.
890 ///
891 /// Returns true if it was able to increment the iterator. Returns false if
892 /// the iterator is already at the end iterator.
893 template <size_t Index> bool incrementHelper() {
894 auto &Begin = std::get<Index>(Begins);
895 auto &End = std::get<Index>(Ends);
896 if (Begin == End)
897 return false;
898
899 ++Begin;
900 return true;
901 }
902
903 /// Increments the first non-end iterator.
904 ///
905 /// It is an error to call this with all iterators at the end.
906 template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
907 // Build a sequence of functions to increment each iterator if possible.
908 bool (concat_iterator::*IncrementHelperFns[])() = {
909 &concat_iterator::incrementHelper<Ns>...};
910
911 // Loop over them, and stop as soon as we succeed at incrementing one.
912 for (auto &IncrementHelperFn : IncrementHelperFns)
913 if ((this->*IncrementHelperFn)())
914 return;
915
916 llvm_unreachable("Attempted to increment an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to increment an end concat iterator!"
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/STLExtras.h"
, 916)
;
917 }
918
919 /// Returns null if the specified iterator is at the end. Otherwise,
920 /// dereferences the iterator and returns the address of the resulting
921 /// reference.
922 template <size_t Index> ValueT *getHelper() const {
923 auto &Begin = std::get<Index>(Begins);
924 auto &End = std::get<Index>(Ends);
925 if (Begin == End)
926 return nullptr;
927
928 return &*Begin;
929 }
930
931 /// Finds the first non-end iterator, dereferences, and returns the resulting
932 /// reference.
933 ///
934 /// It is an error to call this with all iterators at the end.
935 template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
936 // Build a sequence of functions to get from iterator if possible.
937 ValueT *(concat_iterator::*GetHelperFns[])() const = {
938 &concat_iterator::getHelper<Ns>...};
939
940 // Loop over them, and return the first result we find.
941 for (auto &GetHelperFn : GetHelperFns)
942 if (ValueT *P = (this->*GetHelperFn)())
943 return *P;
944
945 llvm_unreachable("Attempted to get a pointer from an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to get a pointer from an end concat iterator!"
, "/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/STLExtras.h"
, 945)
;
946 }
947
948public:
949 /// Constructs an iterator from a sequence of ranges.
950 ///
951 /// We need the full range to know how to switch between each of the
952 /// iterators.
953 template <typename... RangeTs>
954 explicit concat_iterator(RangeTs &&... Ranges)
955 : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}
956
957 using BaseT::operator++;
958
959 concat_iterator &operator++() {
960 increment(std::index_sequence_for<IterTs...>());
961 return *this;
962 }
963
964 ValueT &operator*() const {
965 return get(std::index_sequence_for<IterTs...>());
966 }
967
968 bool operator==(const concat_iterator &RHS) const {
969 return Begins == RHS.Begins && Ends == RHS.Ends;
970 }
971};
972
973namespace detail {
974
975/// Helper to store a sequence of ranges being concatenated and access them.
976///
977/// This is designed to facilitate providing actual storage when temporaries
978/// are passed into the constructor such that we can use it as part of range
979/// based for loops.
980template <typename ValueT, typename... RangeTs> class concat_range {
981public:
982 using iterator =
983 concat_iterator<ValueT,
984 decltype(std::begin(std::declval<RangeTs &>()))...>;
985
986private:
987 std::tuple<RangeTs...> Ranges;
988
989 template <size_t... Ns> iterator begin_impl(std::index_sequence<Ns...>) {
990 return iterator(std::get<Ns>(Ranges)...);
991 }
992 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
993 return iterator(make_range(std::end(std::get<Ns>(Ranges)),
994 std::end(std::get<Ns>(Ranges)))...);
995 }
996
997public:
998 concat_range(RangeTs &&... Ranges)
999 : Ranges(std::forward<RangeTs>(Ranges)...) {}
1000
1001 iterator begin() { return begin_impl(std::index_sequence_for<RangeTs...>{}); }
1002 iterator end() { return end_impl(std::index_sequence_for<RangeTs...>{}); }
1003};
1004
1005} // end namespace detail
1006
1007/// Concatenated range across two or more ranges.
1008///
1009/// The desired value type must be explicitly specified.
1010template <typename ValueT, typename... RangeTs>
1011detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
1012 static_assert(sizeof...(RangeTs) > 1,
1013 "Need more than one range to concatenate!");
1014 return detail::concat_range<ValueT, RangeTs...>(
1015 std::forward<RangeTs>(Ranges)...);
1016}
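A minimal illustrative sketch; the explicitly spelled value type is the only extra ceremony:

#include "llvm/ADT/STLExtras.h"
#include <vector>

int sumBoth() {
  std::vector<int> A = {1, 2};
  std::vector<int> B = {3, 4};
  int Sum = 0;
  for (int &V : llvm::concat<int>(A, B)) // visits 1, 2, 3, 4 in order
    Sum += V;
  return Sum; // 10
}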
1017
1018/// A utility class used to implement an iterator that contains some base object
1019/// and an index. The iterator moves the index but keeps the base constant.
1020template <typename DerivedT, typename BaseT, typename T,
1021 typename PointerT = T *, typename ReferenceT = T &>
1022class indexed_accessor_iterator
1023 : public llvm::iterator_facade_base<DerivedT,
1024 std::random_access_iterator_tag, T,
1025 std::ptrdiff_t, PointerT, ReferenceT> {
1026public:
1027 ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
1028 assert(base == rhs.base && "incompatible iterators");
1029 return index - rhs.index;
1030 }
1031 bool operator==(const indexed_accessor_iterator &rhs) const {
1032 return base == rhs.base && index == rhs.index;
1033 }
1034 bool operator<(const indexed_accessor_iterator &rhs) const {
1035 assert(base == rhs.base && "incompatible iterators");
1036 return index < rhs.index;
1037 }
1038
1039 DerivedT &operator+=(ptrdiff_t offset) {
1040 this->index += offset;
1041 return static_cast<DerivedT &>(*this);
1042 }
1043 DerivedT &operator-=(ptrdiff_t offset) {
1044 this->index -= offset;
1045 return static_cast<DerivedT &>(*this);
1046 }
1047
1048 /// Returns the current index of the iterator.
1049 ptrdiff_t getIndex() const { return index; }
1050
1051 /// Returns the current base of the iterator.
1052 const BaseT &getBase() const { return base; }
1053
1054protected:
1055 indexed_accessor_iterator(BaseT base, ptrdiff_t index)
1056 : base(base), index(index) {}
1057 BaseT base;
1058 ptrdiff_t index;
1059};
1060
1061namespace detail {
1062/// The class represents the base of a range of indexed_accessor_iterators. It
1063/// provides support for many different range functionalities, e.g.
1064/// drop_front/slice/etc. Derived range classes must implement the following
1065/// static methods:
1066/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
1067/// - Dereference an iterator pointing to the base object at the given
1068/// index.
1069/// * BaseT offset_base(const BaseT &base, ptrdiff_t index)
1070/// - Return a new base that is offset from the provided base by 'index'
1071/// elements.
1072template <typename DerivedT, typename BaseT, typename T,
1073 typename PointerT = T *, typename ReferenceT = T &>
1074class indexed_accessor_range_base {
1075public:
1076 using RangeBaseT =
1077 indexed_accessor_range_base<DerivedT, BaseT, T, PointerT, ReferenceT>;
1078
1079 /// An iterator element of this range.
1080 class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
1081 PointerT, ReferenceT> {
1082 public:
1083 // Index into this iterator, invoking a static method on the derived type.
1084 ReferenceT operator*() const {
1085 return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
1086 }
1087
1088 private:
1089 iterator(BaseT owner, ptrdiff_t curIndex)
1090 : indexed_accessor_iterator<iterator, BaseT, T, PointerT, ReferenceT>(
1091 owner, curIndex) {}
1092
1093 /// Allow access to the constructor.
1094 friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
1095 ReferenceT>;
1096 };
1097
1098 indexed_accessor_range_base(iterator begin, iterator end)
1099 : base(offset_base(begin.getBase(), begin.getIndex())),
1100 count(end.getIndex() - begin.getIndex()) {}
1101 indexed_accessor_range_base(const iterator_range<iterator> &range)
1102 : indexed_accessor_range_base(range.begin(), range.end()) {}
1103 indexed_accessor_range_base(BaseT base, ptrdiff_t count)
1104 : base(base), count(count) {}
1105
1106 iterator begin() const { return iterator(base, 0); }
1107 iterator end() const { return iterator(base, count); }
1108 ReferenceT operator[](size_t Index) const {
1109 assert(Index < size() && "invalid index for value range");
1110 return DerivedT::dereference_iterator(base, static_cast<ptrdiff_t>(Index));
1111 }
1112 ReferenceT front() const {
1113 assert(!empty() && "expected non-empty range");
1114 return (*this)[0];
1115 }
1116 ReferenceT back() const {
1117 assert(!empty() && "expected non-empty range");
1118 return (*this)[size() - 1];
1119 }
1120
1121 /// Compare this range with another.
1122 template <typename OtherT> bool operator==(const OtherT &other) const {
1123 return size() ==
1124 static_cast<size_t>(std::distance(other.begin(), other.end())) &&
1125 std::equal(begin(), end(), other.begin());
1126 }
1127 template <typename OtherT> bool operator!=(const OtherT &other) const {
1128 return !(*this == other);
1129 }
1130
1131 /// Return the size of this range.
1132 size_t size() const { return count; }
1133
1134 /// Return if the range is empty.
1135 bool empty() const { return size() == 0; }
1136
1137 /// Drop the first N elements, and keep M elements.
1138 DerivedT slice(size_t n, size_t m) const {
1139 assert(n + m <= size() && "invalid size specifiers");
1140 return DerivedT(offset_base(base, n), m);
1141 }
1142
1143 /// Drop the first n elements.
1144 DerivedT drop_front(size_t n = 1) const {
1145 assert(size() >= n && "Dropping more elements than exist");
1146 return slice(n, size() - n);
1147 }
1148 /// Drop the last n elements.
1149 DerivedT drop_back(size_t n = 1) const {
1150 assert(size() >= n && "Dropping more elements than exist");
1151 return DerivedT(base, size() - n);
1152 }
1153
1154 /// Take the first n elements.
1155 DerivedT take_front(size_t n = 1) const {
1156 return n < size() ? drop_back(size() - n)
1157 : static_cast<const DerivedT &>(*this);
1158 }
1159
1160 /// Take the last n elements.
1161 DerivedT take_back(size_t n = 1) const {
1162 return n < size() ? drop_front(size() - n)
1163 : static_cast<const DerivedT &>(*this);
1164 }
1165
1166 /// Allow conversion to any type accepting an iterator_range.
1167 template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
1168 RangeT, iterator_range<iterator>>::value>>
1169 operator RangeT() const {
1170 return RangeT(iterator_range<iterator>(*this));
1171 }
1172
1173 /// Returns the base of this range.
1174 const BaseT &getBase() const { return base; }
1175
1176private:
1177 /// Offset the given base by the given amount.
1178 static BaseT offset_base(const BaseT &base, size_t n) {
1179 return n == 0 ? base : DerivedT::offset_base(base, n);
1180 }
1181
1182protected:
1183 indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
1184 indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
1185 indexed_accessor_range_base &
1186 operator=(const indexed_accessor_range_base &) = default;
1187
1188 /// The base that owns the provided range of values.
1189 BaseT base;
1190 /// The size from the owning range.
1191 ptrdiff_t count;
1192};
1193} // end namespace detail
1194
1195/// This class provides an implementation of a range of
1196/// indexed_accessor_iterators where the base is not indexable. Ranges with
1197/// bases that are offsetable should derive from indexed_accessor_range_base
1198/// instead. Derived range classes are expected to implement the following
1199/// static method:
1200/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
1201/// - Dereference an iterator pointing to a parent base at the given index.
1202template <typename DerivedT, typename BaseT, typename T,
1203 typename PointerT = T *, typename ReferenceT = T &>
1204class indexed_accessor_range
1205 : public detail::indexed_accessor_range_base<
1206 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
1207public:
1208 indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
1209 : detail::indexed_accessor_range_base<
1210 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
1211 std::make_pair(base, startIndex), count) {}
1212 using detail::indexed_accessor_range_base<
1213 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
1214 ReferenceT>::indexed_accessor_range_base;
1215
1216 /// Returns the current base of the range.
1217 const BaseT &getBase() const { return this->base.first; }
1218
1219 /// Returns the current start index of the range.
1220 ptrdiff_t getStartIndex() const { return this->base.second; }
1221
1222 /// See `detail::indexed_accessor_range_base` for details.
1223 static std::pair<BaseT, ptrdiff_t>
1224 offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
1225 // We encode the internal base as a pair of the derived base and a start
1226 // index into the derived base.
1227 return std::make_pair(base.first, base.second + index);
1228 }
1229 /// See `detail::indexed_accessor_range_base` for details.
1230 static ReferenceT
1231 dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
1232 ptrdiff_t index) {
1233 return DerivedT::dereference(base.first, base.second + index);
1234 }
1235};
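A hypothetical minimal client (type and function names invented for illustration), supplying the static hook documented above:

#include "llvm/ADT/STLExtras.h"

// Random-access view over a plain array. `dereference` is the static method
// that the comment above names as the derived type's obligation.
struct IntArrayRange
    : llvm::indexed_accessor_range<IntArrayRange, const int *, const int> {
  using indexed_accessor_range::indexed_accessor_range;
  static const int &dereference(const int *base, ptrdiff_t index) {
    return base[index];
  }
};

int secondElement(const int *Vals, ptrdiff_t N) {
  IntArrayRange R(Vals, /*startIndex=*/0, /*count=*/N);
  return R[1]; // forwards to dereference(Vals, 0 + 1); assumes N >= 2
}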
1236
1237/// Given a container of pairs, return a range over the first elements.
1238template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
1239 return llvm::map_range(
1240 std::forward<ContainerTy>(c),
1241 [](decltype((*std::begin(c))) elt) -> decltype((elt.first)) {
1242 return elt.first;
1243 });
1244}
1245
1246/// Given a container of pairs, return a range over the second elements.
1247template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
1248 return llvm::map_range(
1249 std::forward<ContainerTy>(c),
1250 [](decltype((*std::begin(c))) elt) -> decltype((elt.second)) {
1251 return elt.second;
1252 });
1253}
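An illustrative sketch over a map (names invented):

#include "llvm/ADT/STLExtras.h"
#include <map>
#include <string>

int sumCounts(const std::map<std::string, int> &Counts) {
  int Sum = 0;
  // Views only the mapped values; make_first_range would view the keys.
  for (int V : llvm::make_second_range(Counts))
    Sum += V;
  return Sum;
}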
1254
1255//===----------------------------------------------------------------------===//
1256// Extra additions to <utility>
1257//===----------------------------------------------------------------------===//
1258
1259/// Function object to check whether the first component of a std::pair
1260/// compares less than the first component of another std::pair.
1261struct less_first {
1262 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1263 return lhs.first < rhs.first;
1264 }
1265};
1266
1267/// Function object to check whether the second component of a std::pair
1268/// compares less than the second component of another std::pair.
1269struct less_second {
1270 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1271 return lhs.second < rhs.second;
1272 }
1273};
1274
1275/// \brief Function object to apply a binary function to the first component of
1276/// a std::pair.
1277template<typename FuncTy>
1278struct on_first {
1279 FuncTy func;
1280
1281 template <typename T>
1282 decltype(auto) operator()(const T &lhs, const T &rhs) const {
1283 return func(lhs.first, rhs.first);
1284 }
1285};
1286
1287/// Utility type to build an inheritance chain that makes it easy to rank
1288/// overload candidates.
1289template <int N> struct rank : rank<N - 1> {};
1290template <> struct rank<0> {};
1291
1292/// traits class for checking whether type T is one of any of the given
1293/// types in the variadic list.
1294template <typename T, typename... Ts>
1295using is_one_of = disjunction<std::is_same<T, Ts>...>;
1296
1297/// traits class for checking whether type T is a base class for all
1298/// the given types in the variadic list.
1299template <typename T, typename... Ts>
1300using are_base_of = conjunction<std::is_base_of<T, Ts>...>;
1301
1302//===----------------------------------------------------------------------===//
1303// Extra additions for arrays
1304//===----------------------------------------------------------------------===//
1305
1306// We have a copy here so that LLVM behaves the same when using different
1307// standard libraries.
1308template <class Iterator, class RNG>
1309void shuffle(Iterator first, Iterator last, RNG &&g) {
1310 // It would be better to use a std::uniform_int_distribution,
1311 // but that would be stdlib dependent.
1312 typedef
1313 typename std::iterator_traits<Iterator>::difference_type difference_type;
1314 for (auto size = last - first; size > 1; ++first, (void)--size) {
1315 difference_type offset = g() % size;
1316 // Avoid self-assignment due to incorrect assertions in libstdc++
1317 // containers (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85828).
1318 if (offset != difference_type(0))
1319 std::iter_swap(first, first + offset);
1320 }
1321}
1322
1323/// Find the length of an array.
1324template <class T, std::size_t N>
1325constexpr inline size_t array_lengthof(T (&)[N]) {
1326 return N;
1327}
1328
1329/// Adapt std::less<T> for array_pod_sort.
1330template<typename T>
1331inline int array_pod_sort_comparator(const void *P1, const void *P2) {
1332 if (std::less<T>()(*reinterpret_cast<const T*>(P1),
1333 *reinterpret_cast<const T*>(P2)))
1334 return -1;
1335 if (std::less<T>()(*reinterpret_cast<const T*>(P2),
1336 *reinterpret_cast<const T*>(P1)))
1337 return 1;
1338 return 0;
1339}
1340
1341/// get_array_pod_sort_comparator - This is an internal helper function used to
1342/// get type deduction of T right.
1343template<typename T>
1344inline int (*get_array_pod_sort_comparator(const T &))
1345 (const void*, const void*) {
1346 return array_pod_sort_comparator<T>;
1347}
1348
1349#ifdef EXPENSIVE_CHECKS
1350namespace detail {
1351
1352inline unsigned presortShuffleEntropy() {
1353 static unsigned Result(std::random_device{}());
1354 return Result;
1355}
1356
1357template <class IteratorTy>
1358inline void presortShuffle(IteratorTy Start, IteratorTy End) {
1359 std::mt19937 Generator(presortShuffleEntropy());
1360 llvm::shuffle(Start, End, Generator);
1361}
1362
1363} // end namespace detail
1364#endif
1365
1366/// array_pod_sort - This sorts an array with the specified start and end
1367/// extent. This is just like std::sort, except that it calls qsort instead of
1368/// using an inlined template. qsort is slightly slower than std::sort, but
1369/// most sorts are not performance critical in LLVM and std::sort has to be
1370/// template instantiated for each type, leading to significant measured code
1371/// bloat. This function should generally be used instead of std::sort where
1372/// possible.
1373///
1374/// This function assumes that you have simple POD-like types that can be
1375/// compared with std::less and can be moved with memcpy. If this isn't true,
1376/// you should use std::sort.
1377///
1378/// NOTE: If qsort_r were portable, we could allow a custom comparator and
1379/// default to std::less.
1380template<class IteratorTy>
1381inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
1382 // Don't inefficiently call qsort with one element or trigger undefined
1383 // behavior with an empty sequence.
1384 auto NElts = End - Start;
1385 if (NElts <= 1) return;
1386#ifdef EXPENSIVE_CHECKS
1387 detail::presortShuffle<IteratorTy>(Start, End);
1388#endif
1389 qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
1390}
1391
1392template <class IteratorTy>
1393inline void array_pod_sort(
1394 IteratorTy Start, IteratorTy End,
1395 int (*Compare)(
1396 const typename std::iterator_traits<IteratorTy>::value_type *,
1397 const typename std::iterator_traits<IteratorTy>::value_type *)) {
1398 // Don't inefficiently call qsort with one element or trigger undefined
1399 // behavior with an empty sequence.
1400 auto NElts = End - Start;
1401 if (NElts <= 1) return;
1402#ifdef EXPENSIVE_CHECKS
1403 detail::presortShuffle<IteratorTy>(Start, End);
1404#endif
1405 qsort(&*Start, NElts, sizeof(*Start),
1406 reinterpret_cast<int (*)(const void *, const void *)>(Compare));
1407}
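A short illustrative sketch of the qsort-backed overloads:

#include "llvm/ADT/STLExtras.h"
#include <iterator>

void sortKeys(int (&Keys)[8]) {
  // POD-friendly: one shared qsort body instead of a std::sort instantiation
  // per element type.
  llvm::array_pod_sort(std::begin(Keys), std::end(Keys));
}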
1408
1409namespace detail {
1410template <typename T>
1411// We can use qsort if the iterator type is a pointer and the underlying value
1412// is trivially copyable.
1413using sort_trivially_copyable = conjunction<
1414 std::is_pointer<T>,
1415 std::is_trivially_copyable<typename std::iterator_traits<T>::value_type>>;
1416} // namespace detail
1417
1418// Provide wrappers to std::sort which shuffle the elements before sorting
1419// to help uncover non-deterministic behavior (PR35135).
1420template <typename IteratorTy,
1421 std::enable_if_t<!detail::sort_trivially_copyable<IteratorTy>::value,
1422 int> = 0>
1423inline void sort(IteratorTy Start, IteratorTy End) {
1424#ifdef EXPENSIVE_CHECKS
1425 detail::presortShuffle<IteratorTy>(Start, End);
1426#endif
1427 std::sort(Start, End);
1428}
1429
1430// Forward trivially copyable types to array_pod_sort. This avoids a large
1431// amount of code bloat for a minor performance hit.
1432template <typename IteratorTy,
1433 std::enable_if_t<detail::sort_trivially_copyable<IteratorTy>::value,
1434 int> = 0>
1435inline void sort(IteratorTy Start, IteratorTy End) {
1436 array_pod_sort(Start, End);
1437}
1438
1439template <typename Container> inline void sort(Container &&C) {
1440 llvm::sort(adl_begin(C), adl_end(C));
1441}
1442
1443template <typename IteratorTy, typename Compare>
1444inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
1445#ifdef EXPENSIVE_CHECKS
1446 detail::presortShuffle<IteratorTy>(Start, End);
1447#endif
1448 std::sort(Start, End, Comp);
1449}
1450
1451template <typename Container, typename Compare>
1452inline void sort(Container &&C, Compare Comp) {
1453 llvm::sort(adl_begin(C), adl_end(C), Comp);
1454}
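An illustrative sketch tying the range-based wrapper to the <utility> helpers defined earlier in this header:

#include "llvm/ADT/STLExtras.h"
#include <utility>
#include <vector>

void sortByValue(std::vector<std::pair<int, int>> &V) {
  // less_second orders the pairs by their second element.
  llvm::sort(V, llvm::less_second());
}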
1455
1456//===----------------------------------------------------------------------===//
1457// Extra additions to <algorithm>
1458//===----------------------------------------------------------------------===//
1459
1460/// Get the size of a range. This is a wrapper function around std::distance
1461/// which is only enabled when the operation is O(1).
1462template <typename R>
1463auto size(R &&Range,
1464 std::enable_if_t<
1465 std::is_base_of<std::random_access_iterator_tag,
1466 typename std::iterator_traits<decltype(
1467 Range.begin())>::iterator_category>::value,
1468 void> * = nullptr) {
1469 return std::distance(Range.begin(), Range.end());
1470}
1471
1472/// Provide wrappers to std::for_each which take ranges instead of having to
1473/// pass begin/end explicitly.
1474template <typename R, typename UnaryFunction>
1475UnaryFunction for_each(R &&Range, UnaryFunction F) {
1476 return std::for_each(adl_begin(Range), adl_end(Range), F);
1477}
1478
1479/// Provide wrappers to std::all_of which take ranges instead of having to pass
1480/// begin/end explicitly.
1481template <typename R, typename UnaryPredicate>
1482bool all_of(R &&Range, UnaryPredicate P) {
1483 return std::all_of(adl_begin(Range), adl_end(Range), P);
1484}
1485
1486/// Provide wrappers to std::any_of which take ranges instead of having to pass
1487/// begin/end explicitly.
1488template <typename R, typename UnaryPredicate>
1489bool any_of(R &&Range, UnaryPredicate P) {
1490 return std::any_of(adl_begin(Range), adl_end(Range), P);
69: Calling 'any_of<llvm::Value::use_iterator_impl<llvm::Use>, (lambda at /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp:1373:35)>'
72: Returning from 'any_of<llvm::Value::use_iterator_impl<llvm::Use>, (lambda at /build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp:1373:35)>'
73: Returning zero, which participates in a condition later
1491}
1492
1493/// Provide wrappers to std::none_of which take ranges instead of having to pass
1494/// begin/end explicitly.
1495template <typename R, typename UnaryPredicate>
1496bool none_of(R &&Range, UnaryPredicate P) {
1497 return std::none_of(adl_begin(Range), adl_end(Range), P);
1498}
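For reference, an illustrative sketch of these range wrappers (the any_of overload here is the one the analyzer path notes above step through):

#include "llvm/ADT/STLExtras.h"
#include <vector>

bool classify(const std::vector<int> &V) {
  bool AllPositive = llvm::all_of(V, [](int X) { return X > 0; });
  bool AnyEven = llvm::any_of(V, [](int X) { return X % 2 == 0; });
  bool NoneHuge = llvm::none_of(V, [](int X) { return X > 100; });
  return AllPositive && AnyEven && NoneHuge;
}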
1499
1500/// Provide wrappers to std::find which take ranges instead of having to pass
1501/// begin/end explicitly.
1502template <typename R, typename T> auto find(R &&Range, const T &Val) {
1503 return std::find(adl_begin(Range), adl_end(Range), Val);
1504}
1505
1506/// Provide wrappers to std::find_if which take ranges instead of having to pass
1507/// begin/end explicitly.
1508template <typename R, typename UnaryPredicate>
1509auto find_if(R &&Range, UnaryPredicate P) {
1510 return std::find_if(adl_begin(Range), adl_end(Range), P);
1511}
1512
1513template <typename R, typename UnaryPredicate>
1514auto find_if_not(R &&Range, UnaryPredicate P) {
1515 return std::find_if_not(adl_begin(Range), adl_end(Range), P);
1516}
1517
1518/// Provide wrappers to std::remove_if which take ranges instead of having to
1519/// pass begin/end explicitly.
1520template <typename R, typename UnaryPredicate>
1521auto remove_if(R &&Range, UnaryPredicate P) {
1522 return std::remove_if(adl_begin(Range), adl_end(Range), P);
1523}
1524
1525/// Provide wrappers to std::copy_if which take ranges instead of having to
1526/// pass begin/end explicitly.
1527template <typename R, typename OutputIt, typename UnaryPredicate>
1528OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
1529 return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
1530}
1531
1532template <typename R, typename OutputIt>
1533OutputIt copy(R &&Range, OutputIt Out) {
1534 return std::copy(adl_begin(Range), adl_end(Range), Out);
1535}
1536
1537/// Provide wrappers to std::move which take ranges instead of having to
1538/// pass begin/end explicitly.
1539template <typename R, typename OutputIt>
1540OutputIt move(R &&Range, OutputIt Out) {
1541 return std::move(adl_begin(Range), adl_end(Range), Out);
1542}
1543
1544/// Wrapper function around std::find to detect if an element exists
1545/// in a container.
1546template <typename R, typename E>
1547bool is_contained(R &&Range, const E &Element) {
1548 return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
1549}
1550
1551/// Wrapper function around std::is_sorted to check if elements in a range \p R
1552/// are sorted with respect to a comparator \p C.
1553template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
1554 return std::is_sorted(adl_begin(Range), adl_end(Range), C);
1555}
1556
1557/// Wrapper function around std::is_sorted to check if elements in a range \p R
1558/// are sorted in non-descending order.
1559template <typename R> bool is_sorted(R &&Range) {
1560 return std::is_sorted(adl_begin(Range), adl_end(Range));
1561}
1562
1563/// Wrapper function around std::count to count the number of times an element
1564/// \p Element occurs in the given range \p Range.
1565template <typename R, typename E> auto count(R &&Range, const E &Element) {
1566 return std::count(adl_begin(Range), adl_end(Range), Element);
1567}
1568
1569/// Wrapper function around std::count_if to count the number of times an
1570/// element satisfying a given predicate occurs in a range.
1571template <typename R, typename UnaryPredicate>
1572auto count_if(R &&Range, UnaryPredicate P) {
1573 return std::count_if(adl_begin(Range), adl_end(Range), P);
1574}
1575
1576/// Wrapper function around std::transform to apply a function to a range and
1577/// store the result elsewhere.
1578template <typename R, typename OutputIt, typename UnaryFunction>
1579OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F) {
1580 return std::transform(adl_begin(Range), adl_end(Range), d_first, F);
1581}
1582
1583/// Provide wrappers to std::partition which take ranges instead of having to
1584/// pass begin/end explicitly.
1585template <typename R, typename UnaryPredicate>
1586auto partition(R &&Range, UnaryPredicate P) {
1587 return std::partition(adl_begin(Range), adl_end(Range), P);
1588}
1589
1590/// Provide wrappers to std::lower_bound which take ranges instead of having to
1591/// pass begin/end explicitly.
1592template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) {
1593 return std::lower_bound(adl_begin(Range), adl_end(Range),
1594 std::forward<T>(Value));
1595}
1596
1597template <typename R, typename T, typename Compare>
1598auto lower_bound(R &&Range, T &&Value, Compare C) {
1599 return std::lower_bound(adl_begin(Range), adl_end(Range),
1600 std::forward<T>(Value), C);
1601}
1602
1603/// Provide wrappers to std::upper_bound which take ranges instead of having to
1604/// pass begin/end explicitly.
1605template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) {
1606 return std::upper_bound(adl_begin(Range), adl_end(Range),
1607 std::forward<T>(Value));
1608}
1609
1610template <typename R, typename T, typename Compare>
1611auto upper_bound(R &&Range, T &&Value, Compare C) {
1612 return std::upper_bound(adl_begin(Range), adl_end(Range),
1613 std::forward<T>(Value), C);
1614}
1615
1616template <typename R>
1617void stable_sort(R &&Range) {
1618 std::stable_sort(adl_begin(Range), adl_end(Range));
1619}
1620
1621template <typename R, typename Compare>
1622void stable_sort(R &&Range, Compare C) {
1623 std::stable_sort(adl_begin(Range), adl_end(Range), C);
1624}
1625
1626/// Binary search for the first iterator in a range where a predicate is false.
1627/// Requires that P is always true below some limit, and always false above it.
1628template <typename R, typename Predicate,
1629 typename Val = decltype(*adl_begin(std::declval<R>()))>
1630auto partition_point(R &&Range, Predicate P) {
1631 return std::partition_point(adl_begin(Range), adl_end(Range), P);
1632}
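An illustrative sketch of that predicate contract (values invented):

#include "llvm/ADT/STLExtras.h"
#include <vector>

int firstAtLeastTen(const std::vector<int> &Sorted) {
  // The predicate is true for every element below the limit, false above it.
  auto It = llvm::partition_point(Sorted, [](int X) { return X < 10; });
  return It == Sorted.end() ? -1 : *It;
}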
1633
1634template<typename Range, typename Predicate>
1635auto unique(Range &&R, Predicate P) {
1636 return std::unique(adl_begin(R), adl_end(R), P);
1637}
1638
1639/// Wrapper function around std::equal to detect if all elements
1640/// in a container are the same.
1641template <typename R>
1642bool is_splat(R &&Range) {
1643 size_t range_size = size(Range);
1644 return range_size != 0 && (range_size == 1 ||
1645 std::equal(adl_begin(Range) + 1, adl_end(Range), adl_begin(Range)));
1646}
1647
1648/// Provide a container algorithm similar to C++ Library Fundamentals v2's
1649/// `erase_if` which is equivalent to:
1650///
1651/// C.erase(remove_if(C, pred), C.end());
1652///
1653/// This version works for any container with an erase method call accepting
1654/// two iterators.
1655template <typename Container, typename UnaryPredicate>
1656void erase_if(Container &C, UnaryPredicate P) {
1657 C.erase(remove_if(C, P), C.end());
1658}
1659
1660/// Wrapper function to remove a value from a container:
1661///
1662/// C.erase(remove(C.begin(), C.end(), V), C.end());
1663template <typename Container, typename ValueType>
1664void erase_value(Container &C, ValueType V) {
1665 C.erase(std::remove(C.begin(), C.end(), V), C.end());
1666}
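A brief illustrative sketch of both erase helpers:

#include "llvm/ADT/STLExtras.h"
#include <vector>

void prune(std::vector<int> &V) {
  llvm::erase_if(V, [](int X) { return X % 2 == 0; }); // drop even values
  llvm::erase_value(V, 7);                             // drop every 7
}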
1667
1668/// Wrapper function to append a range to a container.
1669///
1670/// C.insert(C.end(), R.begin(), R.end());
1671template <typename Container, typename Range>
1672inline void append_range(Container &C, Range &&R) {
1673 C.insert(C.end(), R.begin(), R.end());
1674}
1675
1676/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
1677/// the range [ValIt, ValEnd) (which is not from the same container).
1678template<typename Container, typename RandomAccessIterator>
1679void replace(Container &Cont, typename Container::iterator ContIt,
1680 typename Container::iterator ContEnd, RandomAccessIterator ValIt,
1681 RandomAccessIterator ValEnd) {
1682 while (true) {
1683 if (ValIt == ValEnd) {
1684 Cont.erase(ContIt, ContEnd);
1685 return;
1686 } else if (ContIt == ContEnd) {
1687 Cont.insert(ContIt, ValIt, ValEnd);
1688 return;
1689 }
1690 *ContIt++ = *ValIt++;
1691 }
1692}
1693
1694/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
1695/// the range R.
1696template<typename Container, typename Range = std::initializer_list<
1697 typename Container::value_type>>
1698void replace(Container &Cont, typename Container::iterator ContIt,
1699 typename Container::iterator ContEnd, Range R) {
1700 replace(Cont, ContIt, ContEnd, R.begin(), R.end());
1701}
1702
1703/// An STL-style algorithm similar to std::for_each that applies a second
1704/// functor between every pair of elements.
1705///
1706/// This provides the control flow logic to, for example, print a
1707/// comma-separated list:
1708/// \code
1709/// interleave(names.begin(), names.end(),
1710/// [&](StringRef name) { os << name; },
1711/// [&] { os << ", "; });
1712/// \endcode
1713template <typename ForwardIterator, typename UnaryFunctor,
1714 typename NullaryFunctor,
1715 typename = typename std::enable_if<
1716 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1717 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1718inline void interleave(ForwardIterator begin, ForwardIterator end,
1719 UnaryFunctor each_fn, NullaryFunctor between_fn) {
1720 if (begin == end)
1721 return;
1722 each_fn(*begin);
1723 ++begin;
1724 for (; begin != end; ++begin) {
1725 between_fn();
1726 each_fn(*begin);
1727 }
1728}
1729
1730template <typename Container, typename UnaryFunctor, typename NullaryFunctor,
1731 typename = typename std::enable_if<
1732 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1733 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1734inline void interleave(const Container &c, UnaryFunctor each_fn,
1735 NullaryFunctor between_fn) {
1736 interleave(c.begin(), c.end(), each_fn, between_fn);
1737}
1738
1739/// Overload of interleave for the common case of string separator.
1740template <typename Container, typename UnaryFunctor, typename StreamT,
1741 typename T = detail::ValueOfRange<Container>>
1742inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn,
1743 const StringRef &separator) {
1744 interleave(c.begin(), c.end(), each_fn, [&] { os << separator; });
1745}
1746template <typename Container, typename StreamT,
1747 typename T = detail::ValueOfRange<Container>>
1748inline void interleave(const Container &c, StreamT &os,
1749 const StringRef &separator) {
1750 interleave(
1751 c, os, [&](const T &a) { os << a; }, separator);
1752}
1753
1754template <typename Container, typename UnaryFunctor, typename StreamT,
1755 typename T = detail::ValueOfRange<Container>>
1756inline void interleaveComma(const Container &c, StreamT &os,
1757 UnaryFunctor each_fn) {
1758 interleave(c, os, each_fn, ", ");
1759}
1760template <typename Container, typename StreamT,
1761 typename T = detail::ValueOfRange<Container>>
1762inline void interleaveComma(const Container &c, StreamT &os) {
1763 interleaveComma(c, os, [&](const T &a) { os << a; });
1764}
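An illustrative sketch (assumes llvm/Support/raw_ostream.h for the stream):

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

void printBraced(const std::vector<int> &V) {
  llvm::errs() << "{";
  llvm::interleaveComma(V, llvm::errs()); // e.g. "1, 2, 3"
  llvm::errs() << "}\n";
}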
1765
1766//===----------------------------------------------------------------------===//
1767// Extra additions to <memory>
1768//===----------------------------------------------------------------------===//
1769
1770struct FreeDeleter {
1771 void operator()(void* v) {
1772 ::free(v);
1773 }
1774};
1775
1776template<typename First, typename Second>
1777struct pair_hash {
1778 size_t operator()(const std::pair<First, Second> &P) const {
1779 return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
1780 }
1781};
1782
1783/// Binary functor that adapts to any other binary functor after dereferencing
1784/// operands.
1785template <typename T> struct deref {
1786 T func;
1787
1788 // Could be further improved to cope with non-derivable functors and
1789 // non-binary functors (should be a variadic template member function
1790 // operator()).
1791 template <typename A, typename B> auto operator()(A &lhs, B &rhs) const {
1792 assert(lhs);
1793 assert(rhs);
1794 return func(*lhs, *rhs);
1795 }
1796};
1797
1798namespace detail {
1799
1800template <typename R> class enumerator_iter;
1801
1802template <typename R> struct result_pair {
1803 using value_reference =
1804 typename std::iterator_traits<IterOfRange<R>>::reference;
1805
1806 friend class enumerator_iter<R>;
1807
1808 result_pair() = default;
1809 result_pair(std::size_t Index, IterOfRange<R> Iter)
1810 : Index(Index), Iter(Iter) {}
1811
1812 result_pair(const result_pair<R> &Other)
1813 : Index(Other.Index), Iter(Other.Iter) {}
1814 result_pair &operator=(const result_pair &Other) {
1815 Index = Other.Index;
1816 Iter = Other.Iter;
1817 return *this;
1818 }
1819
1820 std::size_t index() const { return Index; }
1821 const value_reference value() const { return *Iter; }
1822 value_reference value() { return *Iter; }
1823
1824private:
1825 std::size_t Index = std::numeric_limits<std::size_t>::max();
1826 IterOfRange<R> Iter;
1827};
1828
1829template <typename R>
1830class enumerator_iter
1831 : public iterator_facade_base<
1832 enumerator_iter<R>, std::forward_iterator_tag, result_pair<R>,
1833 typename std::iterator_traits<IterOfRange<R>>::difference_type,
1834 typename std::iterator_traits<IterOfRange<R>>::pointer,
1835 typename std::iterator_traits<IterOfRange<R>>::reference> {
1836 using result_type = result_pair<R>;
1837
1838public:
1839 explicit enumerator_iter(IterOfRange<R> EndIter)
1840 : Result(std::numeric_limits<size_t>::max(), EndIter) {}
1841
1842 enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
1843 : Result(Index, Iter) {}
1844
1845 result_type &operator*() { return Result; }
1846 const result_type &operator*() const { return Result; }
1847
1848 enumerator_iter &operator++() {
1849 assert(Result.Index != std::numeric_limits<size_t>::max());
1850 ++Result.Iter;
1851 ++Result.Index;
1852 return *this;
1853 }
1854
1855 bool operator==(const enumerator_iter &RHS) const {
1856 // Don't compare indices here, only iterators. It's possible for an end
1857 // iterator to have different indices depending on whether it was created
1858 // by calling std::end() versus incrementing a valid iterator.
1859 return Result.Iter == RHS.Result.Iter;
1860 }
1861
1862 enumerator_iter(const enumerator_iter &Other) : Result(Other.Result) {}
1863 enumerator_iter &operator=(const enumerator_iter &Other) {
1864 Result = Other.Result;
1865 return *this;
1866 }
1867
1868private:
1869 result_type Result;
1870};
1871
1872template <typename R> class enumerator {
1873public:
1874 explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {}
1875
1876 enumerator_iter<R> begin() {
1877 return enumerator_iter<R>(0, std::begin(TheRange));
1878 }
1879
1880 enumerator_iter<R> end() {
1881 return enumerator_iter<R>(std::end(TheRange));
1882 }
1883
1884private:
1885 R TheRange;
1886};
1887
1888} // end namespace detail
1889
1890/// Given an input range, returns a new range whose values are pairs (A, B)
1891/// such that A is the 0-based index of the item in the sequence, and B is
1892/// the value from the original sequence. Example:
1893///
1894/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
1895/// for (auto X : enumerate(Items)) {
1896/// printf("Item %d - %c\n", X.index(), X.value());
1897/// }
1898///
1899/// Output:
1900/// Item 0 - A
1901/// Item 1 - B
1902/// Item 2 - C
1903/// Item 3 - D
1904///
1905template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
1906 return detail::enumerator<R>(std::forward<R>(TheRange));
1907}
1908
1909namespace detail {
1910
1911template <typename F, typename Tuple, std::size_t... I>
1912decltype(auto) apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) {
1913 return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
1914}
1915
1916} // end namespace detail
1917
1918/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
1919/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
1920/// return the result.
1921template <typename F, typename Tuple>
1922decltype(auto) apply_tuple(F &&f, Tuple &&t) {
1923 using Indices = std::make_index_sequence<
1924 std::tuple_size<typename std::decay<Tuple>::type>::value>;
1925
1926 return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
1927 Indices{});
1928}
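A tiny illustrative sketch (std::apply fills this role in C++17):

#include "llvm/ADT/STLExtras.h"
#include <tuple>

int volume() {
  auto Dims = std::make_tuple(2, 3, 7);
  // Behaves like calling the lambda directly as f(2, 3, 7).
  return llvm::apply_tuple(
      [](int W, int H, int D) { return W * H * D; }, Dims); // 42
}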
1929
1930/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
1931/// time. Not meant for use with random-access iterators.
1932/// Can optionally take a predicate to lazily filter some items.
1933template <typename IterTy,
1934 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
1935bool hasNItems(
1936 IterTy &&Begin, IterTy &&End, unsigned N,
1937 Pred &&ShouldBeCounted =
1938 [](const decltype(*std::declval<IterTy>()) &) { return true; },
1939 std::enable_if_t<
1940 !std::is_base_of<std::random_access_iterator_tag,
1941 typename std::iterator_traits<std::remove_reference_t<
1942 decltype(Begin)>>::iterator_category>::value,
1943 void> * = nullptr) {
1944 for (; N; ++Begin) {
1945 if (Begin == End)
1946 return false; // Too few.
1947 N -= ShouldBeCounted(*Begin);
1948 }
1949 for (; Begin != End; ++Begin)
1950 if (ShouldBeCounted(*Begin))
1951 return false; // Too many.
1952 return true;
1953}
1954
1955/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
1956/// time. Not meant for use with random-access iterators.
1957/// Can optionally take a predicate to lazily filter some items.
1958template <typename IterTy,
1959 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
1960bool hasNItemsOrMore(
1961 IterTy &&Begin, IterTy &&End, unsigned N,
1962 Pred &&ShouldBeCounted =
1963 [](const decltype(*std::declval<IterTy>()) &) { return true; },
1964 std::enable_if_t<
1965 !std::is_base_of<std::random_access_iterator_tag,
1966 typename std::iterator_traits<std::remove_reference_t<
1967 decltype(Begin)>>::iterator_category>::value,
1968 void> * = nullptr) {
1969 for (; N; ++Begin) {
1970 if (Begin == End)
1971 return false; // Too few.
1972 N -= ShouldBeCounted(*Begin);
1973 }
1974 return true;
1975}
1976
1977/// Returns true if the sequence [Begin, End) has N or fewer items. Can
1978/// optionally take a predicate to lazily filter some items.
1979template <typename IterTy,
1980 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
1981bool hasNItemsOrLess(
1982 IterTy &&Begin, IterTy &&End, unsigned N,
1983 Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) {
1984 return true;
1985 }) {
1986 assert(N != std::numeric_limits<unsigned>::max());
1987 return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted);
1988}
1989
1990/// Returns true if the given container has exactly N items
1991template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) {
1992 return hasNItems(std::begin(C), std::end(C), N);
1993}
1994
1995/// Returns true if the given container has N or more items
1996template <typename ContainerTy>
1997bool hasNItemsOrMore(ContainerTy &&C, unsigned N) {
1998 return hasNItemsOrMore(std::begin(C), std::end(C), N);
1999}
2000
2001/// Returns true if the given container has N or fewer items
2002template <typename ContainerTy>
2003bool hasNItemsOrLess(ContainerTy &&C, unsigned N) {
2004 return hasNItemsOrLess(std::begin(C), std::end(C), N);
2005}
2006
2007/// Returns a raw pointer that represents the same address as the argument.
2008///
2009/// This implementation can be removed once we move to C++20 where it's defined
2010/// as std::to_address().
2011///
2012/// The std::pointer_traits<>::to_address(p) variations of these overloads have
2013/// not been implemented.
2014template <class Ptr> auto to_address(const Ptr &P) { return P.operator->(); }
2015template <class T> constexpr T *to_address(T *P) { return P; }
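A usage sketch covering both to_address overloads (editorial example; demo is a hypothetical name):

    #include "llvm/ADT/STLExtras.h"
    #include <memory>

    static void demo(std::unique_ptr<int> &U, int *Raw) {
      int *A = llvm::to_address(U);   // invokes U.operator->()
      int *B = llvm::to_address(Raw); // identity overload for raw pointers
      (void)A;
      (void)B;
    }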
2016
2017} // end namespace llvm
2018
2019#endif // LLVM_ADT_STLEXTRAS_H

/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/bits/stl_algo.h

1// Algorithm implementation -*- C++ -*-
2
3// Copyright (C) 2001-2020 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/*
26 *
27 * Copyright (c) 1994
28 * Hewlett-Packard Company
29 *
30 * Permission to use, copy, modify, distribute and sell this software
31 * and its documentation for any purpose is hereby granted without fee,
32 * provided that the above copyright notice appear in all copies and
33 * that both that copyright notice and this permission notice appear
34 * in supporting documentation. Hewlett-Packard Company makes no
35 * representations about the suitability of this software for any
36 * purpose. It is provided "as is" without express or implied warranty.
37 *
38 *
39 * Copyright (c) 1996
40 * Silicon Graphics Computer Systems, Inc.
41 *
42 * Permission to use, copy, modify, distribute and sell this software
43 * and its documentation for any purpose is hereby granted without fee,
44 * provided that the above copyright notice appear in all copies and
45 * that both that copyright notice and this permission notice appear
46 * in supporting documentation. Silicon Graphics makes no
47 * representations about the suitability of this software for any
48 * purpose. It is provided "as is" without express or implied warranty.
49 */
50
51/** @file bits/stl_algo.h
52 * This is an internal header file, included by other library headers.
53 * Do not attempt to use it directly. @headername{algorithm}
54 */
55
56#ifndef _STL_ALGO_H
57#define _STL_ALGO_H 1
58
59#include <cstdlib> // for rand
60#include <bits/algorithmfwd.h>
61#include <bits/stl_heap.h>
62#include <bits/stl_tempbuf.h> // for _Temporary_buffer
63#include <bits/predefined_ops.h>
64
65#if __cplusplus >= 201103L
66#include <bits/uniform_int_dist.h>
67#endif
68
69// See concept_check.h for the __glibcxx_*_requires macros.
70
71namespace std _GLIBCXX_VISIBILITY(default)
72{
73_GLIBCXX_BEGIN_NAMESPACE_VERSION
74
75 /// Swaps the median value of *__a, *__b and *__c under __comp to *__result
76 template<typename _Iterator, typename _Compare>
77 _GLIBCXX20_CONSTEXPR
78 void
79 __move_median_to_first(_Iterator __result,_Iterator __a, _Iterator __b,
80 _Iterator __c, _Compare __comp)
81 {
82 if (__comp(__a, __b))
83 {
84 if (__comp(__b, __c))
85 std::iter_swap(__result, __b);
86 else if (__comp(__a, __c))
87 std::iter_swap(__result, __c);
88 else
89 std::iter_swap(__result, __a);
90 }
91 else if (__comp(__a, __c))
92 std::iter_swap(__result, __a);
93 else if (__comp(__b, __c))
94 std::iter_swap(__result, __c);
95 else
96 std::iter_swap(__result, __b);
97 }
98
99 /// Provided for stable_partition to use.
100 template<typename _InputIterator, typename _Predicate>
101 _GLIBCXX20_CONSTEXPR
102 inline _InputIterator
103 __find_if_not(_InputIterator __first, _InputIterator __last,
104 _Predicate __pred)
105 {
106 return std::__find_if(__first, __last,
107 __gnu_cxx::__ops::__negate(__pred),
108 std::__iterator_category(__first));
109 }
110
111 /// Like find_if_not(), but uses and updates a count of the
112 /// remaining range length instead of comparing against an end
113 /// iterator.
114 template<typename _InputIterator, typename _Predicate, typename _Distance>
115 _GLIBCXX20_CONSTEXPR
116 _InputIterator
117 __find_if_not_n(_InputIterator __first, _Distance& __len, _Predicate __pred)
118 {
119 for (; __len; --__len, (void) ++__first)
120 if (!__pred(__first))
121 break;
122 return __first;
123 }
124
125 // set_difference
126 // set_intersection
127 // set_symmetric_difference
128 // set_union
129 // for_each
130 // find
131 // find_if
132 // find_first_of
133 // adjacent_find
134 // count
135 // count_if
136 // search
137
138 template<typename _ForwardIterator1, typename _ForwardIterator2,
139 typename _BinaryPredicate>
140 _GLIBCXX20_CONSTEXPR
141 _ForwardIterator1
142 __search(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
143 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
144 _BinaryPredicate __predicate)
145 {
146 // Test for empty ranges
147 if (__first1 == __last1 || __first2 == __last2)
148 return __first1;
149
150 // Test for a pattern of length 1.
151 _ForwardIterator2 __p1(__first2);
152 if (++__p1 == __last2)
153 return std::__find_if(__first1, __last1,
154 __gnu_cxx::__ops::__iter_comp_iter(__predicate, __first2));
155
156 // General case.
157 _ForwardIterator1 __current = __first1;
158
159 for (;;)
160 {
161 __first1 =
162 std::__find_if(__first1, __last1,
163 __gnu_cxx::__ops::__iter_comp_iter(__predicate, __first2));
164
165 if (__first1 == __last1)
166 return __last1;
167
168 _ForwardIterator2 __p = __p1;
169 __current = __first1;
170 if (++__current == __last1)
171 return __last1;
172
173 while (__predicate(__current, __p))
174 {
175 if (++__p == __last2)
176 return __first1;
177 if (++__current == __last1)
178 return __last1;
179 }
180 ++__first1;
181 }
182 return __first1;
183 }
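The public std::search entry points dispatch to this helper; a usage sketch (editorial example; containsRun is a hypothetical name):

    #include <algorithm>
    #include <vector>

    // True iff Pat occurs as a contiguous subsequence of Hay.
    static bool containsRun(const std::vector<int> &Hay,
                            const std::vector<int> &Pat) {
      return std::search(Hay.begin(), Hay.end(), Pat.begin(), Pat.end()) !=
             Hay.end();
    }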
184
185 // search_n
186
187 /**
188 * This is a helper function for search_n overloaded for forward iterators.
189 */
190 template<typename _ForwardIterator, typename _Integer,
191 typename _UnaryPredicate>
192 _GLIBCXX20_CONSTEXPR
193 _ForwardIterator
194 __search_n_aux(_ForwardIterator __first, _ForwardIterator __last,
195 _Integer __count, _UnaryPredicate __unary_pred,
196 std::forward_iterator_tag)
197 {
198 __first = std::__find_if(__first, __last, __unary_pred);
199 while (__first != __last)
200 {
201 typename iterator_traits<_ForwardIterator>::difference_type
202 __n = __count;
203 _ForwardIterator __i = __first;
204 ++__i;
205 while (__i != __last && __n != 1 && __unary_pred(__i))
206 {
207 ++__i;
208 --__n;
209 }
210 if (__n == 1)
211 return __first;
212 if (__i == __last)
213 return __last;
214 __first = std::__find_if(++__i, __last, __unary_pred);
215 }
216 return __last;
217 }
218
219 /**
220 * This is a helper function for search_n overloaded for random access
221 * iterators.
222 */
223 template<typename _RandomAccessIter, typename _Integer,
224 typename _UnaryPredicate>
225 _GLIBCXX20_CONSTEXPR
226 _RandomAccessIter
227 __search_n_aux(_RandomAccessIter __first, _RandomAccessIter __last,
228 _Integer __count, _UnaryPredicate __unary_pred,
229 std::random_access_iterator_tag)
230 {
231 typedef typename std::iterator_traits<_RandomAccessIter>::difference_type
232 _DistanceType;
233
234 _DistanceType __tailSize = __last - __first;
235 _DistanceType __remainder = __count;
236
237 while (__remainder <= __tailSize) // the main loop...
238 {
239 __first += __remainder;
240 __tailSize -= __remainder;
241 // __first here is always pointing to one past the last element of
242 // next possible match.
243 _RandomAccessIter __backTrack = __first;
244 while (__unary_pred(--__backTrack))
245 {
246 if (--__remainder == 0)
247 return (__first - __count); // Success
248 }
249 __remainder = __count + 1 - (__first - __backTrack);
250 }
251 return __last; // Failure
252 }
253
254 template<typename _ForwardIterator, typename _Integer,
255 typename _UnaryPredicate>
256 _GLIBCXX20_CONSTEXPR
257 _ForwardIterator
258 __search_n(_ForwardIterator __first, _ForwardIterator __last,
259 _Integer __count,
260 _UnaryPredicate __unary_pred)
261 {
262 if (__count <= 0)
263 return __first;
264
265 if (__count == 1)
266 return std::__find_if(__first, __last, __unary_pred);
267
268 return std::__search_n_aux(__first, __last, __count, __unary_pred,
269 std::__iterator_category(__first));
270 }
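A usage sketch of the public std::search_n, which dispatches here (editorial example; hasThreeZeros is a hypothetical name):

    #include <algorithm>
    #include <vector>

    // True iff V contains at least three consecutive zeros. With
    // random-access iterators the helper above skips ahead by the
    // remaining count rather than re-testing every position.
    static bool hasThreeZeros(const std::vector<int> &V) {
      return std::search_n(V.begin(), V.end(), 3, 0) != V.end();
    }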
271
272 // find_end for forward iterators.
273 template<typename _ForwardIterator1, typename _ForwardIterator2,
274 typename _BinaryPredicate>
275 _GLIBCXX20_CONSTEXPR
276 _ForwardIterator1
277 __find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
278 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
279 forward_iterator_tag, forward_iterator_tag,
280 _BinaryPredicate __comp)
281 {
282 if (__first2 == __last2)
283 return __last1;
284
285 _ForwardIterator1 __result = __last1;
286 while (1)
287 {
288 _ForwardIterator1 __new_result
289 = std::__search(__first1, __last1, __first2, __last2, __comp);
290 if (__new_result == __last1)
291 return __result;
292 else
293 {
294 __result = __new_result;
295 __first1 = __new_result;
296 ++__first1;
297 }
298 }
299 }
300
301 // find_end for bidirectional iterators (much faster).
302 template<typename _BidirectionalIterator1, typename _BidirectionalIterator2,
303 typename _BinaryPredicate>
304 _GLIBCXX20_CONSTEXPR
305 _BidirectionalIterator1
306 __find_end(_BidirectionalIterator1 __first1,
307 _BidirectionalIterator1 __last1,
308 _BidirectionalIterator2 __first2,
309 _BidirectionalIterator2 __last2,
310 bidirectional_iterator_tag, bidirectional_iterator_tag,
311 _BinaryPredicate __comp)
312 {
313 // concept requirements
314 __glibcxx_function_requires(_BidirectionalIteratorConcept<
315 _BidirectionalIterator1>)
316 __glibcxx_function_requires(_BidirectionalIteratorConcept<
317 _BidirectionalIterator2>)
318
319 typedef reverse_iterator<_BidirectionalIterator1> _RevIterator1;
320 typedef reverse_iterator<_BidirectionalIterator2> _RevIterator2;
321
322 _RevIterator1 __rlast1(__first1);
323 _RevIterator2 __rlast2(__first2);
324 _RevIterator1 __rresult = std::__search(_RevIterator1(__last1), __rlast1,
325 _RevIterator2(__last2), __rlast2,
326 __comp);
327
328 if (__rresult == __rlast1)
329 return __last1;
330 else
331 {
332 _BidirectionalIterator1 __result = __rresult.base();
333 std::advance(__result, -std::distance(__first2, __last2));
334 return __result;
335 }
336 }
337
338 /**
339 * @brief Find last matching subsequence in a sequence.
340 * @ingroup non_mutating_algorithms
341 * @param __first1 Start of range to search.
342 * @param __last1 End of range to search.
343 * @param __first2 Start of sequence to match.
344 * @param __last2 End of sequence to match.
345 * @return The last iterator @c i in the range
346 * @p [__first1,__last1-(__last2-__first2)) such that @c *(i+N) ==
347 * @p *(__first2+N) for each @c N in the range @p
348 * [0,__last2-__first2), or @p __last1 if no such iterator exists.
349 *
350 * Searches the range @p [__first1,__last1) for a sub-sequence that
351 * compares equal value-by-value with the sequence given by @p
352 * [__first2,__last2) and returns an iterator to the __first
353 * element of the sub-sequence, or @p __last1 if the sub-sequence
354 * is not found. The sub-sequence will be the last such
355 * subsequence contained in [__first1,__last1).
356 *
357 * Because the sub-sequence must lie completely within the range @p
358 * [__first1,__last1) it must start at a position less than @p
359 * __last1-(__last2-__first2) where @p __last2-__first2 is the
360 * length of the sub-sequence. This means that the returned
361 * iterator @c i will be in the range @p
362 * [__first1,__last1-(__last2-__first2))
363 */
364 template<typename _ForwardIterator1, typename _ForwardIterator2>
365 _GLIBCXX20_CONSTEXPR
366 inline _ForwardIterator1
367 find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
368 _ForwardIterator2 __first2, _ForwardIterator2 __last2)
369 {
370 // concept requirements
371 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
372 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
373 __glibcxx_function_requires(_EqualOpConcept<
374 typename iterator_traits<_ForwardIterator1>::value_type,
375 typename iterator_traits<_ForwardIterator2>::value_type>)
376 __glibcxx_requires_valid_range(__first1, __last1);
377 __glibcxx_requires_valid_range(__first2, __last2);
378
379 return std::__find_end(__first1, __last1, __first2, __last2,
380 std::__iterator_category(__first1),
381 std::__iterator_category(__first2),
382 __gnu_cxx::__ops::__iter_equal_to_iter());
383 }
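A usage sketch of find_end (editorial example; lastOccurrence is a hypothetical name):

    #include <algorithm>
    #include <cstddef>
    #include <string>

    // Offset of the last occurrence of Pat in S, or std::string::npos.
    static std::size_t lastOccurrence(const std::string &S,
                                      const std::string &Pat) {
      auto It = std::find_end(S.begin(), S.end(), Pat.begin(), Pat.end());
      return It == S.end() ? std::string::npos
                           : static_cast<std::size_t>(It - S.begin());
    }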
384
385 /**
386 * @brief Find last matching subsequence in a sequence using a predicate.
387 * @ingroup non_mutating_algorithms
388 * @param __first1 Start of range to search.
389 * @param __last1 End of range to search.
390 * @param __first2 Start of sequence to match.
391 * @param __last2 End of sequence to match.
392 * @param __comp The predicate to use.
393 * @return The last iterator @c i in the range @p
394 * [__first1,__last1-(__last2-__first2)) such that @c
395 * predicate(*(i+N), @p (__first2+N)) is true for each @c N in the
396 * range @p [0,__last2-__first2), or @p __last1 if no such iterator
397 * exists.
398 *
399 * Searches the range @p [__first1,__last1) for a sub-sequence that
400 * compares equal value-by-value with the sequence given by @p
401 * [__first2,__last2) using comp as a predicate and returns an
402 * iterator to the first element of the sub-sequence, or @p __last1
403 * if the sub-sequence is not found. The sub-sequence will be the
404 * last such subsequence contained in [__first1,__last1).
405 *
406 * Because the sub-sequence must lie completely within the range @p
407 * [__first1,__last1) it must start at a position less than @p
408 * __last1-(__last2-__first2) where @p __last2-__first2 is the
409 * length of the sub-sequence. This means that the returned
410 * iterator @c i will be in the range @p
411 * [__first1,__last1-(__last2-__first2))
412 */
413 template<typename _ForwardIterator1, typename _ForwardIterator2,
414 typename _BinaryPredicate>
415 _GLIBCXX20_CONSTEXPR
416 inline _ForwardIterator1
417 find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
418 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
419 _BinaryPredicate __comp)
420 {
421 // concept requirements
422 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
423 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
424 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
425 typename iterator_traits<_ForwardIterator1>::value_type,
426 typename iterator_traits<_ForwardIterator2>::value_type>)
427 __glibcxx_requires_valid_range(__first1, __last1);
428 __glibcxx_requires_valid_range(__first2, __last2);
429
430 return std::__find_end(__first1, __last1, __first2, __last2,
431 std::__iterator_category(__first1),
432 std::__iterator_category(__first2),
433 __gnu_cxx::__ops::__iter_comp_iter(__comp));
434 }
435
436#if __cplusplus >= 201103L
437 /**
438 * @brief Checks that a predicate is true for all the elements
439 * of a sequence.
440 * @ingroup non_mutating_algorithms
441 * @param __first An input iterator.
442 * @param __last An input iterator.
443 * @param __pred A predicate.
444 * @return True if the check is true, false otherwise.
445 *
446 * Returns true if @p __pred is true for each element in the range
447 * @p [__first,__last), and false otherwise.
448 */
449 template<typename _InputIterator, typename _Predicate>
450 _GLIBCXX20_CONSTEXPR
451 inline bool
452 all_of(_InputIterator __first, _InputIterator __last, _Predicate __pred)
453 { return __last == std::find_if_not(__first, __last, __pred); }
454
455 /**
456 * @brief Checks that a predicate is false for all the elements
457 * of a sequence.
458 * @ingroup non_mutating_algorithms
459 * @param __first An input iterator.
460 * @param __last An input iterator.
461 * @param __pred A predicate.
462 * @return True if the check is true, false otherwise.
463 *
464 * Returns true if @p __pred is false for each element in the range
465 * @p [__first,__last), and false otherwise.
466 */
467 template<typename _InputIterator, typename _Predicate>
468 _GLIBCXX20_CONSTEXPR
469 inline bool
470 none_of(_InputIterator __first, _InputIterator __last, _Predicate __pred)
471 { return __last == _GLIBCXX_STD_A::find_if(__first, __last, __pred); }
472
473 /**
474 * @brief Checks that a predicate is true for at least one element
475 * of a sequence.
476 * @ingroup non_mutating_algorithms
477 * @param __first An input iterator.
478 * @param __last An input iterator.
479 * @param __pred A predicate.
480 * @return True if the check is true, false otherwise.
481 *
482 * Returns true if an element exists in the range @p
483 * [__first,__last) such that @p __pred is true, and false
484 * otherwise.
485 */
486 template<typename _InputIterator, typename _Predicate>
487 _GLIBCXX20_CONSTEXPR
488 inline bool
489 any_of(_InputIterator __first, _InputIterator __last, _Predicate __pred)
490 { return !std::none_of(__first, __last, __pred); }
Assuming the condition is false
Returning zero, which participates in a condition later
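A combined usage sketch of the all_of/none_of/any_of family (editorial example; demo is a hypothetical name):

    #include <algorithm>
    #include <vector>

    static void demo(const std::vector<int> &V) {
      auto IsPos = [](int X) { return X > 0; };
      bool AllPos  = std::all_of(V.begin(), V.end(), IsPos);
      bool NonePos = std::none_of(V.begin(), V.end(), IsPos);
      // any_of is defined directly in terms of none_of, as above.
      bool AnyPos  = std::any_of(V.begin(), V.end(), IsPos);
      (void)AllPos; (void)NonePos; (void)AnyPos;
    }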
491
492 /**
493 * @brief Find the first element in a sequence for which a
494 * predicate is false.
495 * @ingroup non_mutating_algorithms
496 * @param __first An input iterator.
497 * @param __last An input iterator.
498 * @param __pred A predicate.
499 * @return The first iterator @c i in the range @p [__first,__last)
500 * such that @p __pred(*i) is false, or @p __last if no such iterator exists.
501 */
502 template<typename _InputIterator, typename _Predicate>
503 _GLIBCXX20_CONSTEXPR
504 inline _InputIterator
505 find_if_not(_InputIterator __first, _InputIterator __last,
506 _Predicate __pred)
507 {
508 // concept requirements
509 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
510 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
511 typename iterator_traits<_InputIterator>::value_type>)
512 __glibcxx_requires_valid_range(__first, __last);
513 return std::__find_if_not(__first, __last,
514 __gnu_cxx::__ops::__pred_iter(__pred));
515 }
516
517 /**
518 * @brief Checks whether the sequence is partitioned.
519 * @ingroup mutating_algorithms
520 * @param __first An input iterator.
521 * @param __last An input iterator.
522 * @param __pred A predicate.
523 * @return True if the range @p [__first,__last) is partitioned by @p __pred,
524 * i.e. if all elements that satisfy @p __pred appear before those that
525 * do not.
526 */
527 template<typename _InputIterator, typename _Predicate>
528 _GLIBCXX20_CONSTEXPR
529 inline bool
530 is_partitioned(_InputIterator __first, _InputIterator __last,
531 _Predicate __pred)
532 {
533 __first = std::find_if_not(__first, __last, __pred);
534 if (__first == __last)
535 return true;
536 ++__first;
537 return std::none_of(__first, __last, __pred);
538 }
539
540 /**
541 * @brief Find the partition point of a partitioned range.
542 * @ingroup mutating_algorithms
543 * @param __first An iterator.
544 * @param __last Another iterator.
545 * @param __pred A predicate.
546 * @return An iterator @p mid such that @p all_of(__first, mid, __pred)
547 * and @p none_of(mid, __last, __pred) are both true.
548 */
549 template<typename _ForwardIterator, typename _Predicate>
550 _GLIBCXX20_CONSTEXPR
551 _ForwardIterator
552 partition_point(_ForwardIterator __first, _ForwardIterator __last,
553 _Predicate __pred)
554 {
555 // concept requirements
556 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
557 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
558 typename iterator_traits<_ForwardIterator>::value_type>)
559
560 // A specific debug-mode test will be necessary...
561 __glibcxx_requires_valid_range(__first, __last);
562
563 typedef typename iterator_traits<_ForwardIterator>::difference_type
564 _DistanceType;
565
566 _DistanceType __len = std::distance(__first, __last);
567
568 while (__len > 0)
569 {
570 _DistanceType __half = __len >> 1;
571 _ForwardIterator __middle = __first;
572 std::advance(__middle, __half);
573 if (__pred(*__middle))
574 {
575 __first = __middle;
576 ++__first;
577 __len = __len - __half - 1;
578 }
579 else
580 __len = __half;
581 }
582 return __first;
583 }
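A usage sketch of partition_point, which performs a predicate-driven binary search (editorial example; firstNonNegative is a hypothetical name):

    #include <algorithm>
    #include <vector>

    // V must already be partitioned: negatives first, then the rest.
    // On a sorted range this coincides with std::lower_bound.
    static std::vector<int>::const_iterator
    firstNonNegative(const std::vector<int> &V) {
      return std::partition_point(V.begin(), V.end(),
                                  [](int X) { return X < 0; });
    }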
584#endif
585
586 template<typename _InputIterator, typename _OutputIterator,
587 typename _Predicate>
588 _GLIBCXX20_CONSTEXPR
589 _OutputIterator
590 __remove_copy_if(_InputIterator __first, _InputIterator __last,
591 _OutputIterator __result, _Predicate __pred)
592 {
593 for (; __first != __last; ++__first)
594 if (!__pred(__first))
595 {
596 *__result = *__first;
597 ++__result;
598 }
599 return __result;
600 }
601
602 /**
603 * @brief Copy a sequence, removing elements of a given value.
604 * @ingroup mutating_algorithms
605 * @param __first An input iterator.
606 * @param __last An input iterator.
607 * @param __result An output iterator.
608 * @param __value The value to be removed.
609 * @return An iterator designating the end of the resulting sequence.
610 *
611 * Copies each element in the range @p [__first,__last) not equal
612 * to @p __value to the range beginning at @p __result.
613 * remove_copy() is stable, so the relative order of elements that
614 * are copied is unchanged.
615 */
616 template<typename _InputIterator, typename _OutputIterator, typename _Tp>
617 _GLIBCXX20_CONSTEXPR
618 inline _OutputIterator
619 remove_copy(_InputIterator __first, _InputIterator __last,
620 _OutputIterator __result, const _Tp& __value)
621 {
622 // concept requirements
623 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
624 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
625 typename iterator_traits<_InputIterator>::value_type>)
626 __glibcxx_function_requires(_EqualOpConcept<
627 typename iterator_traits<_InputIterator>::value_type, _Tp>)
628 __glibcxx_requires_valid_range(__first, __last);
629
630 return std::__remove_copy_if(__first, __last, __result,
631 __gnu_cxx::__ops::__iter_equals_val(__value));
632 }
633
634 /**
635 * @brief Copy a sequence, removing elements for which a predicate is true.
636 * @ingroup mutating_algorithms
637 * @param __first An input iterator.
638 * @param __last An input iterator.
639 * @param __result An output iterator.
640 * @param __pred A predicate.
641 * @return An iterator designating the end of the resulting sequence.
642 *
643 * Copies each element in the range @p [__first,__last) for which
644 * @p __pred returns false to the range beginning at @p __result.
645 *
646 * remove_copy_if() is stable, so the relative order of elements that are
647 * copied is unchanged.
648 */
649 template<typename _InputIterator, typename _OutputIterator,
650 typename _Predicate>
651 _GLIBCXX20_CONSTEXPR
652 inline _OutputIterator
653 remove_copy_if(_InputIterator __first, _InputIterator __last,
654 _OutputIterator __result, _Predicate __pred)
655 {
656 // concept requirements
657 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
658 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
659 typename iterator_traits<_InputIterator>::value_type>)
660 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
661 typename iterator_traits<_InputIterator>::value_type>)
662 __glibcxx_requires_valid_range(__first, __last);
663
664 return std::__remove_copy_if(__first, __last, __result,
665 __gnu_cxx::__ops::__pred_iter(__pred));
666 }
667
668#if __cplusplus >= 201103L
669 /**
670 * @brief Copy the elements of a sequence for which a predicate is true.
671 * @ingroup mutating_algorithms
672 * @param __first An input iterator.
673 * @param __last An input iterator.
674 * @param __result An output iterator.
675 * @param __pred A predicate.
676 * @return An iterator designating the end of the resulting sequence.
677 *
678 * Copies each element in the range @p [__first,__last) for which
679 * @p __pred returns true to the range beginning at @p __result.
680 *
681 * copy_if() is stable, so the relative order of elements that are
682 * copied is unchanged.
683 */
684 template<typename _InputIterator, typename _OutputIterator,
685 typename _Predicate>
686 _GLIBCXX20_CONSTEXPR
687 _OutputIterator
688 copy_if(_InputIterator __first, _InputIterator __last,
689 _OutputIterator __result, _Predicate __pred)
690 {
691 // concept requirements
692 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
693 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
694 typename iterator_traits<_InputIterator>::value_type>)
695 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
696 typename iterator_traits<_InputIterator>::value_type>)
697 __glibcxx_requires_valid_range(__first, __last);
698
699 for (; __first != __last; ++__first)
700 if (__pred(*__first))
701 {
702 *__result = *__first;
703 ++__result;
704 }
705 return __result;
706 }
707
708 template<typename _InputIterator, typename _Size, typename _OutputIterator>
709 _GLIBCXX20_CONSTEXPR
710 _OutputIterator
711 __copy_n_a(_InputIterator __first, _Size __n, _OutputIterator __result)
712 {
713 if (__n > 0)
714 {
715 while (true)
716 {
717 *__result = *__first;
718 ++__result;
719 if (--__n > 0)
720 ++__first;
721 else
722 break;
723 }
724 }
725 return __result;
726 }
727
728 template<typename _CharT, typename _Size>
729 __enable_if_t<__is_char<_CharT>::__value, _CharT*>
730 __copy_n_a(istreambuf_iterator<_CharT, char_traits<_CharT>>,
731 _Size, _CharT*);
732
733 template<typename _InputIterator, typename _Size, typename _OutputIterator>
734 _GLIBCXX20_CONSTEXPR
735 _OutputIterator
736 __copy_n(_InputIterator __first, _Size __n,
737 _OutputIterator __result, input_iterator_tag)
738 {
739 return std::__niter_wrap(__result,
740 __copy_n_a(__first, __n,
741 std::__niter_base(__result)));
742 }
743
744 template<typename _RandomAccessIterator, typename _Size,
745 typename _OutputIterator>
746 _GLIBCXX20_CONSTEXPR
747 inline _OutputIterator
748 __copy_n(_RandomAccessIterator __first, _Size __n,
749 _OutputIterator __result, random_access_iterator_tag)
750 { return std::copy(__first, __first + __n, __result); }
751
752 /**
753 * @brief Copies the range [first,first+n) into [result,result+n).
754 * @ingroup mutating_algorithms
755 * @param __first An input iterator.
756 * @param __n The number of elements to copy.
757 * @param __result An output iterator.
758 * @return result+n.
759 *
760 * This inline function will boil down to a call to @c memmove whenever
761 * possible. Failing that, if random access iterators are passed, then the
762 * loop count will be known (and therefore a candidate for compiler
763 * optimizations such as unrolling).
764 */
765 template<typename _InputIterator, typename _Size, typename _OutputIterator>
766 _GLIBCXX20_CONSTEXPR
767 inline _OutputIterator
768 copy_n(_InputIterator __first, _Size __n, _OutputIterator __result)
769 {
770 // concept requirements
771 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
772 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
773 typename iterator_traits<_InputIterator>::value_type>)
774 __glibcxx_requires_can_increment(__first, __n);
775 __glibcxx_requires_can_increment(__result, __n);
776
777 return std::__copy_n(__first, __n, __result,
778 std::__iterator_category(__first));
779 }
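A usage sketch of copy_n (editorial example; firstThree is a hypothetical name):

    #include <algorithm>
    #include <vector>

    // Copy the first three elements of Src into Dst. Src must hold at
    // least three elements; copy_n itself performs no bounds check.
    static void firstThree(const std::vector<int> &Src,
                           std::vector<int> &Dst) {
      Dst.resize(3);
      std::copy_n(Src.begin(), 3, Dst.begin());
    }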
780
781 /**
782 * @brief Copy the elements of a sequence to separate output sequences
783 * depending on the truth value of a predicate.
784 * @ingroup mutating_algorithms
785 * @param __first An input iterator.
786 * @param __last An input iterator.
787 * @param __out_true An output iterator.
788 * @param __out_false An output iterator.
789 * @param __pred A predicate.
790 * @return A pair designating the ends of the resulting sequences.
791 *
792 * Copies each element in the range @p [__first,__last) for which
793 * @p __pred returns true to the range beginning at @p out_true
794 * and each element for which @p __pred returns false to @p __out_false.
795 */
796 template<typename _InputIterator, typename _OutputIterator1,
797 typename _OutputIterator2, typename _Predicate>
798 _GLIBCXX20_CONSTEXPR
799 pair<_OutputIterator1, _OutputIterator2>
800 partition_copy(_InputIterator __first, _InputIterator __last,
801 _OutputIterator1 __out_true, _OutputIterator2 __out_false,
802 _Predicate __pred)
803 {
804 // concept requirements
805 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
806 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator1,
807 typename iterator_traits<_InputIterator>::value_type>)
808 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator2,
809 typename iterator_traits<_InputIterator>::value_type>)
810 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
811 typename iterator_traits<_InputIterator>::value_type>)
812 __glibcxx_requires_valid_range(__first, __last);
813
814 for (; __first != __last; ++__first)
815 if (__pred(*__first))
816 {
817 *__out_true = *__first;
818 ++__out_true;
819 }
820 else
821 {
822 *__out_false = *__first;
823 ++__out_false;
824 }
825
826 return pair<_OutputIterator1, _OutputIterator2>(__out_true, __out_false);
827 }
828#endif // C++11
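A usage sketch of partition_copy (editorial example; splitParity is a hypothetical name):

    #include <algorithm>
    #include <iterator>
    #include <vector>

    // Split V into even and odd values, preserving relative order in
    // both output sequences.
    static void splitParity(const std::vector<int> &V,
                            std::vector<int> &Evens,
                            std::vector<int> &Odds) {
      std::partition_copy(V.begin(), V.end(), std::back_inserter(Evens),
                          std::back_inserter(Odds),
                          [](int X) { return X % 2 == 0; });
    }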
829
830 template<typename _ForwardIterator, typename _Predicate>
831 _GLIBCXX20_CONSTEXPR
832 _ForwardIterator
833 __remove_if(_ForwardIterator __first, _ForwardIterator __last,
834 _Predicate __pred)
835 {
836 __first = std::__find_if(__first, __last, __pred);
837 if (__first == __last)
838 return __first;
839 _ForwardIterator __result = __first;
840 ++__first;
841 for (; __first != __last; ++__first)
842 if (!__pred(__first))
843 {
844 *__result = _GLIBCXX_MOVE(*__first);
845 ++__result;
846 }
847 return __result;
848 }
849
850 /**
851 * @brief Remove elements from a sequence.
852 * @ingroup mutating_algorithms
853 * @param __first An input iterator.
854 * @param __last An input iterator.
855 * @param __value The value to be removed.
856 * @return An iterator designating the end of the resulting sequence.
857 *
858 * All elements equal to @p __value are removed from the range
859 * @p [__first,__last).
860 *
861 * remove() is stable, so the relative order of elements that are
862 * not removed is unchanged.
863 *
864 * Elements between the end of the resulting sequence and @p __last
865 * are still present, but their value is unspecified.
866 */
867 template<typename _ForwardIterator, typename _Tp>
868 _GLIBCXX20_CONSTEXPR
869 inline _ForwardIterator
870 remove(_ForwardIterator __first, _ForwardIterator __last,
871 const _Tp& __value)
872 {
873 // concept requirements
874 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
875 _ForwardIterator>)
876 __glibcxx_function_requires(_EqualOpConcept<
877 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
878 __glibcxx_requires_valid_range(__first, __last);
879
880 return std::__remove_if(__first, __last,
881 __gnu_cxx::__ops::__iter_equals_val(__value));
882 }
883
884 /**
885 * @brief Remove elements from a sequence using a predicate.
886 * @ingroup mutating_algorithms
887 * @param __first A forward iterator.
888 * @param __last A forward iterator.
889 * @param __pred A predicate.
890 * @return An iterator designating the end of the resulting sequence.
891 *
892 * All elements for which @p __pred returns true are removed from the range
893 * @p [__first,__last).
894 *
895 * remove_if() is stable, so the relative order of elements that are
896 * not removed is unchanged.
897 *
898 * Elements between the end of the resulting sequence and @p __last
899 * are still present, but their value is unspecified.
900 */
901 template<typename _ForwardIterator, typename _Predicate>
902 _GLIBCXX20_CONSTEXPR
903 inline _ForwardIterator
904 remove_if(_ForwardIterator __first, _ForwardIterator __last,
905 _Predicate __pred)
906 {
907 // concept requirements
908 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
909 _ForwardIterator>)
910 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
911 typename iterator_traits<_ForwardIterator>::value_type>)
912 __glibcxx_requires_valid_range(__first, __last);
913
914 return std::__remove_if(__first, __last,
915 __gnu_cxx::__ops::__pred_iter(__pred));
916 }
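A usage sketch of remove_if paired with container erase, the erase-remove idiom (editorial example; dropNegatives is a hypothetical name):

    #include <algorithm>
    #include <vector>

    // remove_if only compacts the kept elements and returns the new
    // logical end; erase is needed to actually shrink the container.
    static void dropNegatives(std::vector<int> &V) {
      V.erase(std::remove_if(V.begin(), V.end(),
                             [](int X) { return X < 0; }),
              V.end());
    }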
917
918 template<typename _ForwardIterator, typename _BinaryPredicate>
919 _GLIBCXX20_CONSTEXPR
920 _ForwardIterator
921 __adjacent_find(_ForwardIterator __first, _ForwardIterator __last,
922 _BinaryPredicate __binary_pred)
923 {
924 if (__first == __last)
925 return __last;
926 _ForwardIterator __next = __first;
927 while (++__next != __last)
928 {
929 if (__binary_pred(__first, __next))
930 return __first;
931 __first = __next;
932 }
933 return __last;
934 }
935
936 template<typename _ForwardIterator, typename _BinaryPredicate>
937 _GLIBCXX20_CONSTEXPR
938 _ForwardIterator
939 __unique(_ForwardIterator __first, _ForwardIterator __last,
940 _BinaryPredicate __binary_pred)
941 {
942 // Skip the beginning, if already unique.
943 __first = std::__adjacent_find(__first, __last, __binary_pred);
944 if (__first == __last)
945 return __last;
946
947 // Do the real copy work.
948 _ForwardIterator __dest = __first;
949 ++__first;
950 while (++__first != __last)
951 if (!__binary_pred(__dest, __first))
952 *++__dest = _GLIBCXX_MOVE(*__first);
953 return ++__dest;
954 }
955
956 /**
957 * @brief Remove consecutive duplicate values from a sequence.
958 * @ingroup mutating_algorithms
959 * @param __first A forward iterator.
960 * @param __last A forward iterator.
961 * @return An iterator designating the end of the resulting sequence.
962 *
963 * Removes all but the first element from each group of consecutive
964 * values that compare equal.
965 * unique() is stable, so the relative order of elements that are
966 * not removed is unchanged.
967 * Elements between the end of the resulting sequence and @p __last
968 * are still present, but their value is unspecified.
969 */
970 template<typename _ForwardIterator>
971 _GLIBCXX20_CONSTEXPR
972 inline _ForwardIterator
973 unique(_ForwardIterator __first, _ForwardIterator __last)
974 {
975 // concept requirements
976 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
977 _ForwardIterator>)
978 __glibcxx_function_requires(_EqualityComparableConcept<
979 typename iterator_traits<_ForwardIterator>::value_type>)
980 __glibcxx_requires_valid_range(__first, __last);
981
982 return std::__unique(__first, __last,
983 __gnu_cxx::__ops::__iter_equal_to_iter());
984 }
985
986 /**
987 * @brief Remove consecutive values from a sequence using a predicate.
988 * @ingroup mutating_algorithms
989 * @param __first A forward iterator.
990 * @param __last A forward iterator.
991 * @param __binary_pred A binary predicate.
992 * @return An iterator designating the end of the resulting sequence.
993 *
994 * Removes all but the first element from each group of consecutive
995 * values for which @p __binary_pred returns true.
996 * unique() is stable, so the relative order of elements that are
997 * not removed is unchanged.
998 * Elements between the end of the resulting sequence and @p __last
999 * are still present, but their value is unspecified.
1000 */
1001 template<typename _ForwardIterator, typename _BinaryPredicate>
1002 _GLIBCXX20_CONSTEXPR
1003 inline _ForwardIterator
1004 unique(_ForwardIterator __first, _ForwardIterator __last,
1005 _BinaryPredicate __binary_pred)
1006 {
1007 // concept requirements
1008 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
1009 _ForwardIterator>)
1010 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1011 typename iterator_traits<_ForwardIterator>::value_type,
1012 typename iterator_traits<_ForwardIterator>::value_type>)
1013 __glibcxx_requires_valid_range(__first, __last);
1014
1015 return std::__unique(__first, __last,
1016 __gnu_cxx::__ops::__iter_comp_iter(__binary_pred));
1017 }
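A usage sketch of unique with the same erase pairing (editorial example; dedupAdjacent is a hypothetical name):

    #include <algorithm>
    #include <vector>

    // Collapse runs of consecutive duplicates; only adjacent equal
    // values are merged, so sort first for a full deduplication.
    static void dedupAdjacent(std::vector<int> &V) {
      V.erase(std::unique(V.begin(), V.end()), V.end());
    }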
1018
1019 /**
1020 * This is an uglified
1021 * unique_copy(_InputIterator, _InputIterator, _OutputIterator,
1022 * _BinaryPredicate)
1023 * overloaded for forward iterators and output iterator as result.
1024 */
1025 template<typename _ForwardIterator, typename _OutputIterator,
1026 typename _BinaryPredicate>
1027 _GLIBCXX20_CONSTEXPR
1028 _OutputIterator
1029 __unique_copy(_ForwardIterator __first, _ForwardIterator __last,
1030 _OutputIterator __result, _BinaryPredicate __binary_pred,
1031 forward_iterator_tag, output_iterator_tag)
1032 {
1033 // concept requirements -- iterators already checked
1034 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1035 typename iterator_traits<_ForwardIterator>::value_type,
1036 typename iterator_traits<_ForwardIterator>::value_type>)
1037
1038 _ForwardIterator __next = __first;
1039 *__result = *__first;
1040 while (++__next != __last)
1041 if (!__binary_pred(__first, __next))
1042 {
1043 __first = __next;
1044 *++__result = *__first;
1045 }
1046 return ++__result;
1047 }
1048
1049 /**
1050 * This is an uglified
1051 * unique_copy(_InputIterator, _InputIterator, _OutputIterator,
1052 * _BinaryPredicate)
1053 * overloaded for input iterators and output iterator as result.
1054 */
1055 template<typename _InputIterator, typename _OutputIterator,
1056 typename _BinaryPredicate>
1057 _GLIBCXX20_CONSTEXPR
1058 _OutputIterator
1059 __unique_copy(_InputIterator __first, _InputIterator __last,
1060 _OutputIterator __result, _BinaryPredicate __binary_pred,
1061 input_iterator_tag, output_iterator_tag)
1062 {
1063 // concept requirements -- iterators already checked
1064 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1065 typename iterator_traits<_InputIterator>::value_type,
1066 typename iterator_traits<_InputIterator>::value_type>)
1067
1068 typename iterator_traits<_InputIterator>::value_type __value = *__first;
1069 __decltype(__gnu_cxx::__ops::__iter_comp_val(__binary_pred))
1070 __rebound_pred
1071 = __gnu_cxx::__ops::__iter_comp_val(__binary_pred);
1072 *__result = __value;
1073 while (++__first != __last)
1074 if (!__rebound_pred(__first, __value))
1075 {
1076 __value = *__first;
1077 *++__result = __value;
1078 }
1079 return ++__result;
1080 }
1081
1082 /**
1083 * This is an uglified
1084 * unique_copy(_InputIterator, _InputIterator, _OutputIterator,
1085 * _BinaryPredicate)
1086 * overloaded for input iterators and forward iterator as result.
1087 */
1088 template<typename _InputIterator, typename _ForwardIterator,
1089 typename _BinaryPredicate>
1090 _GLIBCXX20_CONSTEXPR
1091 _ForwardIterator
1092 __unique_copy(_InputIterator __first, _InputIterator __last,
1093 _ForwardIterator __result, _BinaryPredicate __binary_pred,
1094 input_iterator_tag, forward_iterator_tag)
1095 {
1096 // concept requirements -- iterators already checked
1097 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1098 typename iterator_traits<_ForwardIterator>::value_type,
1099 typename iterator_traits<_InputIterator>::value_type>)
1100 *__result = *__first;
1101 while (++__first != __last)
1102 if (!__binary_pred(__result, __first))
1103 *++__result = *__first;
1104 return ++__result;
1105 }
1106
1107 /**
1108 * This is an uglified reverse(_BidirectionalIterator,
1109 * _BidirectionalIterator)
1110 * overloaded for bidirectional iterators.
1111 */
1112 template<typename _BidirectionalIterator>
1113 _GLIBCXX20_CONSTEXPR
1114 void
1115 __reverse(_BidirectionalIterator __first, _BidirectionalIterator __last,
1116 bidirectional_iterator_tag)
1117 {
1118 while (true)
1119 if (__first == __last || __first == --__last)
1120 return;
1121 else
1122 {
1123 std::iter_swap(__first, __last);
1124 ++__first;
1125 }
1126 }
1127
1128 /**
1129 * This is an uglified reverse(_BidirectionalIterator,
1130 * _BidirectionalIterator)
1131 * overloaded for random access iterators.
1132 */
1133 template<typename _RandomAccessIterator>
1134 _GLIBCXX20_CONSTEXPR
1135 void
1136 __reverse(_RandomAccessIterator __first, _RandomAccessIterator __last,
1137 random_access_iterator_tag)
1138 {
1139 if (__first == __last)
1140 return;
1141 --__last;
1142 while (__first < __last)
1143 {
1144 std::iter_swap(__first, __last);
1145 ++__first;
1146 --__last;
1147 }
1148 }
1149
1150 /**
1151 * @brief Reverse a sequence.
1152 * @ingroup mutating_algorithms
1153 * @param __first A bidirectional iterator.
1154 * @param __last A bidirectional iterator.
1155 * @return reverse() returns no value.
1156 *
1157 * Reverses the order of the elements in the range @p [__first,__last),
1158 * so that the first element becomes the last etc.
1159 * For every @c i such that @p 0<=i<=(__last-__first)/2, @p reverse()
1160 * swaps @p *(__first+i) and @p *(__last-(i+1))
1161 */
1162 template<typename _BidirectionalIterator>
1163 _GLIBCXX20_CONSTEXPR
1164 inline void
1165 reverse(_BidirectionalIterator __first, _BidirectionalIterator __last)
1166 {
1167 // concept requirements
1168 __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
1169 _BidirectionalIterator>)
1170 __glibcxx_requires_valid_range(__first, __last);
1171 std::__reverse(__first, __last, std::__iterator_category(__first));
1172 }
1173
1174 /**
1175 * @brief Copy a sequence, reversing its elements.
1176 * @ingroup mutating_algorithms
1177 * @param __first A bidirectional iterator.
1178 * @param __last A bidirectional iterator.
1179 * @param __result An output iterator.
1180 * @return An iterator designating the end of the resulting sequence.
1181 *
1182 * Copies the elements in the range @p [__first,__last) to the
1183 * range @p [__result,__result+(__last-__first)) such that the
1184 * order of the elements is reversed. For every @c i such that @p
1185 * 0<=i<=(__last-__first), @p reverse_copy() performs the
1186 * assignment @p *(__result+(__last-__first)-1-i) = *(__first+i).
1187 * The ranges @p [__first,__last) and @p
1188 * [__result,__result+(__last-__first)) must not overlap.
1189 */
1190 template<typename _BidirectionalIterator, typename _OutputIterator>
1191 _GLIBCXX20_CONSTEXPR
1192 _OutputIterator
1193 reverse_copy(_BidirectionalIterator __first, _BidirectionalIterator __last,
1194 _OutputIterator __result)
1195 {
1196 // concept requirements
1197 __glibcxx_function_requires(_BidirectionalIteratorConcept<
1198 _BidirectionalIterator>)
1199 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
1200 typename iterator_traits<_BidirectionalIterator>::value_type>)
1201 __glibcxx_requires_valid_range(__first, __last);
1202
1203 while (__first != __last)
1204 {
1205 --__last;
1206 *__result = *__last;
1207 ++__result;
1208 }
1209 return __result;
1210 }
1211
1212 /**
1213 * This is a helper function for the rotate algorithm specialized on RAIs.
1214 * It returns the greatest common divisor of two integer values.
1215 */
1216 template<typename _EuclideanRingElement>
1217 _GLIBCXX20_CONSTEXPR
1218 _EuclideanRingElement
1219 __gcd(_EuclideanRingElement __m, _EuclideanRingElement __n)
1220 {
1221 while (__n != 0)
1222 {
1223 _EuclideanRingElement __t = __m % __n;
1224 __m = __n;
1225 __n = __t;
1226 }
1227 return __m;
1228 }
1229
1230 inline namespace _V2
1231 {
1232
1233 /// This is a helper function for the rotate algorithm.
1234 template<typename _ForwardIterator>
1235 _GLIBCXX20_CONSTEXPR
1236 _ForwardIterator
1237 __rotate(_ForwardIterator __first,
1238 _ForwardIterator __middle,
1239 _ForwardIterator __last,
1240 forward_iterator_tag)
1241 {
1242 if (__first == __middle)
1243 return __last;
1244 else if (__last == __middle)
1245 return __first;
1246
1247 _ForwardIterator __first2 = __middle;
1248 do
1249 {
1250 std::iter_swap(__first, __first2);
1251 ++__first;
1252 ++__first2;
1253 if (__first == __middle)
1254 __middle = __first2;
1255 }
1256 while (__first2 != __last);
1257
1258 _ForwardIterator __ret = __first;
1259
1260 __first2 = __middle;
1261
1262 while (__first2 != __last)
1263 {
1264 std::iter_swap(__first, __first2);
1265 ++__first;
1266 ++__first2;
1267 if (__first == __middle)
1268 __middle = __first2;
1269 else if (__first2 == __last)
1270 __first2 = __middle;
1271 }
1272 return __ret;
1273 }
1274
1275 /// This is a helper function for the rotate algorithm.
1276 template<typename _BidirectionalIterator>
1277 _GLIBCXX20_CONSTEXPR
1278 _BidirectionalIterator
1279 __rotate(_BidirectionalIterator __first,
1280 _BidirectionalIterator __middle,
1281 _BidirectionalIterator __last,
1282 bidirectional_iterator_tag)
1283 {
1284 // concept requirements
1285 __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
1286 _BidirectionalIterator>)
1287
1288 if (__first == __middle)
1289 return __last;
1290 else if (__last == __middle)
1291 return __first;
1292
1293 std::__reverse(__first, __middle, bidirectional_iterator_tag());
1294 std::__reverse(__middle, __last, bidirectional_iterator_tag());
1295
1296 while (__first != __middle && __middle != __last)
1297 {
1298 std::iter_swap(__first, --__last);
1299 ++__first;
1300 }
1301
1302 if (__first == __middle)
1303 {
1304 std::__reverse(__middle, __last, bidirectional_iterator_tag());
1305 return __last;
1306 }
1307 else
1308 {
1309 std::__reverse(__first, __middle, bidirectional_iterator_tag());
1310 return __first;
1311 }
1312 }
1313
1314 /// This is a helper function for the rotate algorithm.
1315 template<typename _RandomAccessIterator>
1316 _GLIBCXX20_CONSTEXPR
1317 _RandomAccessIterator
1318 __rotate(_RandomAccessIterator __first,
1319 _RandomAccessIterator __middle,
1320 _RandomAccessIterator __last,
1321 random_access_iterator_tag)
1322 {
1323 // concept requirements
1324 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
1325 _RandomAccessIterator>)
1326
1327 if (__first == __middle)
1328 return __last;
1329 else if (__last == __middle)
1330 return __first;
1331
1332 typedef typename iterator_traits<_RandomAccessIterator>::difference_type
1333 _Distance;
1334 typedef typename iterator_traits<_RandomAccessIterator>::value_type
1335 _ValueType;
1336
1337 _Distance __n = __last - __first;
1338 _Distance __k = __middle - __first;
1339
1340 if (__k == __n - __k)
1341 {
1342 std::swap_ranges(__first, __middle, __middle);
1343 return __middle;
1344 }
1345
1346 _RandomAccessIterator __p = __first;
1347 _RandomAccessIterator __ret = __first + (__last - __middle);
1348
1349 for (;;)
1350 {
1351 if (__k < __n - __k)
1352 {
1353 if (__is_pod(_ValueType) && __k == 1)
1354 {
1355 _ValueType __t = _GLIBCXX_MOVE(*__p);
1356 _GLIBCXX_MOVE3(__p + 1, __p + __n, __p);
1357 *(__p + __n - 1) = _GLIBCXX_MOVE(__t);
1358 return __ret;
1359 }
1360 _RandomAccessIterator __q = __p + __k;
1361 for (_Distance __i = 0; __i < __n - __k; ++ __i)
1362 {
1363 std::iter_swap(__p, __q);
1364 ++__p;
1365 ++__q;
1366 }
1367 __n %= __k;
1368 if (__n == 0)
1369 return __ret;
1370 std::swap(__n, __k);
1371 __k = __n - __k;
1372 }
1373 else
1374 {
1375 __k = __n - __k;
1376 if (__is_pod(_ValueType) && __k == 1)
1377 {
1378 _ValueType __t = _GLIBCXX_MOVE(*(__p + __n - 1));
1379 _GLIBCXX_MOVE_BACKWARD3(__p, __p + __n - 1, __p + __n);
1380 *__p = _GLIBCXX_MOVE(__t);
1381 return __ret;
1382 }
1383 _RandomAccessIterator __q = __p + __n;
1384 __p = __q - __k;
1385 for (_Distance __i = 0; __i < __n - __k; ++ __i)
1386 {
1387 --__p;
1388 --__q;
1389 std::iter_swap(__p, __q);
1390 }
1391 __n %= __k;
1392 if (__n == 0)
1393 return __ret;
1394 std::swap(__n, __k);
1395 }
1396 }
1397 }
1398
1399 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1400 // DR 488. rotate throws away useful information
1401 /**
1402 * @brief Rotate the elements of a sequence.
1403 * @ingroup mutating_algorithms
1404 * @param __first A forward iterator.
1405 * @param __middle A forward iterator.
1406 * @param __last A forward iterator.
1407 * @return first + (last - middle).
1408 *
1409 * Rotates the elements of the range @p [__first,__last) by
1410 * @p (__middle - __first) positions so that the element at @p __middle
1411 * is moved to @p __first, the element at @p __middle+1 is moved to
1412 * @p __first+1 and so on for each element in the range
1413 * @p [__first,__last).
1414 *
1415 * This effectively swaps the ranges @p [__first,__middle) and
1416 * @p [__middle,__last).
1417 *
1418 * Performs
1419 * @p *(__first+(n+(__last-__middle))%(__last-__first))=*(__first+n)
1420 * for each @p n in the range @p [0,__last-__first).
1421 */
1422 template<typename _ForwardIterator>
1423 _GLIBCXX20_CONSTEXPR
1424 inline _ForwardIterator
1425 rotate(_ForwardIterator __first, _ForwardIterator __middle,
1426 _ForwardIterator __last)
1427 {
1428 // concept requirements
1429 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
1430 _ForwardIterator>)
1431 __glibcxx_requires_valid_range(__first, __middle);
1432 __glibcxx_requires_valid_range(__middle, __last);
1433
1434 return std::__rotate(__first, __middle, __last,
1435 std::__iterator_category(__first));
1436 }
1437
1438 } // namespace _V2
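A usage sketch of rotate (editorial example; rotateLeftByTwo is a hypothetical name):

    #include <algorithm>
    #include <vector>

    // Rotate left by two positions. Per DR 488, the return value is the
    // new position of the old first element, first + (last - middle).
    static void rotateLeftByTwo(std::vector<int> &V) {
      if (V.size() >= 2)
        std::rotate(V.begin(), V.begin() + 2, V.end());
    }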
1439
1440 /**
1441 * @brief Copy a sequence, rotating its elements.
1442 * @ingroup mutating_algorithms
1443 * @param __first A forward iterator.
1444 * @param __middle A forward iterator.
1445 * @param __last A forward iterator.
1446 * @param __result An output iterator.
1447 * @return An iterator designating the end of the resulting sequence.
1448 *
1449 * Copies the elements of the range @p [__first,__last) to the
1450 * range beginning at @p __result, rotating the copied elements by
1451 * @p (__middle-__first) positions so that the element at @p __middle
1452 * is moved to @p __result, the element at @p __middle+1 is moved
1453 * to @p __result+1 and so on for each element in the range @p
1454 * [__first,__last).
1455 *
1456 * Performs
1457 * @p *(__result+(n+(__last-__middle))%(__last-__first))=*(__first+n)
1458 * for each @p n in the range @p [0,__last-__first).
1459 */
1460 template<typename _ForwardIterator, typename _OutputIterator>
1461 _GLIBCXX20_CONSTEXPR
1462 inline _OutputIterator
1463 rotate_copy(_ForwardIterator __first, _ForwardIterator __middle,
1464 _ForwardIterator __last, _OutputIterator __result)
1465 {
1466 // concept requirements
1467 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
1468 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
1469 typename iterator_traits<_ForwardIterator>::value_type>)
1470 __glibcxx_requires_valid_range(__first, __middle);
1471 __glibcxx_requires_valid_range(__middle, __last);
1472
1473 return std::copy(__first, __middle,
1474 std::copy(__middle, __last, __result));
1475 }
1476
1477 /// This is a helper function...
1478 template<typename _ForwardIterator, typename _Predicate>
1479 _GLIBCXX20_CONSTEXPR
1480 _ForwardIterator
1481 __partition(_ForwardIterator __first, _ForwardIterator __last,
1482 _Predicate __pred, forward_iterator_tag)
1483 {
1484 if (__first == __last)
1485 return __first;
1486
1487 while (__pred(*__first))
1488 if (++__first == __last)
1489 return __first;
1490
1491 _ForwardIterator __next = __first;
1492
1493 while (++__next != __last)
1494 if (__pred(*__next))
1495 {
1496 std::iter_swap(__first, __next);
1497 ++__first;
1498 }
1499
1500 return __first;
1501 }
1502
1503 /// This is a helper function...
1504 template<typename _BidirectionalIterator, typename _Predicate>
1505 _GLIBCXX20_CONSTEXPR
1506 _BidirectionalIterator
1507 __partition(_BidirectionalIterator __first, _BidirectionalIterator __last,
1508 _Predicate __pred, bidirectional_iterator_tag)
1509 {
1510 while (true)
1511 {
1512 while (true)
1513 if (__first == __last)
1514 return __first;
1515 else if (__pred(*__first))
1516 ++__first;
1517 else
1518 break;
1519 --__last;
1520 while (true)
1521 if (__first == __last)
1522 return __first;
1523 else if (!bool(__pred(*__last)))
1524 --__last;
1525 else
1526 break;
1527 std::iter_swap(__first, __last);
1528 ++__first;
1529 }
1530 }
1531
1532 // partition
1533
1534 /// This is a helper function...
1535 /// Requires __first != __last and !__pred(__first)
1536 /// and __len == distance(__first, __last).
1537 ///
1538 /// !__pred(__first) allows us to guarantee that we don't
1539 /// move-assign an element onto itself.
1540 template<typename _ForwardIterator, typename _Pointer, typename _Predicate,
1541 typename _Distance>
1542 _ForwardIterator
1543 __stable_partition_adaptive(_ForwardIterator __first,
1544 _ForwardIterator __last,
1545 _Predicate __pred, _Distance __len,
1546 _Pointer __buffer,
1547 _Distance __buffer_size)
1548 {
1549 if (__len == 1)
1550 return __first;
1551
1552 if (__len <= __buffer_size)
1553 {
1554 _ForwardIterator __result1 = __first;
1555 _Pointer __result2 = __buffer;
1556
1557 // The precondition guarantees that !__pred(__first), so
1558 // move that element to the buffer before starting the loop.
1559 // This ensures that we only call __pred once per element.
1560 *__result2 = _GLIBCXX_MOVE(*__first);
1561 ++__result2;
1562 ++__first;
1563 for (; __first != __last; ++__first)
1564 if (__pred(__first))
1565 {
1566 *__result1 = _GLIBCXX_MOVE(*__first);
1567 ++__result1;
1568 }
1569 else
1570 {
1571 *__result2 = _GLIBCXX_MOVE(*__first);
1572 ++__result2;
1573 }
1574
1575 _GLIBCXX_MOVE3(__buffer, __result2, __result1);
1576 return __result1;
1577 }
1578
1579 _ForwardIterator __middle = __first;
1580 std::advance(__middle, __len / 2);
1581 _ForwardIterator __left_split =
1582 std::__stable_partition_adaptive(__first, __middle, __pred,
1583 __len / 2, __buffer,
1584 __buffer_size);
1585
1586 // Advance past true-predicate values to satisfy this
1587 // function's preconditions.
1588 _Distance __right_len = __len - __len / 2;
1589 _ForwardIterator __right_split =
1590 std::__find_if_not_n(__middle, __right_len, __pred);
1591
1592 if (__right_len)
1593 __right_split =
1594 std::__stable_partition_adaptive(__right_split, __last, __pred,
1595 __right_len,
1596 __buffer, __buffer_size);
1597
1598 return std::rotate(__left_split, __middle, __right_split);
1599 }
1600
1601 template<typename _ForwardIterator, typename _Predicate>
1602 _ForwardIterator
1603 __stable_partition(_ForwardIterator __first, _ForwardIterator __last,
1604 _Predicate __pred)
1605 {
1606 __first = std::__find_if_not(__first, __last, __pred);
1607
1608 if (__first == __last)
1609 return __first;
1610
1611 typedef typename iterator_traits<_ForwardIterator>::value_type
1612 _ValueType;
1613 typedef typename iterator_traits<_ForwardIterator>::difference_type
1614 _DistanceType;
1615
1616 _Temporary_buffer<_ForwardIterator, _ValueType>
1617 __buf(__first, std::distance(__first, __last));
1618 return
1619 std::__stable_partition_adaptive(__first, __last, __pred,
1620 _DistanceType(__buf.requested_size()),
1621 __buf.begin(),
1622 _DistanceType(__buf.size()));
1623 }
1624
1625 /**
1626 * @brief Move elements for which a predicate is true to the beginning
1627 * of a sequence, preserving relative ordering.
1628 * @ingroup mutating_algorithms
1629 * @param __first A forward iterator.
1630 * @param __last A forward iterator.
1631 * @param __pred A predicate functor.
1632 * @return An iterator @p middle such that @p __pred(i) is true for each
1633 * iterator @p i in the range @p [first,middle) and false for each @p i
1634 * in the range @p [middle,last).
1635 *
1636 * Performs the same function as @p partition() with the additional
1637 * guarantee that the relative ordering of elements in each group is
1638 * preserved, so any two elements @p x and @p y in the range
1639 * @p [__first,__last) such that @p __pred(x)==__pred(y) will have the same
1640 * relative ordering after calling @p stable_partition().
1641 */
1642 template<typename _ForwardIterator, typename _Predicate>
1643 inline _ForwardIterator
1644 stable_partition(_ForwardIterator __first, _ForwardIterator __last,
1645 _Predicate __pred)
1646 {
1647 // concept requirements
1648 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
1649 _ForwardIterator>)
1650 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
1651 typename iterator_traits<_ForwardIterator>::value_type>)
1652 __glibcxx_requires_valid_range(__first, __last);
1653
1654 return std::__stable_partition(__first, __last,
1655 __gnu_cxx::__ops::__pred_iter(__pred));
1656 }
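A minimal usage sketch (editorial, not part of the header): stable_partition moves the elements that satisfy the predicate to the front while keeping the relative order inside each group.

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
  std::vector<int> v{3, 2, 4, 5, 8, 1, 6};
  // Move the even numbers to the front, preserving relative order.
  auto mid = std::stable_partition(v.begin(), v.end(),
                                   [](int x) { return x % 2 == 0; });
  // v is now {2, 4, 8, 6, 3, 5, 1}; evens and odds each keep their order.
  for (auto it = v.begin(); it != mid; ++it)
    std::printf("%d ", *it);  // prints the even group: 2 4 8 6
}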
1657
1658 /// This is a helper function for the sort routines.
1659 template<typename _RandomAccessIterator, typename _Compare>
1660 _GLIBCXX20_CONSTEXPR
1661 void
1662 __heap_select(_RandomAccessIterator __first,
1663 _RandomAccessIterator __middle,
1664 _RandomAccessIterator __last, _Compare __comp)
1665 {
1666 std::__make_heap(__first, __middle, __comp);
1667 for (_RandomAccessIterator __i = __middle; __i < __last; ++__i)
1668 if (__comp(__i, __first))
1669 std::__pop_heap(__first, __middle, __i, __comp);
1670 }
1671
1672 // partial_sort
1673
1674 template<typename _InputIterator, typename _RandomAccessIterator,
1675 typename _Compare>
1676 _GLIBCXX20_CONSTEXPR
1677 _RandomAccessIterator
1678 __partial_sort_copy(_InputIterator __first, _InputIterator __last,
1679 _RandomAccessIterator __result_first,
1680 _RandomAccessIterator __result_last,
1681 _Compare __comp)
1682 {
1683 typedef typename iterator_traits<_InputIterator>::value_type
1684 _InputValueType;
1685 typedef iterator_traits<_RandomAccessIterator> _RItTraits;
1686 typedef typename _RItTraits::difference_type _DistanceType;
1687
1688 if (__result_first == __result_last)
1689 return __result_last;
1690 _RandomAccessIterator __result_real_last = __result_first;
1691 while (__first != __last && __result_real_last != __result_last)
1692 {
1693 *__result_real_last = *__first;
1694 ++__result_real_last;
1695 ++__first;
1696 }
1697
1698 std::__make_heap(__result_first, __result_real_last, __comp);
1699 while (__first != __last)
1700 {
1701 if (__comp(__first, __result_first))
1702 std::__adjust_heap(__result_first, _DistanceType(0),
1703 _DistanceType(__result_real_last
1704 - __result_first),
1705 _InputValueType(*__first), __comp);
1706 ++__first;
1707 }
1708 std::__sort_heap(__result_first, __result_real_last, __comp);
1709 return __result_real_last;
1710 }
1711
1712 /**
1713 * @brief Copy the smallest elements of a sequence.
1714 * @ingroup sorting_algorithms
1715 * @param __first An iterator.
1716 * @param __last Another iterator.
1717 * @param __result_first A random-access iterator.
1718 * @param __result_last Another random-access iterator.
1719 * @return An iterator indicating the end of the resulting sequence.
1720 *
1721 * Copies and sorts the smallest N values from the range @p [__first,__last)
1722 * to the range beginning at @p __result_first, where the number of
1723 * elements to be copied, @p N, is the smaller of @p (__last-__first) and
1724 * @p (__result_last-__result_first).
1725 * After the sort if @e i and @e j are iterators in the range
1726 * @p [__result_first,__result_first+N) such that i precedes j then
1727 * *j<*i is false.
1728 * The value returned is @p __result_first+N.
1729 */
1730 template<typename _InputIterator, typename _RandomAccessIterator>
1731 _GLIBCXX20_CONSTEXPR
1732 inline _RandomAccessIterator
1733 partial_sort_copy(_InputIterator __first, _InputIterator __last,
1734 _RandomAccessIterator __result_first,
1735 _RandomAccessIterator __result_last)
1736 {
1737#ifdef _GLIBCXX_CONCEPT_CHECKS
1738 typedef typename iterator_traits<_InputIterator>::value_type
1739 _InputValueType;
1740 typedef typename iterator_traits<_RandomAccessIterator>::value_type
1741 _OutputValueType;
1742#endif
1743
1744 // concept requirements
1745 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
1746 __glibcxx_function_requires(_ConvertibleConcept<_InputValueType,
1747 _OutputValueType>)
1748 __glibcxx_function_requires(_LessThanOpConcept<_InputValueType,
1749 _OutputValueType>)
1750 __glibcxx_function_requires(_LessThanComparableConcept<_OutputValueType>)
1751 __glibcxx_requires_valid_range(__first, __last);
1752 __glibcxx_requires_irreflexive(__first, __last);
1753 __glibcxx_requires_valid_range(__result_first, __result_last);
1754
1755 return std::__partial_sort_copy(__first, __last,
1756 __result_first, __result_last,
1757 __gnu_cxx::__ops::__iter_less_iter());
1758 }
1759
1760 /**
1761 * @brief Copy the smallest elements of a sequence using a predicate for
1762 * comparison.
1763 * @ingroup sorting_algorithms
1764 * @param __first An input iterator.
1765 * @param __last Another input iterator.
1766 * @param __result_first A random-access iterator.
1767 * @param __result_last Another random-access iterator.
1768 * @param __comp A comparison functor.
1769 * @return An iterator indicating the end of the resulting sequence.
1770 *
1771 * Copies and sorts the smallest N values from the range @p [__first,__last)
1772 * to the range beginning at @p __result_first, where the number of
1773 * elements to be copied, @p N, is the smaller of @p (__last-__first) and
1774 * @p (__result_last-__result_first).
1775 * After the sort if @e i and @e j are iterators in the range
1776 * @p [__result_first,__result_first+N) such that i precedes j then
1777 * @p __comp(*j,*i) is false.
1778 * The value returned is @p __result_first+N.
1779 */
1780 template<typename _InputIterator, typename _RandomAccessIterator,
1781 typename _Compare>
1782 _GLIBCXX20_CONSTEXPR
1783 inline _RandomAccessIterator
1784 partial_sort_copy(_InputIterator __first, _InputIterator __last,
1785 _RandomAccessIterator __result_first,
1786 _RandomAccessIterator __result_last,
1787 _Compare __comp)
1788 {
1789#ifdef _GLIBCXX_CONCEPT_CHECKS
1790 typedef typename iterator_traits<_InputIterator>::value_type
1791 _InputValueType;
1792 typedef typename iterator_traits<_RandomAccessIterator>::value_type
1793 _OutputValueType;
1794#endif
1795
1796 // concept requirements
1797 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
1798 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
1799 _RandomAccessIterator>)
1800 __glibcxx_function_requires(_ConvertibleConcept<_InputValueType,
1801 _OutputValueType>)
1802 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
1803 _InputValueType, _OutputValueType>)
1804 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
1805 _OutputValueType, _OutputValueType>)
1806 __glibcxx_requires_valid_range(__first, __last);
1807 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
1808 __glibcxx_requires_valid_range(__result_first, __result_last);
1809
1810 return std::__partial_sort_copy(__first, __last,
1811 __result_first, __result_last,
1812 __gnu_cxx::__ops::__iter_comp_iter(__comp));
1813 }
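A minimal usage sketch (editorial): partial_sort_copy is the right tool when only the N smallest values are needed and the source range must stay untouched.

#include <algorithm>
#include <vector>

int main()
{
  const std::vector<int> src{9, 1, 8, 2, 7, 3};
  std::vector<int> dst(3);
  // Copies and sorts the three smallest values of src into dst.
  auto end = std::partial_sort_copy(src.begin(), src.end(),
                                    dst.begin(), dst.end());
  // dst == {1, 2, 3}; end == dst.end() since src holds more than 3 values.
  (void)end;
}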
1814
1815 /// This is a helper function for the sort routine.
1816 template<typename _RandomAccessIterator, typename _Compare>
1817 _GLIBCXX20_CONSTEXPR
1818 void
1819 __unguarded_linear_insert(_RandomAccessIterator __last,
1820 _Compare __comp)
1821 {
1822 typename iterator_traits<_RandomAccessIterator>::value_type
1823 __val = _GLIBCXX_MOVE(*__last);
1824 _RandomAccessIterator __next = __last;
1825 --__next;
1826 while (__comp(__val, __next))
1827 {
1828 *__last = _GLIBCXX_MOVE(*__next);
1829 __last = __next;
1830 --__next;
1831 }
1832 *__last = _GLIBCXX_MOVE(__val);
1833 }
1834
1835 /// This is a helper function for the sort routine.
1836 template<typename _RandomAccessIterator, typename _Compare>
1837 _GLIBCXX20_CONSTEXPR
1838 void
1839 __insertion_sort(_RandomAccessIterator __first,
1840 _RandomAccessIterator __last, _Compare __comp)
1841 {
1842 if (__first == __last) return;
1843
1844 for (_RandomAccessIterator __i = __first + 1; __i != __last; ++__i)
1845 {
1846 if (__comp(__i, __first))
1847 {
1848 typename iterator_traits<_RandomAccessIterator>::value_type
1849 __val = _GLIBCXX_MOVE(*__i);
1850 _GLIBCXX_MOVE_BACKWARD3(__first, __i, __i + 1);
1851 *__first = _GLIBCXX_MOVE(__val);
1852 }
1853 else
1854 std::__unguarded_linear_insert(__i,
1855 __gnu_cxx::__ops::__val_comp_iter(__comp));
1856 }
1857 }
1858
1859 /// This is a helper function for the sort routine.
1860 template<typename _RandomAccessIterator, typename _Compare>
1861 _GLIBCXX20_CONSTEXPR
1862 inline void
1863 __unguarded_insertion_sort(_RandomAccessIterator __first,
1864 _RandomAccessIterator __last, _Compare __comp)
1865 {
1866 for (_RandomAccessIterator __i = __first; __i != __last; ++__i)
1867 std::__unguarded_linear_insert(__i,
1868 __gnu_cxx::__ops::__val_comp_iter(__comp));
1869 }
1870
1871 /**
1872 * The cutoff below which the sort routines finish with
1873 * insertion sort instead of further partitioning.
1874 */
1875 enum { _S_threshold = 16 };
1876
1877 /// This is a helper function for the sort routine.
1878 template<typename _RandomAccessIterator, typename _Compare>
1879 _GLIBCXX20_CONSTEXPR
1880 void
1881 __final_insertion_sort(_RandomAccessIterator __first,
1882 _RandomAccessIterator __last, _Compare __comp)
1883 {
1884 if (__last - __first > int(_S_threshold))
1885 {
1886 std::__insertion_sort(__first, __first + int(_S_threshold), __comp);
1887 std::__unguarded_insertion_sort(__first + int(_S_threshold), __last,
1888 __comp);
1889 }
1890 else
1891 std::__insertion_sort(__first, __last, __comp);
1892 }
1893
1894 /// This is a helper function...
1895 template<typename _RandomAccessIterator, typename _Compare>
1896 _GLIBCXX20_CONSTEXPR
1897 _RandomAccessIterator
1898 __unguarded_partition(_RandomAccessIterator __first,
1899 _RandomAccessIterator __last,
1900 _RandomAccessIterator __pivot, _Compare __comp)
1901 {
1902 while (true)
1903 {
1904 while (__comp(__first, __pivot))
1905 ++__first;
1906 --__last;
1907 while (__comp(__pivot, __last))
1908 --__last;
1909 if (!(__first < __last))
1910 return __first;
1911 std::iter_swap(__first, __last);
1912 ++__first;
1913 }
1914 }
1915
1916 /// This is a helper function...
1917 template<typename _RandomAccessIterator, typename _Compare>
1918 _GLIBCXX20_CONSTEXPR
1919 inline _RandomAccessIterator
1920 __unguarded_partition_pivot(_RandomAccessIterator __first,
1921 _RandomAccessIterator __last, _Compare __comp)
1922 {
1923 _RandomAccessIterator __mid = __first + (__last - __first) / 2;
1924 std::__move_median_to_first(__first, __first + 1, __mid, __last - 1,
1925 __comp);
1926 return std::__unguarded_partition(__first + 1, __last, __first, __comp);
1927 }
1928
1929 template<typename _RandomAccessIterator, typename _Compare>
1930 _GLIBCXX20_CONSTEXPR
1931 inline void
1932 __partial_sort(_RandomAccessIterator __first,
1933 _RandomAccessIterator __middle,
1934 _RandomAccessIterator __last,
1935 _Compare __comp)
1936 {
1937 std::__heap_select(__first, __middle, __last, __comp);
1938 std::__sort_heap(__first, __middle, __comp);
1939 }
1940
1941 /// This is a helper function for the sort routine.
1942 template<typename _RandomAccessIterator, typename _Size, typename _Compare>
1943 _GLIBCXX20_CONSTEXPR
1944 void
1945 __introsort_loop(_RandomAccessIterator __first,
1946 _RandomAccessIterator __last,
1947 _Size __depth_limit, _Compare __comp)
1948 {
1949 while (__last - __first > int(_S_threshold))
1950 {
1951 if (__depth_limit == 0)
1952 {
1953 std::__partial_sort(__first, __last, __last, __comp);
1954 return;
1955 }
1956 --__depth_limit;
1957 _RandomAccessIterator __cut =
1958 std::__unguarded_partition_pivot(__first, __last, __comp);
1959 std::__introsort_loop(__cut, __last, __depth_limit, __comp);
1960 __last = __cut;
1961 }
1962 }
1963
1964 // sort
1965
1966 template<typename _RandomAccessIterator, typename _Compare>
1967 _GLIBCXX20_CONSTEXPR
1968 inline void
1969 __sort(_RandomAccessIterator __first, _RandomAccessIterator __last,
1970 _Compare __comp)
1971 {
1972 if (__first != __last)
1973 {
1974 std::__introsort_loop(__first, __last,
1975 std::__lg(__last - __first) * 2,
1976 __comp);
1977 std::__final_insertion_sort(__first, __last, __comp);
1978 }
1979 }
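Taken together, __sort is introsort: quicksort partitioning up to a depth limit of 2 * lg(last - first), a heapsort fallback (via __partial_sort) once the limit is hit, and a final insertion-sort pass over the nearly sorted result. A simplified standalone sketch of the same control flow (editorial; it uses std::partition with a middle pivot instead of the internal unguarded partitioning):

#include <algorithm>
#include <vector>

// Plain insertion sort for the small leftover ranges.
template<typename It>
void insertion_sort_sketch(It first, It last)
{
  for (It i = first; i != last; ++i)
    for (It j = i; j != first && *j < *(j - 1); --j)
      std::iter_swap(j, j - 1);
}

// depth should start at roughly 2 * lg(last - first).
template<typename It>
void introsort_sketch(It first, It last, int depth)
{
  const int threshold = 16;               // same role as _S_threshold
  while (last - first > threshold)
    {
      if (depth == 0)
        {                                  // partitioning degenerated:
          std::make_heap(first, last);     // fall back to heapsort
          std::sort_heap(first, last);
          return;
        }
      --depth;
      auto pivot = *(first + (last - first) / 2);
      It cut = std::partition(first, last,
                              [&](const auto& x) { return x < pivot; });
      introsort_sketch(cut, last, depth);  // recurse on one side,
      last = cut;                          // iterate on the other
    }
  insertion_sort_sketch(first, last);      // finish the small pieces
}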
1980
1981 template<typename _RandomAccessIterator, typename _Size, typename _Compare>
1982 _GLIBCXX20_CONSTEXPR
1983 void
1984 __introselect(_RandomAccessIterator __first, _RandomAccessIterator __nth,
1985 _RandomAccessIterator __last, _Size __depth_limit,
1986 _Compare __comp)
1987 {
1988 while (__last - __first > 3)
1989 {
1990 if (__depth_limit == 0)
1991 {
1992 std::__heap_select(__first, __nth + 1, __last, __comp);
1993 // Place the element that belongs at __nth in its final position.
1994 std::iter_swap(__first, __nth);
1995 return;
1996 }
1997 --__depth_limit;
1998 _RandomAccessIterator __cut =
1999 std::__unguarded_partition_pivot(__first, __last, __comp);
2000 if (__cut <= __nth)
2001 __first = __cut;
2002 else
2003 __last = __cut;
2004 }
2005 std::__insertion_sort(__first, __last, __comp);
2006 }
2007
2008 // nth_element
2009
2010 // lower_bound moved to stl_algobase.h
2011
2012 /**
2013 * @brief Finds the first position in which @p __val could be inserted
2014 * without changing the ordering.
2015 * @ingroup binary_search_algorithms
2016 * @param __first An iterator.
2017 * @param __last Another iterator.
2018 * @param __val The search term.
2019 * @param __comp A functor to use for comparisons.
2020 * @return An iterator pointing to the first element <em>not less
2021 * than</em> @p __val, or end() if every element is less
2022 * than @p __val.
2023 * @ingroup binary_search_algorithms
2024 *
2025 * The comparison function should have the same effects on ordering as
2026 * the function used for the initial sort.
2027 */
2028 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2029 _GLIBCXX20_CONSTEXPR
2030 inline _ForwardIterator
2031 lower_bound(_ForwardIterator __first, _ForwardIterator __last,
2032 const _Tp& __val, _Compare __comp)
2033 {
2034 // concept requirements
2035 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2036 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2037 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
2038 __glibcxx_requires_partitioned_lower_pred(__first, __last,
2039 __val, __comp);
2040
2041 return std::__lower_bound(__first, __last, __val,
2042 __gnu_cxx::__ops::__iter_comp_val(__comp));
2043 }
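A minimal usage sketch (editorial): the comparator passed to lower_bound must be the same ordering the range was sorted with, here a descending one.

#include <algorithm>
#include <vector>

int main()
{
  std::vector<int> v{9, 7, 5, 3, 1};               // sorted descending
  auto it = std::lower_bound(v.begin(), v.end(), 5,
                             [](int a, int b) { return a > b; });
  // *it == 5: the first position where 5 can be inserted without
  // breaking the descending order.
  (void)it;
}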
2044
2045 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2046 _GLIBCXX20_CONSTEXPR
2047 _ForwardIterator
2048 __upper_bound(_ForwardIterator __first, _ForwardIterator __last,
2049 const _Tp& __val, _Compare __comp)
2050 {
2051 typedef typename iterator_traits<_ForwardIterator>::difference_type
2052 _DistanceType;
2053
2054 _DistanceType __len = std::distance(__first, __last);
2055
2056 while (__len > 0)
2057 {
2058 _DistanceType __half = __len >> 1;
2059 _ForwardIterator __middle = __first;
2060 std::advance(__middle, __half);
2061 if (__comp(__val, __middle))
2062 __len = __half;
2063 else
2064 {
2065 __first = __middle;
2066 ++__first;
2067 __len = __len - __half - 1;
2068 }
2069 }
2070 return __first;
2071 }
2072
2073 /**
2074 * @brief Finds the last position in which @p __val could be inserted
2075 * without changing the ordering.
2076 * @ingroup binary_search_algorithms
2077 * @param __first An iterator.
2078 * @param __last Another iterator.
2079 * @param __val The search term.
2080 * @return An iterator pointing to the first element greater than @p __val,
2081 * or end() if no elements are greater than @p __val.
2082 * @ingroup binary_search_algorithms
2083 */
2084 template<typename _ForwardIterator, typename _Tp>
2085 _GLIBCXX20_CONSTEXPR
2086 inline _ForwardIterator
2087 upper_bound(_ForwardIterator __first, _ForwardIterator __last,
2088 const _Tp& __val)
2089 {
2090 // concept requirements
2091 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2092 __glibcxx_function_requires(_LessThanOpConcept<
2093 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2094 __glibcxx_requires_partitioned_upper(__first, __last, __val);
2095
2096 return std::__upper_bound(__first, __last, __val,
2097 __gnu_cxx::__ops::__val_less_iter());
2098 }
2099
2100 /**
2101 * @brief Finds the last position in which @p __val could be inserted
2102 * without changing the ordering.
2103 * @ingroup binary_search_algorithms
2104 * @param __first An iterator.
2105 * @param __last Another iterator.
2106 * @param __val The search term.
2107 * @param __comp A functor to use for comparisons.
2108 * @return An iterator pointing to the first element greater than @p __val,
2109 * or end() if no elements are greater than @p __val.
2110 * @ingroup binary_search_algorithms
2111 *
2112 * The comparison function should have the same effects on ordering as
2113 * the function used for the initial sort.
2114 */
2115 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2116 _GLIBCXX20_CONSTEXPR
2117 inline _ForwardIterator
2118 upper_bound(_ForwardIterator __first, _ForwardIterator __last,
2119 const _Tp& __val, _Compare __comp)
2120 {
2121 // concept requirements
2122 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2123 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2124 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2125 __glibcxx_requires_partitioned_upper_pred(__first, __last,
2126 __val, __comp);
2127
2128 return std::__upper_bound(__first, __last, __val,
2129 __gnu_cxx::__ops::__val_comp_iter(__comp));
2130 }
2131
2132 template<typename _ForwardIterator, typename _Tp,
2133 typename _CompareItTp, typename _CompareTpIt>
2134 _GLIBCXX20_CONSTEXPR
2135 pair<_ForwardIterator, _ForwardIterator>
2136 __equal_range(_ForwardIterator __first, _ForwardIterator __last,
2137 const _Tp& __val,
2138 _CompareItTp __comp_it_val, _CompareTpIt __comp_val_it)
2139 {
2140 typedef typename iterator_traits<_ForwardIterator>::difference_type
2141 _DistanceType;
2142
2143 _DistanceType __len = std::distance(__first, __last);
2144
2145 while (__len > 0)
2146 {
2147 _DistanceType __half = __len >> 1;
2148 _ForwardIterator __middle = __first;
2149 std::advance(__middle, __half);
2150 if (__comp_it_val(__middle, __val))
2151 {
2152 __first = __middle;
2153 ++__first;
2154 __len = __len - __half - 1;
2155 }
2156 else if (__comp_val_it(__val, __middle))
2157 __len = __half;
2158 else
2159 {
2160 _ForwardIterator __left
2161 = std::__lower_bound(__first, __middle, __val, __comp_it_val);
2162 std::advance(__first, __len);
2163 _ForwardIterator __right
2164 = std::__upper_bound(++__middle, __first, __val, __comp_val_it);
2165 return pair<_ForwardIterator, _ForwardIterator>(__left, __right);
2166 }
2167 }
2168 return pair<_ForwardIterator, _ForwardIterator>(__first, __first);
2169 }
2170
2171 /**
2172 * @brief Finds the largest subrange in which @p __val could be inserted
2173 * at any place in it without changing the ordering.
2174 * @ingroup binary_search_algorithms
2175 * @param __first An iterator.
2176 * @param __last Another iterator.
2177 * @param __val The search term.
2178 * @return A pair of iterators defining the subrange.
2179 * @ingroup binary_search_algorithms
2180 *
2181 * This is equivalent to
2182 * @code
2183 * std::make_pair(lower_bound(__first, __last, __val),
2184 * upper_bound(__first, __last, __val))
2185 * @endcode
2186 * but does not actually call those functions.
2187 */
2188 template<typename _ForwardIterator, typename _Tp>
2189 _GLIBCXX20_CONSTEXPR
2190 inline pair<_ForwardIterator, _ForwardIterator>
2191 equal_range(_ForwardIterator __first, _ForwardIterator __last,
2192 const _Tp& __val)
2193 {
2194 // concept requirements
2195 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2196 __glibcxx_function_requires(_LessThanOpConcept<
2197 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
2198 __glibcxx_function_requires(_LessThanOpConcept<
2199 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2200 __glibcxx_requires_partitioned_lower(__first, __last, __val);
2201 __glibcxx_requires_partitioned_upper(__first, __last, __val);
2202
2203 return std::__equal_range(__first, __last, __val,
2204 __gnu_cxx::__ops::__iter_less_val(),
2205 __gnu_cxx::__ops::__val_less_iter());
2206 }
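A minimal usage sketch (editorial): one equal_range call locates the whole run of equivalent elements in O(log n).

#include <algorithm>
#include <vector>

int main()
{
  std::vector<int> v{1, 2, 2, 2, 3, 4};
  auto p = std::equal_range(v.begin(), v.end(), 2);
  // p.first points at the first 2, p.second one past the last 2,
  // so p.second - p.first == 3.
  (void)p;
}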
2207
2208 /**
2209 * @brief Finds the largest subrange in which @p __val could be inserted
2210 * at any place in it without changing the ordering.
2211 * @param __first An iterator.
2212 * @param __last Another iterator.
2213 * @param __val The search term.
2214 * @param __comp A functor to use for comparisons.
2215 * @return A pair of iterators defining the subrange.
2216 * @ingroup binary_search_algorithms
2217 *
2218 * This is equivalent to
2219 * @code
2220 * std::make_pair(lower_bound(__first, __last, __val, __comp),
2221 * upper_bound(__first, __last, __val, __comp))
2222 * @endcode
2223 * but does not actually call those functions.
2224 */
2225 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2226 _GLIBCXX20_CONSTEXPR
2227 inline pair<_ForwardIterator, _ForwardIterator>
2228 equal_range(_ForwardIterator __first, _ForwardIterator __last,
2229 const _Tp& __val, _Compare __comp)
2230 {
2231 // concept requirements
2232 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2233 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2234 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
2235 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2236 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2237 __glibcxx_requires_partitioned_lower_pred(__first, __last,
2238 __val, __comp);
2239 __glibcxx_requires_partitioned_upper_pred(__first, __last,
2240 __val, __comp);
2241
2242 return std::__equal_range(__first, __last, __val,
2243 __gnu_cxx::__ops::__iter_comp_val(__comp),
2244 __gnu_cxx::__ops::__val_comp_iter(__comp));
2245 }
2246
2247 /**
2248 * @brief Determines whether an element exists in a range.
2249 * @ingroup binary_search_algorithms
2250 * @param __first An iterator.
2251 * @param __last Another iterator.
2252 * @param __val The search term.
2253 * @return True if @p __val (or its equivalent) is in the range
2254 * @p [__first,__last).
2255 *
2256 * Note that this does not actually return an iterator to @p __val. For
2257 * that, use std::find or a container's specialized find member functions.
2258 */
2259 template<typename _ForwardIterator, typename _Tp>
2260 _GLIBCXX20_CONSTEXPR
2261 bool
2262 binary_search(_ForwardIterator __first, _ForwardIterator __last,
2263 const _Tp& __val)
2264 {
2265 // concept requirements
2266 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2267 __glibcxx_function_requires(_LessThanOpConcept<
2268 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2269 __glibcxx_requires_partitioned_lower(__first, __last, __val);
2270 __glibcxx_requires_partitioned_upper(__first, __last, __val);
2271
2272 _ForwardIterator __i
2273 = std::__lower_bound(__first, __last, __val,
2274 __gnu_cxx::__ops::__iter_less_val());
2275 return __i != __last && !(__val < *__i);
2276 }
2277
2278 /**
2279 * @brief Determines whether an element exists in a range.
2280 * @ingroup binary_search_algorithms
2281 * @param __first An iterator.
2282 * @param __last Another iterator.
2283 * @param __val The search term.
2284 * @param __comp A functor to use for comparisons.
2285 * @return True if @p __val (or its equivalent) is in @p [__first,__last).
2286 *
2287 * Note that this does not actually return an iterator to @p __val. For
2288 * that, use std::find or a container's specialized find member functions.
2289 *
2290 * The comparison function should have the same effects on ordering as
2291 * the function used for the initial sort.
2292 */
2293 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2294 _GLIBCXX20_CONSTEXPR
2295 bool
2296 binary_search(_ForwardIterator __first, _ForwardIterator __last,
2297 const _Tp& __val, _Compare __comp)
2298 {
2299 // concept requirements
2300 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2301 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2302 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2303 __glibcxx_requires_partitioned_lower_pred(__first, __last,
2304 __val, __comp);
2305 __glibcxx_requires_partitioned_upper_pred(__first, __last,
2306 __val, __comp);
2307
2308 _ForwardIterator __i
2309 = std::__lower_bound(__first, __last, __val,
2310 __gnu_cxx::__ops::__iter_comp_val(__comp));
2311 return __i != __last && !bool(__comp(__val, *__i));
2312 }
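A minimal usage sketch (editorial): binary_search only answers yes or no; combine it with lower_bound or equal_range when the position is also needed.

#include <algorithm>
#include <vector>

int main()
{
  const std::vector<int> v{1, 3, 5, 7};
  bool has5 = std::binary_search(v.begin(), v.end(), 5);  // true
  bool has4 = std::binary_search(v.begin(), v.end(), 4);  // false
  (void)has5; (void)has4;
}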
2313
2314 // merge
2315
2316 /// This is a helper function for the __merge_adaptive routines.
2317 template<typename _InputIterator1, typename _InputIterator2,
2318 typename _OutputIterator, typename _Compare>
2319 void
2320 __move_merge_adaptive(_InputIterator1 __first1, _InputIterator1 __last1,
2321 _InputIterator2 __first2, _InputIterator2 __last2,
2322 _OutputIterator __result, _Compare __comp)
2323 {
2324 while (__first1 != __last1 && __first2 != __last2)
2325 {
2326 if (__comp(__first2, __first1))
2327 {
2328 *__result = _GLIBCXX_MOVE(*__first2);
2329 ++__first2;
2330 }
2331 else
2332 {
2333 *__result = _GLIBCXX_MOVE(*__first1);
2334 ++__first1;
2335 }
2336 ++__result;
2337 }
2338 if (__first1 != __last1)
2339 _GLIBCXX_MOVE3(__first1, __last1, __result);
2340 }
2341
2342 /// This is a helper function for the __merge_adaptive routines.
2343 template<typename _BidirectionalIterator1, typename _BidirectionalIterator2,
2344 typename _BidirectionalIterator3, typename _Compare>
2345 void
2346 __move_merge_adaptive_backward(_BidirectionalIterator1 __first1,
2347 _BidirectionalIterator1 __last1,
2348 _BidirectionalIterator2 __first2,
2349 _BidirectionalIterator2 __last2,
2350 _BidirectionalIterator3 __result,
2351 _Compare __comp)
2352 {
2353 if (__first1 == __last1)
2354 {
2355 _GLIBCXX_MOVE_BACKWARD3(__first2, __last2, __result);
2356 return;
2357 }
2358 else if (__first2 == __last2)
2359 return;
2360
2361 --__last1;
2362 --__last2;
2363 while (true)
2364 {
2365 if (__comp(__last2, __last1))
2366 {
2367 *--__result = _GLIBCXX_MOVE(*__last1);
2368 if (__first1 == __last1)
2369 {
2370 _GLIBCXX_MOVE_BACKWARD3(__first2, ++__last2, __result);
2371 return;
2372 }
2373 --__last1;
2374 }
2375 else
2376 {
2377 *--__result = _GLIBCXX_MOVE(*__last2);
2378 if (__first2 == __last2)
2379 return;
2380 --__last2;
2381 }
2382 }
2383 }
2384
2385 /// This is a helper function for the merge routines.
2386 template<typename _BidirectionalIterator1, typename _BidirectionalIterator2,
2387 typename _Distance>
2388 _BidirectionalIterator1
2389 __rotate_adaptive(_BidirectionalIterator1 __first,
2390 _BidirectionalIterator1 __middle,
2391 _BidirectionalIterator1 __last,
2392 _Distance __len1, _Distance __len2,
2393 _BidirectionalIterator2 __buffer,
2394 _Distance __buffer_size)
2395 {
2396 _BidirectionalIterator2 __buffer_end;
2397 if (__len1 > __len2 && __len2 <= __buffer_size)
2398 {
2399 if (__len2)
2400 {
2401 __buffer_end = _GLIBCXX_MOVE3(__middle, __last, __buffer);
2402 _GLIBCXX_MOVE_BACKWARD3(__first, __middle, __last);
2403 return _GLIBCXX_MOVE3(__buffer, __buffer_end, __first);
2404 }
2405 else
2406 return __first;
2407 }
2408 else if (__len1 <= __buffer_size)
2409 {
2410 if (__len1)
2411 {
2412 __buffer_end = _GLIBCXX_MOVE3(__first, __middle, __buffer);
2413 _GLIBCXX_MOVE3(__middle, __last, __first);
2414 return _GLIBCXX_MOVE_BACKWARD3(__buffer, __buffer_end, __last);
2415 }
2416 else
2417 return __last;
2418 }
2419 else
2420 return std::rotate(__first, __middle, __last);
2421 }
2422
2423 /// This is a helper function for the merge routines.
2424 template<typename _BidirectionalIterator, typename _Distance,
2425 typename _Pointer, typename _Compare>
2426 void
2427 __merge_adaptive(_BidirectionalIterator __first,
2428 _BidirectionalIterator __middle,
2429 _BidirectionalIterator __last,
2430 _Distance __len1, _Distance __len2,
2431 _Pointer __buffer, _Distance __buffer_size,
2432 _Compare __comp)
2433 {
2434 if (__len1 <= __len2 && __len1 <= __buffer_size)
2435 {
2436 _Pointer __buffer_end = _GLIBCXX_MOVE3(__first, __middle, __buffer);
2437 std::__move_merge_adaptive(__buffer, __buffer_end, __middle, __last,
2438 __first, __comp);
2439 }
2440 else if (__len2 <= __buffer_size)
2441 {
2442 _Pointer __buffer_end = _GLIBCXX_MOVE3(__middle, __last, __buffer);
2443 std::__move_merge_adaptive_backward(__first, __middle, __buffer,
2444 __buffer_end, __last, __comp);
2445 }
2446 else
2447 {
2448 _BidirectionalIterator __first_cut = __first;
2449 _BidirectionalIterator __second_cut = __middle;
2450 _Distance __len11 = 0;
2451 _Distance __len22 = 0;
2452 if (__len1 > __len2)
2453 {
2454 __len11 = __len1 / 2;
2455 std::advance(__first_cut, __len11);
2456 __second_cut
2457 = std::__lower_bound(__middle, __last, *__first_cut,
2458 __gnu_cxx::__ops::__iter_comp_val(__comp));
2459 __len22 = std::distance(__middle, __second_cut);
2460 }
2461 else
2462 {
2463 __len22 = __len2 / 2;
2464 std::advance(__second_cut, __len22);
2465 __first_cut
2466 = std::__upper_bound(__first, __middle, *__second_cut,
2467 __gnu_cxx::__ops::__val_comp_iter(__comp));
2468 __len11 = std::distance(__first, __first_cut);
2469 }
2470
2471 _BidirectionalIterator __new_middle
2472 = std::__rotate_adaptive(__first_cut, __middle, __second_cut,
2473 __len1 - __len11, __len22, __buffer,
2474 __buffer_size);
2475 std::__merge_adaptive(__first, __first_cut, __new_middle, __len11,
2476 __len22, __buffer, __buffer_size, __comp);
2477 std::__merge_adaptive(__new_middle, __second_cut, __last,
2478 __len1 - __len11,
2479 __len2 - __len22, __buffer,
2480 __buffer_size, __comp);
2481 }
2482 }
2483
2484 /// This is a helper function for the merge routines.
2485 template<typename _BidirectionalIterator, typename _Distance,
2486 typename _Compare>
2487 void
2488 __merge_without_buffer(_BidirectionalIterator __first,
2489 _BidirectionalIterator __middle,
2490 _BidirectionalIterator __last,
2491 _Distance __len1, _Distance __len2,
2492 _Compare __comp)
2493 {
2494 if (__len1 == 0 || __len2 == 0)
2495 return;
2496
2497 if (__len1 + __len2 == 2)
2498 {
2499 if (__comp(__middle, __first))
2500 std::iter_swap(__first, __middle);
2501 return;
2502 }
2503
2504 _BidirectionalIterator __first_cut = __first;
2505 _BidirectionalIterator __second_cut = __middle;
2506 _Distance __len11 = 0;
2507 _Distance __len22 = 0;
2508 if (__len1 > __len2)
2509 {
2510 __len11 = __len1 / 2;
2511 std::advance(__first_cut, __len11);
2512 __second_cut
2513 = std::__lower_bound(__middle, __last, *__first_cut,
2514 __gnu_cxx::__ops::__iter_comp_val(__comp));
2515 __len22 = std::distance(__middle, __second_cut);
2516 }
2517 else
2518 {
2519 __len22 = __len2 / 2;
2520 std::advance(__second_cut, __len22);
2521 __first_cut
2522 = std::__upper_bound(__first, __middle, *__second_cut,
2523 __gnu_cxx::__ops::__val_comp_iter(__comp));
2524 __len11 = std::distance(__first, __first_cut);
2525 }
2526
2527 _BidirectionalIterator __new_middle
2528 = std::rotate(__first_cut, __middle, __second_cut);
2529 std::__merge_without_buffer(__first, __first_cut, __new_middle,
2530 __len11, __len22, __comp);
2531 std::__merge_without_buffer(__new_middle, __second_cut, __last,
2532 __len1 - __len11, __len2 - __len22, __comp);
2533 }
2534
2535 template<typename _BidirectionalIterator, typename _Compare>
2536 void
2537 __inplace_merge(_BidirectionalIterator __first,
2538 _BidirectionalIterator __middle,
2539 _BidirectionalIterator __last,
2540 _Compare __comp)
2541 {
2542 typedef typename iterator_traits<_BidirectionalIterator>::value_type
2543 _ValueType;
2544 typedef typename iterator_traits<_BidirectionalIterator>::difference_type
2545 _DistanceType;
2546
2547 if (__first == __middle || __middle == __last)
2548 return;
2549
2550 const _DistanceType __len1 = std::distance(__first, __middle);
2551 const _DistanceType __len2 = std::distance(__middle, __last);
2552
2553 typedef _Temporary_buffer<_BidirectionalIterator, _ValueType> _TmpBuf;
2554 _TmpBuf __buf(__first, __len1 + __len2);
2555
2556 if (__buf.begin() == 0)
2557 std::__merge_without_buffer
2558 (__first, __middle, __last, __len1, __len2, __comp);
2559 else
2560 std::__merge_adaptive
2561 (__first, __middle, __last, __len1, __len2, __buf.begin(),
2562 _DistanceType(__buf.size()), __comp);
2563 }
2564
2565 /**
2566 * @brief Merges two sorted ranges in place.
2567 * @ingroup sorting_algorithms
2568 * @param __first An iterator.
2569 * @param __middle Another iterator.
2570 * @param __last Another iterator.
2571 * @return Nothing.
2572 *
2573 * Merges two sorted and consecutive ranges, [__first,__middle) and
2574 * [__middle,__last), and puts the result in [__first,__last). The
2575 * output will be sorted. The sort is @e stable, that is, for
2576 * equivalent elements in the two ranges, elements from the first
2577 * range will always come before elements from the second.
2578 *
2579 * If enough additional memory is available, this takes (__last-__first)-1
2580 * comparisons. Otherwise an NlogN algorithm is used, where N is
2581 * distance(__first,__last).
2582 */
2583 template<typename _BidirectionalIterator>
2584 inline void
2585 inplace_merge(_BidirectionalIterator __first,
2586 _BidirectionalIterator __middle,
2587 _BidirectionalIterator __last)
2588 {
2589 // concept requirements
2590 __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
2591 _BidirectionalIterator>)
2592 __glibcxx_function_requires(_LessThanComparableConcept<
2593 typename iterator_traits<_BidirectionalIterator>::value_type>)
2594 __glibcxx_requires_sorted(__first, __middle);
2595 __glibcxx_requires_sorted(__middle, __last);
2596 __glibcxx_requires_irreflexive(__first, __last);
2597
2598 std::__inplace_merge(__first, __middle, __last,
2599 __gnu_cxx::__ops::__iter_less_iter());
2600 }
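A minimal usage sketch (editorial): merging two sorted halves of the same vector in place.

#include <algorithm>
#include <vector>

int main()
{
  std::vector<int> v{1, 4, 7, 2, 3, 9};  // {1,4,7} and {2,3,9} each sorted
  std::inplace_merge(v.begin(), v.begin() + 3, v.end());
  // v == {1, 2, 3, 4, 7, 9}; for equal elements, those from the first
  // half come first (the merge is stable).
}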
2601
2602 /**
2603 * @brief Merges two sorted ranges in place.
2604 * @ingroup sorting_algorithms
2605 * @param __first An iterator.
2606 * @param __middle Another iterator.
2607 * @param __last Another iterator.
2608 * @param __comp A functor to use for comparisons.
2609 * @return Nothing.
2610 *
2611 * Merges two sorted and consecutive ranges, [__first,__middle) and
2612 * [__middle,__last), and puts the result in [__first,__last). The output will
2613 * be sorted. The sort is @e stable, that is, for equivalent
2614 * elements in the two ranges, elements from the first range will always
2615 * come before elements from the second.
2616 *
2617 * If enough additional memory is available, this takes (__last-__first)-1
2618 * comparisons. Otherwise an NlogN algorithm is used, where N is
2619 * distance(__first,__last).
2620 *
2621 * The comparison function should have the same effects on ordering as
2622 * the function used for the initial sort.
2623 */
2624 template<typename _BidirectionalIterator, typename _Compare>
2625 inline void
2626 inplace_merge(_BidirectionalIterator __first,
2627 _BidirectionalIterator __middle,
2628 _BidirectionalIterator __last,
2629 _Compare __comp)
2630 {
2631 // concept requirements
2632 __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
2633 _BidirectionalIterator>)
2634 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2635 typename iterator_traits<_BidirectionalIterator>::value_type,
2636 typename iterator_traits<_BidirectionalIterator>::value_type>)
2637 __glibcxx_requires_sorted_pred(__first, __middle, __comp);
2638 __glibcxx_requires_sorted_pred(__middle, __last, __comp);
2639 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
2640
2641 std::__inplace_merge(__first, __middle, __last,
2642 __gnu_cxx::__ops::__iter_comp_iter(__comp));
2643 }
2644
2645
2646 /// This is a helper function for the __merge_sort_loop routines.
2647 template<typename _InputIterator, typename _OutputIterator,
2648 typename _Compare>
2649 _OutputIterator
2650 __move_merge(_InputIterator __first1, _InputIterator __last1,
2651 _InputIterator __first2, _InputIterator __last2,
2652 _OutputIterator __result, _Compare __comp)
2653 {
2654 while (__first1 != __last1 && __first2 != __last2)
2655 {
2656 if (__comp(__first2, __first1))
2657 {
2658 *__result = _GLIBCXX_MOVE(*__first2);
2659 ++__first2;
2660 }
2661 else
2662 {
2663 *__result = _GLIBCXX_MOVE(*__first1);
2664 ++__first1;
2665 }
2666 ++__result;
2667 }
2668 return _GLIBCXX_MOVE3(__first2, __last2,
2669 _GLIBCXX_MOVE3(__first1, __last1,
2670 __result));
2671 }
2672
2673 template<typename _RandomAccessIterator1, typename _RandomAccessIterator2,
2674 typename _Distance, typename _Compare>
2675 void
2676 __merge_sort_loop(_RandomAccessIterator1 __first,
2677 _RandomAccessIterator1 __last,
2678 _RandomAccessIterator2 __result, _Distance __step_size,
2679 _Compare __comp)
2680 {
2681 const _Distance __two_step = 2 * __step_size;
2682
2683 while (__last - __first >= __two_step)
2684 {
2685 __result = std::__move_merge(__first, __first + __step_size,
2686 __first + __step_size,
2687 __first + __two_step,
2688 __result, __comp);
2689 __first += __two_step;
2690 }
2691 __step_size = std::min(_Distance(__last - __first), __step_size);
2692
2693 std::__move_merge(__first, __first + __step_size,
2694 __first + __step_size, __last, __result, __comp);
2695 }
2696
2697 template<typename _RandomAccessIterator, typename _Distance,
2698 typename _Compare>
2699 _GLIBCXX20_CONSTEXPR
2700 void
2701 __chunk_insertion_sort(_RandomAccessIterator __first,
2702 _RandomAccessIterator __last,
2703 _Distance __chunk_size, _Compare __comp)
2704 {
2705 while (__last - __first >= __chunk_size)
2706 {
2707 std::__insertion_sort(__first, __first + __chunk_size, __comp);
2708 __first += __chunk_size;
2709 }
2710 std::__insertion_sort(__first, __last, __comp);
2711 }
2712
2713 enum { _S_chunk_size = 7 };
2714
2715 template<typename _RandomAccessIterator, typename _Pointer, typename _Compare>
2716 void
2717 __merge_sort_with_buffer(_RandomAccessIterator __first,
2718 _RandomAccessIterator __last,
2719 _Pointer __buffer, _Compare __comp)
2720 {
2721 typedef typename iterator_traits<_RandomAccessIterator>::difference_type
2722 _Distance;
2723
2724 const _Distance __len = __last - __first;
2725 const _Pointer __buffer_last = __buffer + __len;
2726
2727 _Distance __step_size = _S_chunk_size;
2728 std::__chunk_insertion_sort(__first, __last, __step_size, __comp);
2729
2730 while (__step_size < __len)
2731 {
2732 std::__merge_sort_loop(__first, __last, __buffer,
2733 __step_size, __comp);
2734 __step_size *= 2;
2735 std::__merge_sort_loop(__buffer, __buffer_last, __first,
2736 __step_size, __comp);
2737 __step_size *= 2;
2738 }
2739 }
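__merge_sort_with_buffer first insertion-sorts chunks of _S_chunk_size (7) elements, then performs merge passes of doubling width, ping-ponging between the input range and the buffer so each pass merges out of place. A standalone bottom-up sketch of the same doubling passes (editorial; it merges through a scratch vector and copies back instead of ping-ponging):

#include <algorithm>
#include <cstddef>
#include <vector>

void bottom_up_mergesort(std::vector<int>& v)
{
  std::vector<int> buf(v.size());
  // Runs of length `width` are sorted; merge neighbours, double, repeat.
  for (std::size_t width = 1; width < v.size(); width *= 2)
    {
      for (std::size_t i = 0; i < v.size(); i += 2 * width)
        {
          std::size_t mid  = std::min(i + width, v.size());
          std::size_t last = std::min(i + 2 * width, v.size());
          std::merge(v.begin() + i, v.begin() + mid,
                     v.begin() + mid, v.begin() + last,
                     buf.begin() + i);
        }
      std::copy(buf.begin(), buf.end(), v.begin());
    }
}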
2740
2741 template<typename _RandomAccessIterator, typename _Pointer,
2742 typename _Distance, typename _Compare>
2743 void
2744 __stable_sort_adaptive(_RandomAccessIterator __first,
2745 _RandomAccessIterator __last,
2746 _Pointer __buffer, _Distance __buffer_size,
2747 _Compare __comp)
2748 {
2749 const _Distance __len = (__last - __first + 1) / 2;
2750 const _RandomAccessIterator __middle = __first + __len;
2751 if (__len > __buffer_size)
2752 {
2753 std::__stable_sort_adaptive(__first, __middle, __buffer,
2754 __buffer_size, __comp);
2755 std::__stable_sort_adaptive(__middle, __last, __buffer,
2756 __buffer_size, __comp);
2757 }
2758 else
2759 {
2760 std::__merge_sort_with_buffer(__first, __middle, __buffer, __comp);
2761 std::__merge_sort_with_buffer(__middle, __last, __buffer, __comp);
2762 }
2763 std::__merge_adaptive(__first, __middle, __last,
2764 _Distance(__middle - __first),
2765 _Distance(__last - __middle),
2766 __buffer, __buffer_size,
2767 __comp);
2768 }
2769
2770 /// This is a helper function for the stable sorting routines.
2771 template<typename _RandomAccessIterator, typename _Compare>
2772 void
2773 __inplace_stable_sort(_RandomAccessIterator __first,
2774 _RandomAccessIterator __last, _Compare __comp)
2775 {
2776 if (__last - __first < 15)
2777 {
2778 std::__insertion_sort(__first, __last, __comp);
2779 return;
2780 }
2781 _RandomAccessIterator __middle = __first + (__last - __first) / 2;
2782 std::__inplace_stable_sort(__first, __middle, __comp);
2783 std::__inplace_stable_sort(__middle, __last, __comp);
2784 std::__merge_without_buffer(__first, __middle, __last,
2785 __middle - __first,
2786 __last - __middle,
2787 __comp);
2788 }
2789
2790 // stable_sort
2791
2792 // Set algorithms: includes, set_union, set_intersection, set_difference,
2793 // set_symmetric_difference. All of these algorithms have the precondition
2794 // that their input ranges are sorted and the postcondition that their output
2795 // ranges are sorted.
2796
2797 template<typename _InputIterator1, typename _InputIterator2,
2798 typename _Compare>
2799 _GLIBCXX20_CONSTEXPR
2800 bool
2801 __includes(_InputIterator1 __first1, _InputIterator1 __last1,
2802 _InputIterator2 __first2, _InputIterator2 __last2,
2803 _Compare __comp)
2804 {
2805 while (__first1 != __last1 && __first2 != __last2)
2806 if (__comp(__first2, __first1))
2807 return false;
2808 else if (__comp(__first1, __first2))
2809 ++__first1;
2810 else
2811 {
2812 ++__first1;
2813 ++__first2;
2814 }
2815
2816 return __first2 == __last2;
2817 }
2818
2819 /**
2820 * @brief Determines whether all elements of a sequence exist in a range.
2821 * @param __first1 Start of search range.
2822 * @param __last1 End of search range.
2823 * @param __first2 Start of sequence.
2824 * @param __last2 End of sequence.
2825 * @return True if each element in [__first2,__last2) is contained in order
2826 * within [__first1,__last1). False otherwise.
2827 * @ingroup set_algorithms
2828 *
2829 * This operation expects both [__first1,__last1) and
2830 * [__first2,__last2) to be sorted. Searches for the presence of
2831 * each element in [__first2,__last2) within [__first1,__last1).
2832 * The iterators over each range only move forward, so this is a
2833 * linear algorithm. If an element in [__first2,__last2) is not
2834 * found before the search iterator reaches @p __last2, false is
2835 * returned.
2836 */
2837 template<typename _InputIterator1, typename _InputIterator2>
2838 _GLIBCXX20_CONSTEXPR
2839 inline bool
2840 includes(_InputIterator1 __first1, _InputIterator1 __last1,
2841 _InputIterator2 __first2, _InputIterator2 __last2)
2842 {
2843 // concept requirements
2844 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
2845 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
2846 __glibcxx_function_requires(_LessThanOpConcept<
2847 typename iterator_traits<_InputIterator1>::value_type,
2848 typename iterator_traits<_InputIterator2>::value_type>)
2849 __glibcxx_function_requires(_LessThanOpConcept<
2850 typename iterator_traits<_InputIterator2>::value_type,
2851 typename iterator_traits<_InputIterator1>::value_type>)
2852 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
2853 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
2854 __glibcxx_requires_irreflexive2(__first1, __last1);
2855 __glibcxx_requires_irreflexive2(__first2, __last2);
2856
2857 return std::__includes(__first1, __last1, __first2, __last2,
2858 __gnu_cxx::__ops::__iter_less_iter());
2859 }
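A minimal usage sketch (editorial): includes is a linear subset test on two sorted ranges.

#include <algorithm>
#include <vector>

int main()
{
  const std::vector<int> haystack{1, 2, 3, 4, 5};
  const std::vector<int> needles{2, 4};
  bool b = std::includes(haystack.begin(), haystack.end(),
                         needles.begin(), needles.end());  // true
  (void)b;
}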
2860
2861 /**
2862 * @brief Determines whether all elements of a sequence exist in a range
2863 * using comparison.
2864 * @ingroup set_algorithms
2865 * @param __first1 Start of search range.
2866 * @param __last1 End of search range.
2867 * @param __first2 Start of sequence.
2868 * @param __last2 End of sequence.
2869 * @param __comp Comparison function to use.
2870 * @return True if each element in [__first2,__last2) is contained
2871 * in order within [__first1,__last1) according to comp. False
2872 * otherwise.
2873 *
2874 * This operation expects both [__first1,__last1) and
2875 * [__first2,__last2) to be sorted. Searches for the presence of
2876 * each element in [__first2,__last2) within [__first1,__last1),
2877 * using comp to decide. The iterators over each range only move
2878 * forward, so this is a linear algorithm. If an element in
2879 * [__first2,__last2) is not found before the search iterator
2880 * reaches @p __last2, false is returned.
2881 */
2882 template<typename _InputIterator1, typename _InputIterator2,
2883 typename _Compare>
2884 _GLIBCXX20_CONSTEXPR
2885 inline bool
2886 includes(_InputIterator1 __first1, _InputIterator1 __last1,
2887 _InputIterator2 __first2, _InputIterator2 __last2,
2888 _Compare __comp)
2889 {
2890 // concept requirements
2891 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
2892 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
2893 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2894 typename iterator_traits<_InputIterator1>::value_type,
2895 typename iterator_traits<_InputIterator2>::value_type>)
2896 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2897 typename iterator_traits<_InputIterator2>::value_type,
2898 typename iterator_traits<_InputIterator1>::value_type>)
2899 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
2900 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
2901 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
2902 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
2903
2904 return std::__includes(__first1, __last1, __first2, __last2,
2905 __gnu_cxx::__ops::__iter_comp_iter(__comp));
2906 }
2907
2908 // nth_element
2909 // merge
2910 // set_difference
2911 // set_intersection
2912 // set_union
2913 // stable_sort
2914 // set_symmetric_difference
2915 // min_element
2916 // max_element
2917
2918 template<typename _BidirectionalIterator, typename _Compare>
2919 _GLIBCXX20_CONSTEXPR
2920 bool
2921 __next_permutation(_BidirectionalIterator __first,
2922 _BidirectionalIterator __last, _Compare __comp)
2923 {
2924 if (__first == __last)
2925 return false;
2926 _BidirectionalIterator __i = __first;
2927 ++__i;
2928 if (__i == __last)
2929 return false;
2930 __i = __last;
2931 --__i;
2932
2933 for(;;)
2934 {
2935 _BidirectionalIterator __ii = __i;
2936 --__i;
2937 if (__comp(__i, __ii))
2938 {
2939 _BidirectionalIterator __j = __last;
2940 while (!__comp(__i, --__j))
2941 {}
2942 std::iter_swap(__i, __j);
2943 std::__reverse(__ii, __last,
2944 std::__iterator_category(__first));
2945 return true;
2946 }
2947 if (__i == __first)
2948 {
2949 std::__reverse(__first, __last,
2950 std::__iterator_category(__first));
2951 return false;
2952 }
2953 }
2954 }
2955
2956 /**
2957 * @brief Permute range into the next @e dictionary ordering.
2958 * @ingroup sorting_algorithms
2959 * @param __first Start of range.
2960 * @param __last End of range.
2961 * @return False if wrapped to first permutation, true otherwise.
2962 *
2963 * Treats all permutations of the range as a set of @e dictionary sorted
2964 * sequences. Permutes the current sequence into the next one of this set.
2965 * Returns true if there are more sequences to generate. If the sequence
2966 * is the largest of the set, the smallest is generated and false returned.
2967 */
2968 template<typename _BidirectionalIterator>
2969 _GLIBCXX20_CONSTEXPR
2970 inline bool
2971 next_permutation(_BidirectionalIterator __first,
2972 _BidirectionalIterator __last)
2973 {
2974 // concept requirements
2975 __glibcxx_function_requires(_BidirectionalIteratorConcept<
2976 _BidirectionalIterator>)
2977 __glibcxx_function_requires(_LessThanComparableConcept<
2978 typename iterator_traits<_BidirectionalIterator>::value_type>)
2979 __glibcxx_requires_valid_range(__first, __last);
2980 __glibcxx_requires_irreflexive(__first, __last);
2981
2982 return std::__next_permutation
2983 (__first, __last, __gnu_cxx::__ops::__iter_less_iter());
2984 }
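A minimal usage sketch (editorial): starting from a sorted range, the do/while loop below visits every permutation exactly once; next_permutation returns false after the last one and leaves the range sorted again.

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
  std::vector<int> v{1, 2, 3};
  do
    std::printf("%d %d %d\n", v[0], v[1], v[2]);
  while (std::next_permutation(v.begin(), v.end()));  // prints all 6 orderings
}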
2985
2986 /**
2987 * @brief Permute range into the next @e dictionary ordering using
2988 * comparison functor.
2989 * @ingroup sorting_algorithms
2990 * @param __first Start of range.
2991 * @param __last End of range.
2992 * @param __comp A comparison functor.
2993 * @return False if wrapped to first permutation, true otherwise.
2994 *
2995 * Treats all permutations of the range [__first,__last) as a set of
2996 * @e dictionary sorted sequences ordered by @p __comp. Permutes the current
2997 * sequence into the next one of this set. Returns true if there are more
2998 * sequences to generate. If the sequence is the largest of the set, the
2999 * smallest is generated and false returned.
3000 */
3001 template<typename _BidirectionalIterator, typename _Compare>
3002 _GLIBCXX20_CONSTEXPR
3003 inline bool
3004 next_permutation(_BidirectionalIterator __first,
3005 _BidirectionalIterator __last, _Compare __comp)
3006 {
3007 // concept requirements
3008 __glibcxx_function_requires(_BidirectionalIteratorConcept<
3009 _BidirectionalIterator>)
3010 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
3011 typename iterator_traits<_BidirectionalIterator>::value_type,
3012 typename iterator_traits<_BidirectionalIterator>::value_type>)
3013 __glibcxx_requires_valid_range(__first, __last);
3014 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
3015
3016 return std::__next_permutation
3017 (__first, __last, __gnu_cxx::__ops::__iter_comp_iter(__comp));
3018 }
3019
3020 template<typename _BidirectionalIterator, typename _Compare>
3021 _GLIBCXX20_CONSTEXPR
3022 bool
3023 __prev_permutation(_BidirectionalIterator __first,
3024 _BidirectionalIterator __last, _Compare __comp)
3025 {
3026 if (__first == __last)
3027 return false;
3028 _BidirectionalIterator __i = __first;
3029 ++__i;
3030 if (__i == __last)
3031 return false;
3032 __i = __last;
3033 --__i;
3034
3035 for(;;)
3036 {
3037 _BidirectionalIterator __ii = __i;
3038 --__i;
3039 if (__comp(__ii, __i))
3040 {
3041 _BidirectionalIterator __j = __last;
3042 while (!__comp(--__j, __i))
3043 {}
3044 std::iter_swap(__i, __j);
3045 std::__reverse(__ii, __last,
3046 std::__iterator_category(__first));
3047 return true;
3048 }
3049 if (__i == __first)
3050 {
3051 std::__reverse(__first, __last,
3052 std::__iterator_category(__first));
3053 return false;
3054 }
3055 }
3056 }
3057
3058 /**
3059 * @brief Permute range into the previous @e dictionary ordering.
3060 * @ingroup sorting_algorithms
3061 * @param __first Start of range.
3062 * @param __last End of range.
3063 * @return False if wrapped to last permutation, true otherwise.
3064 *
3065 * Treats all permutations of the range as a set of @e dictionary sorted
3066 * sequences. Permutes the current sequence into the previous one of this
3067 * set. Returns true if there are more sequences to generate. If the
3068 * sequence is the smallest of the set, the largest is generated and false
3069 * returned.
3070 */
3071 template<typename _BidirectionalIterator>
3072 _GLIBCXX20_CONSTEXPR
3073 inline bool
3074 prev_permutation(_BidirectionalIterator __first,
3075 _BidirectionalIterator __last)
3076 {
3077 // concept requirements
3078 __glibcxx_function_requires(_BidirectionalIteratorConcept<
3079 _BidirectionalIterator>)
3080 __glibcxx_function_requires(_LessThanComparableConcept<
3081 typename iterator_traits<_BidirectionalIterator>::value_type>)
3082 __glibcxx_requires_valid_range(__first, __last);
3083 __glibcxx_requires_irreflexive(__first, __last);
3084
3085 return std::__prev_permutation(__first, __last,
3086 __gnu_cxx::__ops::__iter_less_iter());
3087 }
3088
3089 /**
3090 * @brief Permute range into the previous @e dictionary ordering using
3091 * comparison functor.
3092 * @ingroup sorting_algorithms
3093 * @param __first Start of range.
3094 * @param __last End of range.
3095 * @param __comp A comparison functor.
3096 * @return False if wrapped to last permutation, true otherwise.
3097 *
3098 * Treats all permutations of the range [__first,__last) as a set of
3099 * @e dictionary sorted sequences ordered by @p __comp. Permutes the current
3100 * sequence into the previous one of this set. Returns true if there are
3101 * more sequences to generate. If the sequence is the smallest of the set,
3102 * the largest is generated and false returned.
3103 */
3104 template<typename _BidirectionalIterator, typename _Compare>
3105 _GLIBCXX20_CONSTEXPR
3106 inline bool
3107 prev_permutation(_BidirectionalIterator __first,
3108 _BidirectionalIterator __last, _Compare __comp)
3109 {
3110 // concept requirements
3111 __glibcxx_function_requires(_BidirectionalIteratorConcept<
3112 _BidirectionalIterator>)
3113 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
3114 typename iterator_traits<_BidirectionalIterator>::value_type,
3115 typename iterator_traits<_BidirectionalIterator>::value_type>)
3116 __glibcxx_requires_valid_range(__first, __last);
3117 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
3118
3119 return std::__prev_permutation(__first, __last,
3120 __gnu_cxx::__ops::__iter_comp_iter(__comp));
3121 }
3122
3123 // replace
3124 // replace_if
3125
3126 template<typename _InputIterator, typename _OutputIterator,
3127 typename _Predicate, typename _Tp>
3128 _GLIBCXX20_CONSTEXPR
3129 _OutputIterator
3130 __replace_copy_if(_InputIterator __first, _InputIterator __last,
3131 _OutputIterator __result,
3132 _Predicate __pred, const _Tp& __new_value)
3133 {
3134 for (; __first != __last; ++__first, (void)++__result)
3135 if (__pred(__first))
3136 *__result = __new_value;
3137 else
3138 *__result = *__first;
3139 return __result;
3140 }
3141
3142 /**
3143 * @brief Copy a sequence, replacing each element of one value with another
3144 * value.
3145 * @param __first An input iterator.
3146 * @param __last An input iterator.
3147 * @param __result An output iterator.
3148 * @param __old_value The value to be replaced.
3149 * @param __new_value The replacement value.
3150 * @return The end of the output sequence, @p result+(last-first).
3151 *
3152 * Copies each element in the input range @p [__first,__last) to the
3153 * output range @p [__result,__result+(__last-__first)) replacing elements
3154 * equal to @p __old_value with @p __new_value.
3155 */
3156 template<typename _InputIterator, typename _OutputIterator, typename _Tp>
3157 _GLIBCXX20_CONSTEXPR
3158 inline _OutputIterator
3159 replace_copy(_InputIterator __first, _InputIterator __last,
3160 _OutputIterator __result,
3161 const _Tp& __old_value, const _Tp& __new_value)
3162 {
3163 // concept requirements
3164 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3165 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
3166 typename iterator_traits<_InputIterator>::value_type>)
3167 __glibcxx_function_requires(_EqualOpConcept<
3168 typename iterator_traits<_InputIterator>::value_type, _Tp>)
3169 __glibcxx_requires_valid_range(__first, __last);
3170
3171 return std::__replace_copy_if(__first, __last, __result,
3172 __gnu_cxx::__ops::__iter_equals_val(__old_value),
3173 __new_value);
3174 }
3175
3176 /**
3177 * @brief Copy a sequence, replacing each value for which a predicate
3178 * returns true with another value.
3179 * @ingroup mutating_algorithms
3180 * @param __first An input iterator.
3181 * @param __last An input iterator.
3182 * @param __result An output iterator.
3183 * @param __pred A predicate.
3184 * @param __new_value The replacement value.
3185 * @return The end of the output sequence, @p __result+(__last-__first).
3186 *
3187 * Copies each element in the range @p [__first,__last) to the range
3188 * @p [__result,__result+(__last-__first)) replacing elements for which
3189 * @p __pred returns true with @p __new_value.
3190 */
3191 template<typename _InputIterator, typename _OutputIterator,
3192 typename _Predicate, typename _Tp>
3193 _GLIBCXX20_CONSTEXPR
3194 inline _OutputIterator
3195 replace_copy_if(_InputIterator __first, _InputIterator __last,
3196 _OutputIterator __result,
3197 _Predicate __pred, const _Tp& __new_value)
3198 {
3199 // concept requirements
3200 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3201 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
3202 typename iterator_traits<_InputIterator>::value_type>)
3203 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
3204 typename iterator_traits<_InputIterator>::value_type>)
3205 __glibcxx_requires_valid_range(__first, __last);
3206
3207 return std::__replace_copy_if(__first, __last, __result,
3208 __gnu_cxx::__ops::__pred_iter(__pred),
3209 __new_value);
3210 }
3211
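   // Usage sketch (illustrative addition, not part of the header; assumes
   // <algorithm> and <vector> are included):
   //
   //   std::vector<int> in{1, 2, 1, 3};
   //   std::vector<int> out(in.size());
   //   std::replace_copy(in.begin(), in.end(), out.begin(), 1, 9);
   //   // out == {9, 2, 9, 3}; in is unchanged
   //   std::replace_copy_if(in.begin(), in.end(), out.begin(),
   //                        [](int x) { return x > 2; }, 0);
   //   // out == {1, 2, 1, 0}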
3212#if __cplusplus >= 201103L
3213 /**
3214 * @brief Determines whether the elements of a sequence are sorted.
3215 * @ingroup sorting_algorithms
3216 * @param __first An iterator.
3217 * @param __last Another iterator.
3218 * @return True if the elements are sorted, false otherwise.
3219 */
3220 template<typename _ForwardIterator>
3221 _GLIBCXX20_CONSTEXPR
3222 inline bool
3223 is_sorted(_ForwardIterator __first, _ForwardIterator __last)
3224 { return std::is_sorted_until(__first, __last) == __last; }
3225
3226 /**
3227 * @brief Determines whether the elements of a sequence are sorted
3228 * according to a comparison functor.
3229 * @ingroup sorting_algorithms
3230 * @param __first An iterator.
3231 * @param __last Another iterator.
3232 * @param __comp A comparison functor.
3233 * @return True if the elements are sorted, false otherwise.
3234 */
3235 template<typename _ForwardIterator, typename _Compare>
3236 _GLIBCXX20_CONSTEXPR
3237 inline bool
3238 is_sorted(_ForwardIterator __first, _ForwardIterator __last,
3239 _Compare __comp)
3240 { return std::is_sorted_until(__first, __last, __comp) == __last; }
3241
3242 template<typename _ForwardIterator, typename _Compare>
3243 _GLIBCXX20_CONSTEXPR
3244 _ForwardIterator
3245 __is_sorted_until(_ForwardIterator __first, _ForwardIterator __last,
3246 _Compare __comp)
3247 {
3248 if (__first == __last)
3249 return __last;
3250
3251 _ForwardIterator __next = __first;
3252 for (++__next; __next != __last; __first = __next, (void)++__next)
3253 if (__comp(__next, __first))
3254 return __next;
3255 return __next;
3256 }
3257
3258 /**
3259 * @brief Determines the end of a sorted sequence.
3260 * @ingroup sorting_algorithms
3261 * @param __first An iterator.
3262 * @param __last Another iterator.
3263 * @return An iterator pointing to the last iterator i in [__first, __last)
3264 * for which the range [__first, i) is sorted.
3265 */
3266 template<typename _ForwardIterator>
3267 _GLIBCXX20_CONSTEXPR
3268 inline _ForwardIterator
3269 is_sorted_until(_ForwardIterator __first, _ForwardIterator __last)
3270 {
3271 // concept requirements
3272 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3273 __glibcxx_function_requires(_LessThanComparableConcept<
3274 typename iterator_traits<_ForwardIterator>::value_type>)
3275 __glibcxx_requires_valid_range(__first, __last);
3276 __glibcxx_requires_irreflexive(__first, __last);
3277
3278 return std::__is_sorted_until(__first, __last,
3279 __gnu_cxx::__ops::__iter_less_iter());
3280 }
3281
3282 /**
3283 * @brief Determines the end of a sorted sequence using comparison functor.
3284 * @ingroup sorting_algorithms
3285 * @param __first An iterator.
3286 * @param __last Another iterator.
3287 * @param __comp A comparison functor.
3288 * @return An iterator pointing to the last iterator i in [__first, __last)
3289 * for which the range [__first, i) is sorted.
3290 */
3291 template<typename _ForwardIterator, typename _Compare>
3292 _GLIBCXX20_CONSTEXPR
3293 inline _ForwardIterator
3294 is_sorted_until(_ForwardIterator __first, _ForwardIterator __last,
3295 _Compare __comp)
3296 {
3297 // concept requirements
3298 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3299 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
3300 typename iterator_traits<_ForwardIterator>::value_type,
3301 typename iterator_traits<_ForwardIterator>::value_type>)
3302 __glibcxx_requires_valid_range(__first, __last);
3303 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
3304
3305 return std::__is_sorted_until(__first, __last,
3306 __gnu_cxx::__ops::__iter_comp_iter(__comp));
3307 }
3308
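   // Usage sketch (illustrative addition; assumes <algorithm> and <vector>):
   //
   //   std::vector<int> v{1, 2, 5, 3, 4};
   //   auto it = std::is_sorted_until(v.begin(), v.end());
   //   // it points at the 3: [begin, it) == {1, 2, 5} is the longest sorted prefix
   //   bool whole = std::is_sorted(v.begin(), v.end());   // false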
3309 /**
3310 * @brief Determines min and max at once as an ordered pair.
3311 * @ingroup sorting_algorithms
3312 * @param __a A thing of arbitrary type.
3313 * @param __b Another thing of arbitrary type.
3314 * @return A pair(__b, __a) if __b is smaller than __a, pair(__a,
3315 * __b) otherwise.
3316 */
3317 template<typename _Tp>
3318    _GLIBCXX14_CONSTEXPR
3319 inline pair<const _Tp&, const _Tp&>
3320 minmax(const _Tp& __a, const _Tp& __b)
3321 {
3322 // concept requirements
3323 __glibcxx_function_requires(_LessThanComparableConcept<_Tp>)
3324
3325 return __b < __a ? pair<const _Tp&, const _Tp&>(__b, __a)
3326 : pair<const _Tp&, const _Tp&>(__a, __b);
3327 }
3328
3329 /**
3330 * @brief Determines min and max at once as an ordered pair.
3331 * @ingroup sorting_algorithms
3332 * @param __a A thing of arbitrary type.
3333 * @param __b Another thing of arbitrary type.
3334 * @param __comp A @link comparison_functors comparison functor @endlink.
3335 * @return A pair(__b, __a) if __b is smaller than __a, pair(__a,
3336 * __b) otherwise.
3337 */
3338 template<typename _Tp, typename _Compare>
3339    _GLIBCXX14_CONSTEXPR
3340 inline pair<const _Tp&, const _Tp&>
3341 minmax(const _Tp& __a, const _Tp& __b, _Compare __comp)
3342 {
3343 return __comp(__b, __a) ? pair<const _Tp&, const _Tp&>(__b, __a)
3344 : pair<const _Tp&, const _Tp&>(__a, __b);
3345 }
3346
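   // Usage sketch (illustrative addition): note the return type holds
   // references, which is a well-known lifetime pitfall.
   //
   //   int a = 7, b = 3;
   //   auto p = std::minmax(a, b);   // pair<const int&, const int&>
   //   // p.first == 3, p.second == 7; binding the result of minmax(1, 2)
   //   // and using it after the full expression would dangle.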
3347 template<typename _ForwardIterator, typename _Compare>
3348    _GLIBCXX14_CONSTEXPR
3349 pair<_ForwardIterator, _ForwardIterator>
3350 __minmax_element(_ForwardIterator __first, _ForwardIterator __last,
3351 _Compare __comp)
3352 {
3353 _ForwardIterator __next = __first;
3354 if (__first == __last
3355 || ++__next == __last)
3356 return std::make_pair(__first, __first);
3357
3358 _ForwardIterator __min{}, __max{};
3359 if (__comp(__next, __first))
3360 {
3361 __min = __next;
3362 __max = __first;
3363 }
3364 else
3365 {
3366 __min = __first;
3367 __max = __next;
3368 }
3369
3370 __first = __next;
3371 ++__first;
3372
3373 while (__first != __last)
3374 {
3375 __next = __first;
3376 if (++__next == __last)
3377 {
3378 if (__comp(__first, __min))
3379 __min = __first;
3380 else if (!__comp(__first, __max))
3381 __max = __first;
3382 break;
3383 }
3384
3385 if (__comp(__next, __first))
3386 {
3387 if (__comp(__next, __min))
3388 __min = __next;
3389 if (!__comp(__first, __max))
3390 __max = __first;
3391 }
3392 else
3393 {
3394 if (__comp(__first, __min))
3395 __min = __first;
3396 if (!__comp(__next, __max))
3397 __max = __next;
3398 }
3399
3400 __first = __next;
3401 ++__first;
3402 }
3403
3404 return std::make_pair(__min, __max);
3405 }
3406
3407 /**
3408 * @brief Return a pair of iterators pointing to the minimum and maximum
3409 * elements in a range.
3410 * @ingroup sorting_algorithms
3411 * @param __first Start of range.
3412 * @param __last End of range.
3413 * @return make_pair(m, M), where m is the first iterator i in
3414 * [__first, __last) such that no other element in the range is
3415 * smaller, and where M is the last iterator i in [__first, __last)
3416 * such that no other element in the range is larger.
3417 */
3418 template<typename _ForwardIterator>
3419    _GLIBCXX14_CONSTEXPR
3420 inline pair<_ForwardIterator, _ForwardIterator>
3421 minmax_element(_ForwardIterator __first, _ForwardIterator __last)
3422 {
3423 // concept requirements
3424 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3425 __glibcxx_function_requires(_LessThanComparableConcept<
3426 typename iterator_traits<_ForwardIterator>::value_type>)
3427 __glibcxx_requires_valid_range(__first, __last);
3428 __glibcxx_requires_irreflexive(__first, __last);
3429
3430 return std::__minmax_element(__first, __last,
3431 __gnu_cxx::__ops::__iter_less_iter());
3432 }
3433
3434 /**
3435 * @brief Return a pair of iterators pointing to the minimum and maximum
3436 * elements in a range.
3437 * @ingroup sorting_algorithms
3438 * @param __first Start of range.
3439 * @param __last End of range.
3440 * @param __comp Comparison functor.
3441 * @return make_pair(m, M), where m is the first iterator i in
3442 * [__first, __last) such that no other element in the range is
3443 * smaller, and where M is the last iterator i in [__first, __last)
3444 * such that no other element in the range is larger.
3445 */
3446 template<typename _ForwardIterator, typename _Compare>
3447    _GLIBCXX14_CONSTEXPR
3448 inline pair<_ForwardIterator, _ForwardIterator>
3449 minmax_element(_ForwardIterator __first, _ForwardIterator __last,
3450 _Compare __comp)
3451 {
3452 // concept requirements
3453 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3454 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
3455 typename iterator_traits<_ForwardIterator>::value_type,
3456 typename iterator_traits<_ForwardIterator>::value_type>)
3457 __glibcxx_requires_valid_range(__first, __last);
3458 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
3459
3460 return std::__minmax_element(__first, __last,
3461 __gnu_cxx::__ops::__iter_comp_iter(__comp));
3462 }
3463
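   // Usage sketch (illustrative addition; assumes <algorithm>, <vector>,
   // and C++17 structured bindings):
   //
   //   std::vector<int> v{3, 9, 1, 4, 1, 9};
   //   auto [mn, mx] = std::minmax_element(v.begin(), v.end());
   //   // *mn == 1 (first minimum), *mx == 9 (last maximum), matching the
   //   // pair-at-a-time algorithm above, which needs roughly 3n/2 comparisons
   //   // instead of the 2n a separate min_element + max_element would make.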
3464 // N2722 + DR 915.
3465 template<typename _Tp>
3466    _GLIBCXX14_CONSTEXPR
3467 inline _Tp
3468 min(initializer_list<_Tp> __l)
3469 { return *std::min_element(__l.begin(), __l.end()); }
3470
3471 template<typename _Tp, typename _Compare>
3472    _GLIBCXX14_CONSTEXPR
3473 inline _Tp
3474 min(initializer_list<_Tp> __l, _Compare __comp)
3475 { return *std::min_element(__l.begin(), __l.end(), __comp); }
3476
3477 template<typename _Tp>
3478    _GLIBCXX14_CONSTEXPR
3479 inline _Tp
3480 max(initializer_list<_Tp> __l)
3481 { return *std::max_element(__l.begin(), __l.end()); }
3482
3483 template<typename _Tp, typename _Compare>
3484    _GLIBCXX14_CONSTEXPR
3485 inline _Tp
3486 max(initializer_list<_Tp> __l, _Compare __comp)
3487 { return *std::max_element(__l.begin(), __l.end(), __comp); }
3488
3489 template<typename _Tp>
3490    _GLIBCXX14_CONSTEXPR
3491 inline pair<_Tp, _Tp>
3492 minmax(initializer_list<_Tp> __l)
3493 {
3494 pair<const _Tp*, const _Tp*> __p =
3495 std::minmax_element(__l.begin(), __l.end());
3496 return std::make_pair(*__p.first, *__p.second);
3497 }
3498
3499 template<typename _Tp, typename _Compare>
3500    _GLIBCXX14_CONSTEXPR
3501 inline pair<_Tp, _Tp>
3502 minmax(initializer_list<_Tp> __l, _Compare __comp)
3503 {
3504 pair<const _Tp*, const _Tp*> __p =
3505 std::minmax_element(__l.begin(), __l.end(), __comp);
3506 return std::make_pair(*__p.first, *__p.second);
3507 }
3508
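   // Usage sketch (illustrative addition; assumes <algorithm>):
   //
   //   int lo = std::min({4, 1, 3});          // 1
   //   int hi = std::max({4, 1, 3});          // 4
   //   auto both = std::minmax({4, 1, 3});    // pair<int, int>{1, 4}
   //   // unlike the two-argument overloads, these return by value, so
   //   // there is no dangling-reference hazard with temporaries.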
3509 /**
3510 * @brief Checks whether a permutation of the second sequence is equal
3511 * to the first sequence.
3512 * @ingroup non_mutating_algorithms
3513 * @param __first1 Start of first range.
3514 * @param __last1 End of first range.
3515 * @param __first2 Start of second range.
3516 * @param __pred A binary predicate.
3517 * @return true if there exists a permutation of the elements in
3518 * the range [__first2, __first2 + (__last1 - __first1)),
3519 * beginning with ForwardIterator2 begin, such that
3520 * equal(__first1, __last1, __begin, __pred) returns true;
3521 * otherwise, returns false.
3522 */
3523 template<typename _ForwardIterator1, typename _ForwardIterator2,
3524 typename _BinaryPredicate>
3525 _GLIBCXX20_CONSTEXPR
3526 inline bool
3527 is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3528 _ForwardIterator2 __first2, _BinaryPredicate __pred)
3529 {
3530 // concept requirements
3531 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
3532 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
3533 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
3534 typename iterator_traits<_ForwardIterator1>::value_type,
3535 typename iterator_traits<_ForwardIterator2>::value_type>)
3536 __glibcxx_requires_valid_range(__first1, __last1);
3537
3538 return std::__is_permutation(__first1, __last1, __first2,
3539 __gnu_cxx::__ops::__iter_comp_iter(__pred));
3540 }
3541
3542#if __cplusplus > 201103L
3543 template<typename _ForwardIterator1, typename _ForwardIterator2,
3544 typename _BinaryPredicate>
3545 _GLIBCXX20_CONSTEXPR
3546 bool
3547 __is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3548 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
3549 _BinaryPredicate __pred)
3550 {
3551 using _Cat1
3552 = typename iterator_traits<_ForwardIterator1>::iterator_category;
3553 using _Cat2
3554 = typename iterator_traits<_ForwardIterator2>::iterator_category;
3555 using _It1_is_RA = is_same<_Cat1, random_access_iterator_tag>;
3556 using _It2_is_RA = is_same<_Cat2, random_access_iterator_tag>;
3557 constexpr bool __ra_iters = _It1_is_RA() && _It2_is_RA();
3558 if (__ra_iters)
3559 {
3560 auto __d1 = std::distance(__first1, __last1);
3561 auto __d2 = std::distance(__first2, __last2);
3562 if (__d1 != __d2)
3563 return false;
3564 }
3565
3566 // Efficiently compare identical prefixes: O(N) if sequences
3567 // have the same elements in the same order.
3568 for (; __first1 != __last1 && __first2 != __last2;
3569 ++__first1, (void)++__first2)
3570 if (!__pred(__first1, __first2))
3571 break;
3572
3573 if (__ra_iters)
3574 {
3575 if (__first1 == __last1)
3576 return true;
3577 }
3578 else
3579 {
3580 auto __d1 = std::distance(__first1, __last1);
3581 auto __d2 = std::distance(__first2, __last2);
3582 if (__d1 == 0 && __d2 == 0)
3583 return true;
3584 if (__d1 != __d2)
3585 return false;
3586 }
3587
3588 for (_ForwardIterator1 __scan = __first1; __scan != __last1; ++__scan)
3589 {
3590 if (__scan != std::__find_if(__first1, __scan,
3591 __gnu_cxx::__ops::__iter_comp_iter(__pred, __scan)))
3592 continue; // We've seen this one before.
3593
3594 auto __matches = std::__count_if(__first2, __last2,
3595 __gnu_cxx::__ops::__iter_comp_iter(__pred, __scan));
3596 if (0 == __matches
3597 || std::__count_if(__scan, __last1,
3598 __gnu_cxx::__ops::__iter_comp_iter(__pred, __scan))
3599 != __matches)
3600 return false;
3601 }
3602 return true;
3603 }
3604
3605 /**
3606     * @brief  Checks whether a permutation of the second sequence is equal
3607 * to the first sequence.
3608 * @ingroup non_mutating_algorithms
3609 * @param __first1 Start of first range.
3610 * @param __last1 End of first range.
3611 * @param __first2 Start of second range.
3612     * @param  __last2   End of second range.
3613 * @return true if there exists a permutation of the elements in the range
3614 * [__first2, __last2), beginning with ForwardIterator2 begin,
3615 * such that equal(__first1, __last1, begin) returns true;
3616 * otherwise, returns false.
3617 */
3618 template<typename _ForwardIterator1, typename _ForwardIterator2>
3619 _GLIBCXX20_CONSTEXPR
3620 inline bool
3621 is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3622 _ForwardIterator2 __first2, _ForwardIterator2 __last2)
3623 {
3624 __glibcxx_requires_valid_range(__first1, __last1);
3625 __glibcxx_requires_valid_range(__first2, __last2);
3626
3627 return
3628 std::__is_permutation(__first1, __last1, __first2, __last2,
3629 __gnu_cxx::__ops::__iter_equal_to_iter());
3630 }
3631
3632 /**
3633 * @brief Checks whether a permutation of the second sequence is equal
3634 * to the first sequence.
3635 * @ingroup non_mutating_algorithms
3636 * @param __first1 Start of first range.
3637 * @param __last1 End of first range.
3638 * @param __first2 Start of second range.
3639     * @param  __last2   End of second range.
3640 * @param __pred A binary predicate.
3641 * @return true if there exists a permutation of the elements in the range
3642 * [__first2, __last2), beginning with ForwardIterator2 begin,
3643 * such that equal(__first1, __last1, __begin, __pred) returns true;
3644 * otherwise, returns false.
3645 */
3646 template<typename _ForwardIterator1, typename _ForwardIterator2,
3647 typename _BinaryPredicate>
3648 _GLIBCXX20_CONSTEXPR
3649 inline bool
3650 is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3651 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
3652 _BinaryPredicate __pred)
3653 {
3654 __glibcxx_requires_valid_range(__first1, __last1);
3655 __glibcxx_requires_valid_range(__first2, __last2);
3656
3657 return std::__is_permutation(__first1, __last1, __first2, __last2,
3658 __gnu_cxx::__ops::__iter_comp_iter(__pred));
3659 }
3660
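   // Usage sketch (illustrative addition; assumes <algorithm> and <string>):
   //
   //   std::string a = "listen", b = "silent";
   //   bool anag = std::is_permutation(a.begin(), a.end(), b.begin(), b.end());
   //   // true; per the implementation above, any identical prefix is skipped
   //   // in O(N), and the quadratic count-matching fallback only runs on the
   //   // mismatched tails.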
3661#if __cplusplus > 201402L
3662
3663#define __cpp_lib_clamp 201603
3664
3665 /**
3666 * @brief Returns the value clamped between lo and hi.
3667 * @ingroup sorting_algorithms
3668 * @param __val A value of arbitrary type.
3669 * @param __lo A lower limit of arbitrary type.
3670 * @param __hi An upper limit of arbitrary type.
3671 * @return max(__val, __lo) if __val < __hi or min(__val, __hi) otherwise.
3672 */
3673 template<typename _Tp>
3674 constexpr const _Tp&
3675 clamp(const _Tp& __val, const _Tp& __lo, const _Tp& __hi)
3676 {
3677 __glibcxx_assert(!(__hi < __lo));
3678 return (__val < __lo) ? __lo : (__hi < __val) ? __hi : __val;
3679 }
3680
3681 /**
3682 * @brief Returns the value clamped between lo and hi.
3683 * @ingroup sorting_algorithms
3684 * @param __val A value of arbitrary type.
3685 * @param __lo A lower limit of arbitrary type.
3686 * @param __hi An upper limit of arbitrary type.
3687 * @param __comp A comparison functor.
3688 * @return max(__val, __lo, __comp) if __comp(__val, __hi)
3689 * or min(__val, __hi, __comp) otherwise.
3690 */
3691 template<typename _Tp, typename _Compare>
3692 constexpr const _Tp&
3693 clamp(const _Tp& __val, const _Tp& __lo, const _Tp& __hi, _Compare __comp)
3694 {
3695 __glibcxx_assert(!__comp(__hi, __lo));
3696 return __comp(__val, __lo) ? __lo : __comp(__hi, __val) ? __hi : __val;
3697 }
3698#endif // C++17
3699#endif // C++14
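   // Usage sketch (illustrative addition; clamp requires C++17 and
   // <algorithm>):
   //
   //   int x = std::clamp(42, 0, 10);   // 10 (42 exceeds the upper limit)
   //   int y = std::clamp(-5, 0, 10);   // 0  (-5 is below the lower limit)
   //   int z = std::clamp(7, 0, 10);    // 7  (already inside [lo, hi])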
3700
3701#ifdef _GLIBCXX_USE_C99_STDINT_TR1
3702 /**
3703 * @brief Generate two uniformly distributed integers using a
3704 * single distribution invocation.
3705 * @param __b0 The upper bound for the first integer.
3706 * @param __b1 The upper bound for the second integer.
3707 * @param __g A UniformRandomBitGenerator.
3708 * @return A pair (i, j) with i and j uniformly distributed
3709 * over [0, __b0) and [0, __b1), respectively.
3710 *
3711 * Requires: __b0 * __b1 <= __g.max() - __g.min().
3712 *
3713 * Using uniform_int_distribution with a range that is very
3714 * small relative to the range of the generator ends up wasting
3715 * potentially expensively generated randomness, since
3716 * uniform_int_distribution does not store leftover randomness
3717 * between invocations.
3718 *
3719 * If we know we want two integers in ranges that are sufficiently
3720 * small, we can compose the ranges, use a single distribution
3721 * invocation, and significantly reduce the waste.
3722 */
3723 template<typename _IntType, typename _UniformRandomBitGenerator>
3724 pair<_IntType, _IntType>
3725 __gen_two_uniform_ints(_IntType __b0, _IntType __b1,
3726 _UniformRandomBitGenerator&& __g)
3727 {
3728 _IntType __x
3729 = uniform_int_distribution<_IntType>{0, (__b0 * __b1) - 1}(__g);
3730 return std::make_pair(__x / __b1, __x % __b1);
3731 }
3732
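   // The range-composition trick above, restated with the public API as a
   // sketch (illustrative addition; assumes <random>): one draw from
   // [0, b0*b1) yields two independent uniform values via division and
   // remainder, halving the number of distribution invocations.
   //
   //   std::mt19937 g{42};
   //   std::uniform_int_distribution<unsigned> d{0, 6 * 10 - 1};
   //   unsigned x = d(g);
   //   unsigned i = x / 10;   // uniform over [0, 6)
   //   unsigned j = x % 10;   // uniform over [0, 10)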
3733 /**
3734 * @brief Shuffle the elements of a sequence using a uniform random
3735 * number generator.
3736 * @ingroup mutating_algorithms
3737 * @param __first A forward iterator.
3738 * @param __last A forward iterator.
3739 * @param __g A UniformRandomNumberGenerator (26.5.1.3).
3740 * @return Nothing.
3741 *
3742 * Reorders the elements in the range @p [__first,__last) using @p __g to
3743 * provide random numbers.
3744 */
3745 template<typename _RandomAccessIterator,
3746 typename _UniformRandomNumberGenerator>
3747 void
3748 shuffle(_RandomAccessIterator __first, _RandomAccessIterator __last,
3749 _UniformRandomNumberGenerator&& __g)
3750 {
3751 // concept requirements
3752 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
3753 _RandomAccessIterator>)
3754 __glibcxx_requires_valid_range(__first, __last);
3755
3756 if (__first == __last)
3757 return;
3758
3759 typedef typename iterator_traits<_RandomAccessIterator>::difference_type
3760 _DistanceType;
3761
3762 typedef typename std::make_unsigned<_DistanceType>::type __ud_type;
3763 typedef typename std::uniform_int_distribution<__ud_type> __distr_type;
3764 typedef typename __distr_type::param_type __p_type;
3765
3766 typedef typename remove_reference<_UniformRandomNumberGenerator>::type
3767 _Gen;
3768 typedef typename common_type<typename _Gen::result_type, __ud_type>::type
3769 __uc_type;
3770
3771 const __uc_type __urngrange = __g.max() - __g.min();
3772 const __uc_type __urange = __uc_type(__last - __first);
3773
3774 if (__urngrange / __urange >= __urange)
3775 // I.e. (__urngrange >= __urange * __urange) but without wrap issues.
3776 {
3777 _RandomAccessIterator __i = __first + 1;
3778
3779 // Since we know the range isn't empty, an even number of elements
3780 // means an uneven number of elements /to swap/, in which case we
3781 // do the first one up front:
3782
3783 if ((__urange % 2) == 0)
3784 {
3785 __distr_type __d{0, 1};
3786 std::iter_swap(__i++, __first + __d(__g));
3787 }
3788
3789 // Now we know that __last - __i is even, so we do the rest in pairs,
3790 // using a single distribution invocation to produce swap positions
3791 // for two successive elements at a time:
3792
3793 while (__i != __last)
3794 {
3795 const __uc_type __swap_range = __uc_type(__i - __first) + 1;
3796
3797 const pair<__uc_type, __uc_type> __pospos =
3798 __gen_two_uniform_ints(__swap_range, __swap_range + 1, __g);
3799
3800 std::iter_swap(__i++, __first + __pospos.first);
3801 std::iter_swap(__i++, __first + __pospos.second);
3802 }
3803
3804 return;
3805 }
3806
3807 __distr_type __d;
3808
3809 for (_RandomAccessIterator __i = __first + 1; __i != __last; ++__i)
3810 std::iter_swap(__i, __first + __d(__g, __p_type(0, __i - __first)));
3811 }
3812#endif
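   // Usage sketch (illustrative addition; assumes <algorithm>, <random>,
   // <vector>):
   //
   //   std::vector<int> v{1, 2, 3, 4, 5};
   //   std::mt19937 g{std::random_device{}()};
   //   std::shuffle(v.begin(), v.end(), g);
   //   // each of the 5! orderings is equally likely (given an ideal
   //   // generator); the pairing optimization above halves the number of
   //   // distribution calls when the generator's range is large enough.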
3813
3814#endif // C++11
3815
3816_GLIBCXX_BEGIN_NAMESPACE_ALGO
3817
3818 /**
3819 * @brief Apply a function to every element of a sequence.
3820 * @ingroup non_mutating_algorithms
3821 * @param __first An input iterator.
3822 * @param __last An input iterator.
3823 * @param __f A unary function object.
3824 * @return @p __f
3825 *
3826 * Applies the function object @p __f to each element in the range
3827     *  @p [__first,__last).  @p __f must not modify the order of the sequence.
3828 * If @p __f has a return value it is ignored.
3829 */
3830 template<typename _InputIterator, typename _Function>
3831 _GLIBCXX20_CONSTEXPR
3832 _Function
3833 for_each(_InputIterator __first, _InputIterator __last, _Function __f)
3834 {
3835 // concept requirements
3836 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3837 __glibcxx_requires_valid_range(__first, __last);
3838 for (; __first != __last; ++__first)
3839 __f(*__first);
3840 return __f; // N.B. [alg.foreach] says std::move(f) but it's redundant.
3841 }
3842
3843#if __cplusplus >= 201703L
3844 /**
3845 * @brief Apply a function to every element of a sequence.
3846 * @ingroup non_mutating_algorithms
3847 * @param __first An input iterator.
3848 * @param __n A value convertible to an integer.
3849 * @param __f A unary function object.
3850 * @return `__first+__n`
3851 *
3852 * Applies the function object `__f` to each element in the range
3853 * `[first, first+n)`. `__f` must not modify the order of the sequence.
3854 * If `__f` has a return value it is ignored.
3855 */
3856 template<typename _InputIterator, typename _Size, typename _Function>
3857 _GLIBCXX20_CONSTEXPR
3858 _InputIterator
3859 for_each_n(_InputIterator __first, _Size __n, _Function __f)
3860 {
3861 auto __n2 = std::__size_to_integer(__n);
3862 using _Cat = typename iterator_traits<_InputIterator>::iterator_category;
3863 if constexpr (is_base_of_v<random_access_iterator_tag, _Cat>)
3864 {
3865 if (__n2 <= 0)
3866 return __first;
3867 auto __last = __first + __n2;
3868 std::for_each(__first, __last, std::move(__f));
3869 return __last;
3870 }
3871 else
3872 {
3873 while (__n2-->0)
3874 {
3875 __f(*__first);
3876 ++__first;
3877 }
3878 return __first;
3879 }
3880 }
3881#endif // C++17
3882
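   // Usage sketch (illustrative addition; for_each_n requires C++17;
   // assumes <algorithm>, <cstdio>, <vector>):
   //
   //   std::vector<int> v{1, 2, 3, 4};
   //   std::for_each(v.begin(), v.end(), [](int& x) { x *= 2; });
   //   auto it = std::for_each_n(v.begin(), 2,
   //                             [](int x) { std::printf("%d ", x); });
   //   // prints "2 4 "; it == v.begin() + 2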
3883 /**
3884 * @brief Find the first occurrence of a value in a sequence.
3885 * @ingroup non_mutating_algorithms
3886 * @param __first An input iterator.
3887 * @param __last An input iterator.
3888 * @param __val The value to find.
3889 * @return The first iterator @c i in the range @p [__first,__last)
3890 * such that @c *i == @p __val, or @p __last if no such iterator exists.
3891 */
3892 template<typename _InputIterator, typename _Tp>
3893 _GLIBCXX20_CONSTEXPR
3894 inline _InputIterator
3895 find(_InputIterator __first, _InputIterator __last,
3896 const _Tp& __val)
3897 {
3898 // concept requirements
3899 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3900 __glibcxx_function_requires(_EqualOpConcept<
3901 typename iterator_traits<_InputIterator>::value_type, _Tp>)
3902 __glibcxx_requires_valid_range(__first, __last);
3903 return std::__find_if(__first, __last,
3904 __gnu_cxx::__ops::__iter_equals_val(__val));
3905 }
3906
3907 /**
3908 * @brief Find the first element in a sequence for which a
3909 * predicate is true.
3910 * @ingroup non_mutating_algorithms
3911 * @param __first An input iterator.
3912 * @param __last An input iterator.
3913 * @param __pred A predicate.
3914 * @return The first iterator @c i in the range @p [__first,__last)
3915 * such that @p __pred(*i) is true, or @p __last if no such iterator exists.
3916 */
3917 template<typename _InputIterator, typename _Predicate>
3918 _GLIBCXX20_CONSTEXPR
3919 inline _InputIterator
3920 find_if(_InputIterator __first, _InputIterator __last,
3921 _Predicate __pred)
3922 {
3923 // concept requirements
3924 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3925 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
3926 typename iterator_traits<_InputIterator>::value_type>)
3927 __glibcxx_requires_valid_range(__first, __last);
3928
3929 return std::__find_if(__first, __last,
3930 __gnu_cxx::__ops::__pred_iter(__pred));
3931 }
3932
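   // Usage sketch (illustrative addition; assumes <algorithm> and <vector>):
   //
   //   std::vector<int> v{5, 8, 13, 2};
   //   auto a = std::find(v.begin(), v.end(), 13);               // v.begin() + 2
   //   auto b = std::find_if(v.begin(), v.end(),
   //                         [](int x) { return x % 2 == 0; });  // v.begin() + 1
   //   // both return v.end() when nothing matches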
3933 /**
3934 * @brief Find element from a set in a sequence.
3935 * @ingroup non_mutating_algorithms
3936 * @param __first1 Start of range to search.
3937 * @param __last1 End of range to search.
3938 * @param __first2 Start of match candidates.
3939 * @param __last2 End of match candidates.
3940 * @return The first iterator @c i in the range
3941     *  @p [__first1,__last1) such that @c *i == @c *i2 for some iterator
3942     *  @c i2 in [__first2,__last2), or @p __last1 if no such iterator exists.
3943 *
3944 * Searches the range @p [__first1,__last1) for an element that is
3945 * equal to some element in the range [__first2,__last2). If
3946 * found, returns an iterator in the range [__first1,__last1),
3947 * otherwise returns @p __last1.
3948 */
3949 template<typename _InputIterator, typename _ForwardIterator>
3950 _GLIBCXX20_CONSTEXPR
3951 _InputIterator
3952 find_first_of(_InputIterator __first1, _InputIterator __last1,
3953 _ForwardIterator __first2, _ForwardIterator __last2)
3954 {
3955 // concept requirements
3956 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3957 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3958 __glibcxx_function_requires(_EqualOpConcept<
3959 typename iterator_traits<_InputIterator>::value_type,
3960 typename iterator_traits<_ForwardIterator>::value_type>)
3961 __glibcxx_requires_valid_range(__first1, __last1);
3962 __glibcxx_requires_valid_range(__first2, __last2);
3963
3964 for (; __first1 != __last1; ++__first1)
3965 for (_ForwardIterator __iter = __first2; __iter != __last2; ++__iter)
3966 if (*__first1 == *__iter)
3967 return __first1;
3968 return __last1;
3969 }
3970
3971 /**
3972 * @brief Find element from a set in a sequence using a predicate.
3973 * @ingroup non_mutating_algorithms
3974 * @param __first1 Start of range to search.
3975 * @param __last1 End of range to search.
3976 * @param __first2 Start of match candidates.
3977 * @param __last2 End of match candidates.
3978 * @param __comp Predicate to use.
3979 * @return The first iterator @c i in the range
3980     *  @p [__first1,__last1) such that @p __comp(*i,*i2) is true for some
3981     *  iterator @c i2 in [__first2,__last2), or @p __last1 if no such
3982     *  iterator exists.
3983 *
3984     *
3985 * Searches the range @p [__first1,__last1) for an element that is
3986 * equal to some element in the range [__first2,__last2). If
3987 * found, returns an iterator in the range [__first1,__last1),
3988 * otherwise returns @p __last1.
3989 */
3990 template<typename _InputIterator, typename _ForwardIterator,
3991 typename _BinaryPredicate>
3992 _GLIBCXX20_CONSTEXPR
3993 _InputIterator
3994 find_first_of(_InputIterator __first1, _InputIterator __last1,
3995 _ForwardIterator __first2, _ForwardIterator __last2,
3996 _BinaryPredicate __comp)
3997 {
3998 // concept requirements
3999 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
4000 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
4001 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
4002 typename iterator_traits<_InputIterator>::value_type,
4003 typename iterator_traits<_ForwardIterator>::value_type>)
4004 __glibcxx_requires_valid_range(__first1, __last1);
4005 __glibcxx_requires_valid_range(__first2, __last2);
4006
4007 for (; __first1 != __last1; ++__first1)
4008 for (_ForwardIterator __iter = __first2; __iter != __last2; ++__iter)
4009 if (__comp(*__first1, *__iter))
4010 return __first1;
4011 return __last1;
4012 }
4013
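   // Usage sketch (illustrative addition; assumes <algorithm> and <string>):
   //
   //   std::string s = "have a nice day";
   //   std::string vowels = "aeiou";
   //   auto it = std::find_first_of(s.begin(), s.end(),
   //                                vowels.begin(), vowels.end());
   //   // it points at the 'a' in "have"; the nested loops above make the
   //   // cost O(|s| * |vowels|)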
4014 /**
4015 * @brief Find two adjacent values in a sequence that are equal.
4016 * @ingroup non_mutating_algorithms
4017 * @param __first A forward iterator.
4018 * @param __last A forward iterator.
4019 * @return The first iterator @c i such that @c i and @c i+1 are both
4020 * valid iterators in @p [__first,__last) and such that @c *i == @c *(i+1),
4021 * or @p __last if no such iterator exists.
4022 */
4023 template<typename _ForwardIterator>
4024 _GLIBCXX20_CONSTEXPR
4025 inline _ForwardIterator
4026 adjacent_find(_ForwardIterator __first, _ForwardIterator __last)
4027 {
4028 // concept requirements
4029 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
4030 __glibcxx_function_requires(_EqualityComparableConcept<
4031 typename iterator_traits<_ForwardIterator>::value_type>)
4032 __glibcxx_requires_valid_range(__first, __last);
4033
4034 return std::__adjacent_find(__first, __last,
4035 __gnu_cxx::__ops::__iter_equal_to_iter());
4036 }
4037
4038 /**
4039 * @brief Find two adjacent values in a sequence using a predicate.
4040 * @ingroup non_mutating_algorithms
4041 * @param __first A forward iterator.
4042 * @param __last A forward iterator.
4043 * @param __binary_pred A binary predicate.
4044 * @return The first iterator @c i such that @c i and @c i+1 are both
4045 * valid iterators in @p [__first,__last) and such that
4046 * @p __binary_pred(*i,*(i+1)) is true, or @p __last if no such iterator
4047 * exists.
4048 */
4049 template<typename _ForwardIterator, typename _BinaryPredicate>
4050 _GLIBCXX20_CONSTEXPR
4051 inline _ForwardIterator
4052 adjacent_find(_ForwardIterator __first, _ForwardIterator __last,
4053 _BinaryPredicate __binary_pred)
4054 {
4055 // concept requirements
4056 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
4057 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
4058 typename iterator_traits<_ForwardIterator>::value_type,
4059 typename iterator_traits<_ForwardIterator>::value_type>)
4060 __glibcxx_requires_valid_range(__first, __last);
4061
4062 return std::__adjacent_find(__first, __last,
4063 __gnu_cxx::__ops::__iter_comp_iter(__binary_pred));
4064 }
4065
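   // Usage sketch (illustrative addition; assumes <algorithm>, <functional>,
   // <vector>):
   //
   //   std::vector<int> v{1, 3, 3, 2};
   //   auto eq = std::adjacent_find(v.begin(), v.end());          // index 1 (3,3)
   //   auto gt = std::adjacent_find(v.begin(), v.end(),
   //                                std::greater<int>());         // index 2 (3 > 2)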
4066 /**
4067 * @brief Count the number of copies of a value in a sequence.
4068 * @ingroup non_mutating_algorithms
4069 * @param __first An input iterator.
4070 * @param __last An input iterator.
4071 * @param __value The value to be counted.
4072 * @return The number of iterators @c i in the range @p [__first,__last)
4073 * for which @c *i == @p __value
4074 */
4075 template<typename _InputIterator, typename _Tp>
4076 _GLIBCXX20_CONSTEXPR
4077 inline typename iterator_traits<_InputIterator>::difference_type
4078 count(_InputIterator __first, _InputIterator __last, const _Tp& __value)
4079 {
4080 // concept requirements
4081 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
4082 __glibcxx_function_requires(_EqualOpConcept<
4083 typename iterator_traits<_InputIterator>::value_type, _Tp>)
4084 __glibcxx_requires_valid_range(__first, __last);
4085
4086 return std::__count_if(__first, __last,
4087 __gnu_cxx::__ops::__iter_equals_val(__value));
4088 }
4089
4090 /**
4091 * @brief Count the elements of a sequence for which a predicate is true.
4092 * @ingroup non_mutating_algorithms
4093 * @param __first An input iterator.
4094 * @param __last An input iterator.
4095 * @param __pred A predicate.
4096 * @return The number of iterators @c i in the range @p [__first,__last)
4097 * for which @p __pred(*i) is true.
4098 */
4099 template<typename _InputIterator, typename _Predicate>
4100 _GLIBCXX20_CONSTEXPR
4101 inline typename iterator_traits<_InputIterator>::difference_type
4102 count_if(_InputIterator __first, _InputIterator __last, _Predicate __pred)
4103 {
4104 // concept requirements
4105 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
4106 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
4107 typename iterator_traits<_InputIterator>::value_type>)
4108 __glibcxx_requires_valid_range(__first, __last);
4109
4110 return std::__count_if(__first, __last,
4111 __gnu_cxx::__ops::__pred_iter(__pred));
4112 }
4113
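   // Usage sketch (illustrative addition; assumes <algorithm> and <vector>):
   //
   //   std::vector<int> v{1, 2, 2, 3, 2};
   //   auto n  = std::count(v.begin(), v.end(), 2);                    // 3
   //   auto no = std::count_if(v.begin(), v.end(),
   //                           [](int x) { return x % 2 == 1; });      // 2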
4114 /**
4115 * @brief Search a sequence for a matching sub-sequence.
4116 * @ingroup non_mutating_algorithms
4117 * @param __first1 A forward iterator.
4118 * @param __last1 A forward iterator.
4119 * @param __first2 A forward iterator.
4120 * @param __last2 A forward iterator.
4121 * @return The first iterator @c i in the range @p
4122 * [__first1,__last1-(__last2-__first2)) such that @c *(i+N) == @p
4123 * *(__first2+N) for each @c N in the range @p
4124 * [0,__last2-__first2), or @p __last1 if no such iterator exists.
4125 *
4126 * Searches the range @p [__first1,__last1) for a sub-sequence that
4127 * compares equal value-by-value with the sequence given by @p
4128 * [__first2,__last2) and returns an iterator to the first element
4129 * of the sub-sequence, or @p __last1 if the sub-sequence is not
4130 * found.
4131 *
4132 * Because the sub-sequence must lie completely within the range @p
4133 * [__first1,__last1) it must start at a position less than @p
4134 * __last1-(__last2-__first2) where @p __last2-__first2 is the
4135 * length of the sub-sequence.
4136 *
4137 * This means that the returned iterator @c i will be in the range
4138 * @p [__first1,__last1-(__last2-__first2))
4139 */
4140 template<typename _ForwardIterator1, typename _ForwardIterator2>
4141 _GLIBCXX20_CONSTEXPR
4142 inline _ForwardIterator1
4143 search(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
4144 _ForwardIterator2 __first2, _ForwardIterator2 __last2)
4145 {
4146 // concept requirements
4147 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
4148 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
4149 __glibcxx_function_requires(_EqualOpConcept<
4150 typename iterator_traits<_ForwardIterator1>::value_type,
4151 typename iterator_traits<_ForwardIterator2>::value_type>)
4152 __glibcxx_requires_valid_range(__first1, __last1);
4153 __glibcxx_requires_valid_range(__first2, __last2);
4154
4155 return std::__search(__first1, __last1, __first2, __last2,
4156 __gnu_cxx::__ops::__iter_equal_to_iter());
4157 }
4158
4159 /**
4160 * @brief Search a sequence for a matching sub-sequence using a predicate.
4161 * @ingroup non_mutating_algorithms
4162 * @param __first1 A forward iterator.
4163 * @param __last1 A forward iterator.
4164 * @param __first2 A forward iterator.
4165 * @param __last2 A forward iterator.
4166 * @param __predicate A binary predicate.
4167 * @return The first iterator @c i in the range
4168 * @p [__first1,__last1-(__last2-__first2)) such that
4169 * @p __predicate(*(i+N),*(__first2+N)) is true for each @c N in the range
4170 * @p [0,__last2-__first2), or @p __last1 if no such iterator exists.
4171 *
4172 * Searches the range @p [__first1,__last1) for a sub-sequence that
4173 * compares equal value-by-value with the sequence given by @p
4174 * [__first2,__last2), using @p __predicate to determine equality,
4175 * and returns an iterator to the first element of the
4176 * sub-sequence, or @p __last1 if no such iterator exists.
4177 *
4178 * @see search(_ForwardIter1, _ForwardIter1, _ForwardIter2, _ForwardIter2)
4179 */
4180 template<typename _ForwardIterator1, typename _ForwardIterator2,
4181 typename _BinaryPredicate>
4182 _GLIBCXX20_CONSTEXPR
4183 inline _ForwardIterator1
4184 search(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
4185 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
4186 _BinaryPredicate __predicate)
4187 {
4188 // concept requirements
4189 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
4190 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
4191 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
4192 typename iterator_traits<_ForwardIterator1>::value_type,
4193 typename iterator_traits<_ForwardIterator2>::value_type>)
4194 __glibcxx_requires_valid_range(__first1, __last1);
4195 __glibcxx_requires_valid_range(__first2, __last2);
4196
4197 return std::__search(__first1, __last1, __first2, __last2,
4198 __gnu_cxx::__ops::__iter_comp_iter(__predicate));
4199 }
4200
4201 /**
4202 * @brief Search a sequence for a number of consecutive values.
4203 * @ingroup non_mutating_algorithms
4204 * @param __first A forward iterator.
4205 * @param __last A forward iterator.
4206 * @param __count The number of consecutive values.
4207 * @param __val The value to find.
4208 * @return The first iterator @c i in the range @p
4209 * [__first,__last-__count) such that @c *(i+N) == @p __val for
4210 * each @c N in the range @p [0,__count), or @p __last if no such
4211 * iterator exists.
4212 *
4213     *  Searches the range @p [__first,__last) for @p __count consecutive elements
4214 * equal to @p __val.
4215 */
4216 template<typename _ForwardIterator, typename _Integer, typename _Tp>
4217 _GLIBCXX20_CONSTEXPR
4218 inline _ForwardIterator
4219 search_n(_ForwardIterator __first, _ForwardIterator __last,
4220 _Integer __count, const _Tp& __val)
4221 {
4222 // concept requirements
4223 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
4224 __glibcxx_function_requires(_EqualOpConcept<
4225 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
4226 __glibcxx_requires_valid_range(__first, __last);
4227
4228 return std::__search_n(__first, __last, __count,
4229 __gnu_cxx::__ops::__iter_equals_val(__val));
4230 }
4231
4232
4233 /**
4234 * @brief Search a sequence for a number of consecutive values using a
4235 * predicate.
4236 * @ingroup non_mutating_algorithms
4237 * @param __first A forward iterator.
4238 * @param __last A forward iterator.
4239 * @param __count The number of consecutive values.
4240 * @param __val The value to find.
4241 * @param __binary_pred A binary predicate.
4242 * @return The first iterator @c i in the range @p
4243 * [__first,__last-__count) such that @p
4244 * __binary_pred(*(i+N),__val) is true for each @c N in the range
4245 * @p [0,__count), or @p __last if no such iterator exists.
4246 *
4247 * Searches the range @p [__first,__last) for @p __count
4248 * consecutive elements for which the predicate returns true.
4249 */
4250 template<typename _ForwardIterator, typename _Integer, typename _Tp,
4251 typename _BinaryPredicate>
4252 _GLIBCXX20_CONSTEXPR
4253 inline _ForwardIterator
4254 search_n(_ForwardIterator __first, _ForwardIterator __last,
4255 _Integer __count, const _Tp& __val,
4256 _BinaryPredicate __binary_pred)
4257 {
4258 // concept requirements
4259 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
4260 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
4261 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
4262 __glibcxx_requires_valid_range(__first, __last);
4263
4264 return std::__search_n(__first, __last, __count,
4265 __gnu_cxx::__ops::__iter_comp_val(__binary_pred, __val));
4266 }
4267
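   // Usage sketch for search and search_n (illustrative addition; assumes
   // <algorithm> and <string>):
   //
   //   std::string s = "mississippi";
   //   std::string needle = "issi";
   //   auto it = std::search(s.begin(), s.end(), needle.begin(), needle.end());
   //   // it points at index 1, the first "issi"
   //   auto run = std::search_n(s.begin(), s.end(), 2, 's');
   //   // run points at index 2, the first of two consecutive 's' characters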
4268#if __cplusplus > 201402L
4269 /** @brief Search a sequence using a Searcher object.
4270 *
4271 * @param __first A forward iterator.
4272 * @param __last A forward iterator.
4273 * @param __searcher A callable object.
4274 * @return @p __searcher(__first,__last).first
4275 */
4276 template<typename _ForwardIterator, typename _Searcher>
4277 _GLIBCXX20_CONSTEXPR
4278 inline _ForwardIterator
4279 search(_ForwardIterator __first, _ForwardIterator __last,
4280 const _Searcher& __searcher)
4281 { return __searcher(__first, __last).first; }
4282#endif
4283
4284 /**
4285 * @brief Perform an operation on a sequence.
4286 * @ingroup mutating_algorithms
4287 * @param __first An input iterator.
4288 * @param __last An input iterator.
4289 * @param __result An output iterator.
4290 * @param __unary_op A unary operator.
4291 * @return An output iterator equal to @p __result+(__last-__first).
4292 *
4293 * Applies the operator to each element in the input range and assigns
4294 * the results to successive elements of the output sequence.
4295 * Evaluates @p *(__result+N)=unary_op(*(__first+N)) for each @c N in the
4296 * range @p [0,__last-__first).
4297 *
4298 * @p unary_op must not alter its argument.
4299 */
4300 template<typename _InputIterator, typename _OutputIterator,
4301 typename _UnaryOperation>
4302 _GLIBCXX20_CONSTEXPR
4303 _OutputIterator
4304 transform(_InputIterator __first, _InputIterator __last,
4305 _OutputIterator __result, _UnaryOperation __unary_op)
4306 {
4307 // concept requirements
4308 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
4309 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4310 // "the type returned by a _UnaryOperation"
4311 __typeof__(__unary_op(*__first))>)
4312 __glibcxx_requires_valid_range(__first, __last);
4313
4314 for (; __first != __last; ++__first, (void)++__result)
4315 *__result = __unary_op(*__first);
4316 return __result;
4317 }
4318
4319 /**
4320 * @brief Perform an operation on corresponding elements of two sequences.
4321 * @ingroup mutating_algorithms
4322 * @param __first1 An input iterator.
4323 * @param __last1 An input iterator.
4324 * @param __first2 An input iterator.
4325 * @param __result An output iterator.
4326 * @param __binary_op A binary operator.
4327     *  @return   An output iterator equal to @p __result+(__last-__first).
4328 *
4329 * Applies the operator to the corresponding elements in the two
4330 * input ranges and assigns the results to successive elements of the
4331 * output sequence.
4332 * Evaluates @p
4333 * *(__result+N)=__binary_op(*(__first1+N),*(__first2+N)) for each
4334 * @c N in the range @p [0,__last1-__first1).
4335 *
4336 * @p binary_op must not alter either of its arguments.
4337 */
4338 template<typename _InputIterator1, typename _InputIterator2,
4339 typename _OutputIterator, typename _BinaryOperation>
4340 _GLIBCXX20_CONSTEXPR
4341 _OutputIterator
4342 transform(_InputIterator1 __first1, _InputIterator1 __last1,
4343 _InputIterator2 __first2, _OutputIterator __result,
4344 _BinaryOperation __binary_op)
4345 {
4346 // concept requirements
4347 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
4348 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
4349 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4350 // "the type returned by a _BinaryOperation"
4351 __typeof__(__binary_op(*__first1,*__first2))>)
4352 __glibcxx_requires_valid_range(__first1, __last1);
4353
4354 for (; __first1 != __last1; ++__first1, (void)++__first2, ++__result)
4355 *__result = __binary_op(*__first1, *__first2);
4356 return __result;
4357 }
4358
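   // Usage sketch for both transform overloads (illustrative addition;
   // assumes <algorithm> and <vector>):
   //
   //   std::vector<int> a{1, 2, 3}, b{10, 20, 30}, out(3);
   //   std::transform(a.begin(), a.end(), out.begin(),
   //                  [](int x) { return x * x; });          // out == {1, 4, 9}
   //   std::transform(a.begin(), a.end(), b.begin(), out.begin(),
   //                  [](int x, int y) { return x + y; });   // out == {11, 22, 33}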
4359 /**
4360 * @brief Replace each occurrence of one value in a sequence with another
4361 * value.
4362 * @ingroup mutating_algorithms
4363 * @param __first A forward iterator.
4364 * @param __last A forward iterator.
4365 * @param __old_value The value to be replaced.
4366 * @param __new_value The replacement value.
4367 * @return replace() returns no value.
4368 *
4369 * For each iterator @c i in the range @p [__first,__last) if @c *i ==
4370 * @p __old_value then the assignment @c *i = @p __new_value is performed.
4371 */
4372 template<typename _ForwardIterator, typename _Tp>
4373 _GLIBCXX20_CONSTEXPR
4374 void
4375 replace(_ForwardIterator __first, _ForwardIterator __last,
4376 const _Tp& __old_value, const _Tp& __new_value)
4377 {
4378 // concept requirements
4379 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
4380 _ForwardIterator>)
4381 __glibcxx_function_requires(_EqualOpConcept<
4382 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
4383 __glibcxx_function_requires(_ConvertibleConcept<_Tp,
4384 typename iterator_traits<_ForwardIterator>::value_type>)
4385 __glibcxx_requires_valid_range(__first, __last);
4386
4387 for (; __first != __last; ++__first)
4388 if (*__first == __old_value)
4389 *__first = __new_value;
4390 }
4391
4392 /**
4393 * @brief Replace each value in a sequence for which a predicate returns
4394 * true with another value.
4395 * @ingroup mutating_algorithms
4396 * @param __first A forward iterator.
4397 * @param __last A forward iterator.
4398 * @param __pred A predicate.
4399 * @param __new_value The replacement value.
4400 * @return replace_if() returns no value.
4401 *
4402 * For each iterator @c i in the range @p [__first,__last) if @p __pred(*i)
4403 * is true then the assignment @c *i = @p __new_value is performed.
4404 */
4405 template<typename _ForwardIterator, typename _Predicate, typename _Tp>
4406 _GLIBCXX20_CONSTEXPR
4407 void
4408 replace_if(_ForwardIterator __first, _ForwardIterator __last,
4409 _Predicate __pred, const _Tp& __new_value)
4410 {
4411 // concept requirements
4412 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
4413 _ForwardIterator>)
4414 __glibcxx_function_requires(_ConvertibleConcept<_Tp,
4415 typename iterator_traits<_ForwardIterator>::value_type>)
4416 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
4417 typename iterator_traits<_ForwardIterator>::value_type>)
4418 __glibcxx_requires_valid_range(__first, __last);
4419
4420 for (; __first != __last; ++__first)
4421 if (__pred(*__first))
4422 *__first = __new_value;
4423 }
4424
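   // Usage sketch (illustrative addition; assumes <algorithm> and <vector>):
   //
   //   std::vector<int> v{1, 2, 1, 3};
   //   std::replace(v.begin(), v.end(), 1, 9);              // v == {9, 2, 9, 3}
   //   std::replace_if(v.begin(), v.end(),
   //                   [](int x) { return x > 2; }, 0);     // v == {0, 2, 0, 0}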
4425 /**
4426 * @brief Assign the result of a function object to each value in a
4427 * sequence.
4428 * @ingroup mutating_algorithms
4429 * @param __first A forward iterator.
4430 * @param __last A forward iterator.
4431 * @param __gen A function object taking no arguments and returning
4432 * std::iterator_traits<_ForwardIterator>::value_type
4433 * @return generate() returns no value.
4434 *
4435 * Performs the assignment @c *i = @p __gen() for each @c i in the range
4436 * @p [__first,__last).
4437 */
4438 template<typename _ForwardIterator, typename _Generator>
4439 _GLIBCXX20_CONSTEXPR
4440 void
4441 generate(_ForwardIterator __first, _ForwardIterator __last,
4442 _Generator __gen)
4443 {
4444 // concept requirements
4445 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
4446 __glibcxx_function_requires(_GeneratorConcept<_Generator,
4447 typename iterator_traits<_ForwardIterator>::value_type>)
4448 __glibcxx_requires_valid_range(__first, __last);
4449
4450 for (; __first != __last; ++__first)
4451 *__first = __gen();
4452 }
4453
4454 /**
4455 * @brief Assign the result of a function object to each value in a
4456 * sequence.
4457 * @ingroup mutating_algorithms
4458 * @param __first A forward iterator.
4459 * @param __n The length of the sequence.
4460 * @param __gen A function object taking no arguments and returning
4461 * std::iterator_traits<_ForwardIterator>::value_type
4462 * @return The end of the sequence, @p __first+__n
4463 *
4464 * Performs the assignment @c *i = @p __gen() for each @c i in the range
4465 * @p [__first,__first+__n).
4466 *
4467 * If @p __n is negative, the function does nothing and returns @p __first.
4468 */
4469 // _GLIBCXX_RESOLVE_LIB_DEFECTS
4470 // DR 865. More algorithms that throw away information
4471 // DR 426. search_n(), fill_n(), and generate_n() with negative n
4472 template<typename _OutputIterator, typename _Size, typename _Generator>
4473 _GLIBCXX20_CONSTEXPR
4474 _OutputIterator
4475 generate_n(_OutputIterator __first, _Size __n, _Generator __gen)
4476 {
4477 // concept requirements
4478 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4479 // "the type returned by a _Generator"
4480 __typeof__(__gen())>)
4481
4482 typedef __decltype(std::__size_to_integer(__n)) _IntSize;
4483 for (_IntSize __niter = std::__size_to_integer(__n);
4484 __niter > 0; --__niter, (void) ++__first)
4485 *__first = __gen();
4486 return __first;
4487 }
4488
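   // Usage sketch (illustrative addition; assumes <algorithm> and <vector>):
   //
   //   std::vector<int> v(5);
   //   int n = 0;
   //   std::generate(v.begin(), v.end(), [&n] { return n++; });  // {0,1,2,3,4}
   //   std::generate_n(v.begin(), 3, [] { return 7; });          // {7,7,7,3,4}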
4489 /**
4490 * @brief Copy a sequence, removing consecutive duplicate values.
4491 * @ingroup mutating_algorithms
4492 * @param __first An input iterator.
4493 * @param __last An input iterator.
4494 * @param __result An output iterator.
4495 * @return An iterator designating the end of the resulting sequence.
4496 *
4497 * Copies each element in the range @p [__first,__last) to the range
4498 * beginning at @p __result, except that only the first element is copied
4499 * from groups of consecutive elements that compare equal.
4500 * unique_copy() is stable, so the relative order of elements that are
4501 * copied is unchanged.
4502 *
4503 * _GLIBCXX_RESOLVE_LIB_DEFECTS
4504 * DR 241. Does unique_copy() require CopyConstructible and Assignable?
4505 *
4506 * _GLIBCXX_RESOLVE_LIB_DEFECTS
4507 * DR 538. 241 again: Does unique_copy() require CopyConstructible and
4508 * Assignable?
4509 */
4510 template<typename _InputIterator, typename _OutputIterator>
4511 _GLIBCXX20_CONSTEXPR
4512 inline _OutputIterator
4513 unique_copy(_InputIterator __first, _InputIterator __last,
4514 _OutputIterator __result)
4515 {
4516 // concept requirements
4517 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
4518 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4519 typename iterator_traits<_InputIterator>::value_type>)
4520 __glibcxx_function_requires(_EqualityComparableConcept<
4521 typename iterator_traits<_InputIterator>::value_type>)
4522 __glibcxx_requires_valid_range(__first, __last);
4523
4524 if (__first == __last)
4525 return __result;
4526 return std::__unique_copy(__first, __last, __result,
4527 __gnu_cxx::__ops::__iter_equal_to_iter(),
4528 std::__iterator_category(__first),
4529 std::__iterator_category(__result));
4530 }
4531
4532 /**
4533 * @brief Copy a sequence, removing consecutive values using a predicate.
4534 * @ingroup mutating_algorithms
4535 * @param __first An input iterator.
4536 * @param __last An input iterator.
4537 * @param __result An output iterator.
4538 * @param __binary_pred A binary predicate.
4539 * @return An iterator designating the end of the resulting sequence.
4540 *
4541 * Copies each element in the range @p [__first,__last) to the range
4542 * beginning at @p __result, except that only the first element is copied
4543 * from groups of consecutive elements for which @p __binary_pred returns
4544 * true.
4545 * unique_copy() is stable, so the relative order of elements that are
4546 * copied is unchanged.
4547 *
4548 * _GLIBCXX_RESOLVE_LIB_DEFECTS
4549 * DR 241. Does unique_copy() require CopyConstructible and Assignable?
4550 */
4551 template<typename _InputIterator, typename _OutputIterator,
4552 typename _BinaryPredicate>
4553 _GLIBCXX20_CONSTEXPR
4554 inline _OutputIterator
4555 unique_copy(_InputIterator __first, _InputIterator __last,
4556 _OutputIterator __result,
4557 _BinaryPredicate __binary_pred)
4558 {
4559 // concept requirements -- predicates checked later
4560 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
4561 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4562 typename iterator_traits<_InputIterator>::value_type>)
4563 __glibcxx_requires_valid_range(__first, __last);
4564
4565 if (__first == __last)
4566 return __result;
4567 return std::__unique_copy(__first, __last, __result,
4568 __gnu_cxx::__ops::__iter_comp_iter(__binary_pred),
4569 std::__iterator_category(__first),
4570 std::__iterator_category(__result));
4571 }
4572
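   // Usage sketch (illustrative addition; assumes <algorithm>, <iterator>,
   // <vector>):
   //
   //   std::vector<int> v{1, 1, 2, 2, 2, 3, 1};
   //   std::vector<int> out;
   //   std::unique_copy(v.begin(), v.end(), std::back_inserter(out));
   //   // out == {1, 2, 3, 1}: only *consecutive* duplicates are collapsed,
   //   // and the relative order of the kept elements is preserved (stable).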
4573#if _GLIBCXX_HOSTED
4574 /**
4575 * @brief Randomly shuffle the elements of a sequence.
4576 * @ingroup mutating_algorithms
4577 * @param __first A forward iterator.
4578 * @param __last A forward iterator.
4579 * @return Nothing.
4580 *
4581 * Reorder the elements in the range @p [__first,__last) using a random
4582 * distribution, so that every possible ordering of the sequence is
4583 * equally likely.
4584 */
4585 template<typename _RandomAccessIterator>
4586 inline void
4587 random_shuffle(_RandomAccessIterator __first, _RandomAccessIterator __last)
4588 {
4589 // concept requirements
4590 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4591 _RandomAccessIterator>)
4592 __glibcxx_requires_valid_range(__first, __last);
4593
4594 if (__first != __last)
4595 for (_RandomAccessIterator __i = __first + 1; __i != __last; ++__i)
4596 {
4597 // XXX rand() % N is not uniformly distributed
4598 _RandomAccessIterator __j = __first
4599 + std::rand() % ((__i - __first) + 1);
4600 if (__i != __j)
4601 std::iter_swap(__i, __j);
4602 }
4603 }
4604#endif
4605
4606 /**
4607 * @brief Shuffle the elements of a sequence using a random number
4608 * generator.
4609 * @ingroup mutating_algorithms
4610 * @param __first A forward iterator.
4611 * @param __last A forward iterator.
4612 * @param __rand The RNG functor or function.
4613 * @return Nothing.
4614 *
4615 * Reorders the elements in the range @p [__first,__last) using @p __rand to
4616 * provide a random distribution. Calling @p __rand(N) for a positive
4617 * integer @p N should return a randomly chosen integer from the
4618 * range [0,N).
4619 */
4620 template<typename _RandomAccessIterator, typename _RandomNumberGenerator>
4621 void
4622 random_shuffle(_RandomAccessIterator __first, _RandomAccessIterator __last,
4623#if __cplusplus >= 201103L
4624 _RandomNumberGenerator&& __rand)
4625#else
4626 _RandomNumberGenerator& __rand)
4627#endif
4628 {
4629 // concept requirements
4630 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4631 _RandomAccessIterator>)
4632 __glibcxx_requires_valid_range(__first, __last);
4633
4634 if (__first == __last)
4635 return;
4636 for (_RandomAccessIterator __i = __first + 1; __i != __last; ++__i)
4637 {
4638 _RandomAccessIterator __j = __first + __rand((__i - __first) + 1);
4639 if (__i != __j)
4640 std::iter_swap(__i, __j);
4641 }
4642 }
4643
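   // Sketch of the two-argument form with a caller-supplied source of
   // randomness (illustrative addition; assumes <cstdlib> and <vector>).
   // Note that random_shuffle was deprecated in C++14 and removed in C++17
   // in favor of std::shuffle:
   //
   //   std::vector<int> v{1, 2, 3, 4};
   //   std::random_shuffle(v.begin(), v.end(),
   //                       [](std::ptrdiff_t n) { return std::rand() % n; });
   //   // rand() % n is slightly biased unless n divides RAND_MAX + 1,
   //   // as the XXX comment in the one-argument overload above warns.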
4644
4645 /**
4646 * @brief Move elements for which a predicate is true to the beginning
4647 * of a sequence.
4648 * @ingroup mutating_algorithms
4649 * @param __first A forward iterator.
4650 * @param __last A forward iterator.
4651 * @param __pred A predicate functor.
4652 * @return An iterator @p middle such that @p __pred(i) is true for each
4653 * iterator @p i in the range @p [__first,middle) and false for each @p i
4654 * in the range @p [middle,__last).
4655 *
4656 * @p __pred must not modify its operand. @p partition() does not preserve
4657 * the relative ordering of elements in each group, use
4658 * @p stable_partition() if this is needed.
4659 */
4660 template<typename _ForwardIterator, typename _Predicate>
4661 _GLIBCXX20_CONSTEXPR
4662 inline _ForwardIterator
4663 partition(_ForwardIterator __first, _ForwardIterator __last,
4664 _Predicate __pred)
4665 {
4666 // concept requirements
4667 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
4668 _ForwardIterator>)
4669 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
4670 typename iterator_traits<_ForwardIterator>::value_type>)
4671 __glibcxx_requires_valid_range(__first, __last);
4672
4673 return std::__partition(__first, __last, __pred,
4674 std::__iterator_category(__first));
4675 }
4676
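   // Usage sketch (illustrative addition; assumes <algorithm> and <vector>):
   //
   //   std::vector<int> v{1, 2, 3, 4, 5, 6};
   //   auto mid = std::partition(v.begin(), v.end(),
   //                             [](int x) { return x % 2 == 0; });
   //   // [begin, mid) holds the even values, [mid, end) the odd ones; the
   //   // order within each group is unspecified (use stable_partition to
   //   // preserve it).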
4677
4678 /**
4679 * @brief Sort the smallest elements of a sequence.
4680 * @ingroup sorting_algorithms
4681 * @param __first An iterator.
4682 * @param __middle Another iterator.
4683 * @param __last Another iterator.
4684 * @return Nothing.
4685 *
4686 * Sorts the smallest @p (__middle-__first) elements in the range
4687     *  @p [__first,__last) and moves them to the range @p [__first,__middle). The
4688 * order of the remaining elements in the range @p [__middle,__last) is
4689 * undefined.
4690 * After the sort if @e i and @e j are iterators in the range
4691 * @p [__first,__middle) such that i precedes j and @e k is an iterator in
4692 * the range @p [__middle,__last) then *j<*i and *k<*i are both false.
4693 */
4694 template<typename _RandomAccessIterator>
4695 _GLIBCXX20_CONSTEXPR
4696 inline void
4697 partial_sort(_RandomAccessIterator __first,
4698 _RandomAccessIterator __middle,
4699 _RandomAccessIterator __last)
4700 {
4701 // concept requirements
4702 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4703 _RandomAccessIterator>)
4704 __glibcxx_function_requires(_LessThanComparableConcept<
4705 typename iterator_traits<_RandomAccessIterator>::value_type>)
4706 __glibcxx_requires_valid_range(__first, __middle);
4707 __glibcxx_requires_valid_range(__middle, __last);
4708 __glibcxx_requires_irreflexive(__first, __last);
4709
4710 std::__partial_sort(__first, __middle, __last,
4711 __gnu_cxx::__ops::__iter_less_iter());
4712 }
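A minimal usage sketch of partial_sort (values are illustrative): sort only the three smallest elements into place.

#include <algorithm>
#include <vector>

int main()
{
  std::vector<int> v{9, 4, 7, 1, 8, 2};
  std::partial_sort(v.begin(), v.begin() + 3, v.end());
  // v now begins 1, 2, 4; the order of the remaining tail is unspecified.
}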
4713
4714 /**
4715 * @brief Sort the smallest elements of a sequence using a predicate
4716 * for comparison.
4717 * @ingroup sorting_algorithms
4718 * @param __first An iterator.
4719 * @param __middle Another iterator.
4720 * @param __last Another iterator.
4721 * @param __comp A comparison functor.
4722 * @return Nothing.
4723 *
4724 * Sorts the smallest @p (__middle-__first) elements in the range
4725 * @p [__first,__last) and moves them to the range @p [__first,__middle). The
4726 * order of the remaining elements in the range @p [__middle,__last) is
4727 * undefined.
4728 * After the sort if @e i and @e j are iterators in the range
4729 * @p [__first,__middle) such that i precedes j and @e k is an iterator in
4730 * the range @p [__middle,__last) then @p __comp(*j,*i) and @p __comp(*k,*i)
4731 * are both false.
4732 */
4733 template<typename _RandomAccessIterator, typename _Compare>
4734 _GLIBCXX20_CONSTEXPR
4735 inline void
4736 partial_sort(_RandomAccessIterator __first,
4737 _RandomAccessIterator __middle,
4738 _RandomAccessIterator __last,
4739 _Compare __comp)
4740 {
4741 // concept requirements
4742 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4743 _RandomAccessIterator>)
4744 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
4745 typename iterator_traits<_RandomAccessIterator>::value_type,
4746 typename iterator_traits<_RandomAccessIterator>::value_type>)
4747 __glibcxx_requires_valid_range(__first, __middle);
4748 __glibcxx_requires_valid_range(__middle, __last);
4749 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
4750
4751 std::__partial_sort(__first, __middle, __last,
4752 __gnu_cxx::__ops::__iter_comp_iter(__comp));
4753 }
4754
4755 /**
4756 * @brief Sort a sequence just enough to find a particular position.
4757 * @ingroup sorting_algorithms
4758 * @param __first An iterator.
4759 * @param __nth Another iterator.
4760 * @param __last Another iterator.
4761 * @return Nothing.
4762 *
4763 * Rearranges the elements in the range @p [__first,__last) so that @p *__nth
4764 * is the same element that would have been in that position had the
4765 * whole sequence been sorted. The elements either side of @p *__nth are
4766 * not completely sorted, but for any iterator @e i in the range
4767 * @p [__first,__nth) and any iterator @e j in the range @p [__nth,__last) it
4768 * holds that *j < *i is false.
4769 */
4770 template<typename _RandomAccessIterator>
4771 _GLIBCXX20_CONSTEXPR
4772 inline void
4773 nth_element(_RandomAccessIterator __first, _RandomAccessIterator __nth,
4774 _RandomAccessIterator __last)
4775 {
4776 // concept requirements
4777 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4778 _RandomAccessIterator>)
4779 __glibcxx_function_requires(_LessThanComparableConcept<
4780 typename iterator_traits<_RandomAccessIterator>::value_type>)
4781 __glibcxx_requires_valid_range(__first, __nth);
4782 __glibcxx_requires_valid_range(__nth, __last);
4783 __glibcxx_requires_irreflexive(__first, __last);
4784
4785 if (__first == __last || __nth == __last)
4786 return;
4787
4788 std::__introselect(__first, __nth, __last,
4789 std::__lg(__last - __first) * 2,
4790 __gnu_cxx::__ops::__iter_less_iter());
4791 }
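A minimal usage sketch of nth_element (values are illustrative): place the median where a full sort would put it, without sorting everything else.

#include <algorithm>
#include <vector>

int main()
{
  std::vector<int> v{5, 1, 9, 3, 7};
  auto nth = v.begin() + v.size() / 2;
  std::nth_element(v.begin(), nth, v.end());
  // *nth == 5, the value a fully sorted sequence would hold at this position;
  // everything before it is <= 5 and everything after it is >= 5.
}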
4792
4793 /**
4794 * @brief Sort a sequence just enough to find a particular position
4795 * using a predicate for comparison.
4796 * @ingroup sorting_algorithms
4797 * @param __first An iterator.
4798 * @param __nth Another iterator.
4799 * @param __last Another iterator.
4800 * @param __comp A comparison functor.
4801 * @return Nothing.
4802 *
4803 * Rearranges the elements in the range @p [__first,__last) so that @p *__nth
4804 * is the same element that would have been in that position had the
4805 * whole sequence been sorted. The elements either side of @p *__nth are
4806 * not completely sorted, but for any iterator @e i in the range
4807 * @p [__first,__nth) and any iterator @e j in the range @p [__nth,__last) it
4808 * holds that @p __comp(*j,*i) is false.
4809 */
4810 template<typename _RandomAccessIterator, typename _Compare>
4811 _GLIBCXX20_CONSTEXPR
4812 inline void
4813 nth_element(_RandomAccessIterator __first, _RandomAccessIterator __nth,
4814 _RandomAccessIterator __last, _Compare __comp)
4815 {
4816 // concept requirements
4817 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4818 _RandomAccessIterator>)
4819 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
4820 typename iterator_traits<_RandomAccessIterator>::value_type,
4821 typename iterator_traits<_RandomAccessIterator>::value_type>)
4822 __glibcxx_requires_valid_range(__first, __nth);
4823 __glibcxx_requires_valid_range(__nth, __last);
4824 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
4825
4826 if (__first == __last || __nth == __last)
4827 return;
4828
4829 std::__introselect(__first, __nth, __last,
4830 std::__lg(__last - __first) * 2,
4831 __gnu_cxx::__ops::__iter_comp_iter(__comp));
4832 }
4833
4834 /**
4835 * @brief Sort the elements of a sequence.
4836 * @ingroup sorting_algorithms
4837 * @param __first An iterator.
4838 * @param __last Another iterator.
4839 * @return Nothing.
4840 *
4841 * Sorts the elements in the range @p [__first,__last) in ascending order,
4842 * such that for each iterator @e i in the range @p [__first,__last-1),
4843 * *(i+1)<*i is false.
4844 *
4845 * The relative ordering of equivalent elements is not preserved, use
4846 * @p stable_sort() if this is needed.
4847 */
4848 template<typename _RandomAccessIterator>
4849 _GLIBCXX20_CONSTEXPR
4850 inline void
4851 sort(_RandomAccessIterator __first, _RandomAccessIterator __last)
4852 {
4853 // concept requirements
4854 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4855 _RandomAccessIterator>)
4856 __glibcxx_function_requires(_LessThanComparableConcept<
4857 typename iterator_traits<_RandomAccessIterator>::value_type>)
4858 __glibcxx_requires_valid_range(__first, __last);
4859 __glibcxx_requires_irreflexive(__first, __last);
4860
4861 std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
4862 }
4863
4864 /**
4865 * @brief Sort the elements of a sequence using a predicate for comparison.
4866 * @ingroup sorting_algorithms
4867 * @param __first An iterator.
4868 * @param __last Another iterator.
4869 * @param __comp A comparison functor.
4870 * @return Nothing.
4871 *
4872 * Sorts the elements in the range @p [__first,__last) in ascending order,
4873 * such that @p __comp(*(i+1),*i) is false for every iterator @e i in the
4874 * range @p [__first,__last-1).
4875 *
4876 * The relative ordering of equivalent elements is not preserved, use
4877 * @p stable_sort() if this is needed.
4878 */
4879 template<typename _RandomAccessIterator, typename _Compare>
4880 _GLIBCXX20_CONSTEXPR
4881 inline void
4882 sort(_RandomAccessIterator __first, _RandomAccessIterator __last,
4883 _Compare __comp)
4884 {
4885 // concept requirements
4886 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4887 _RandomAccessIterator>)
4888 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
4889 typename iterator_traits<_RandomAccessIterator>::value_type,
4890 typename iterator_traits<_RandomAccessIterator>::value_type>)
4891 __glibcxx_requires_valid_range(__first, __last);
4892 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
4893
4894 std::__sort(__first, __last, __gnu_cxx::__ops::__iter_comp_iter(__comp));
4895 }
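A minimal usage sketch of the comparator overload (values are illustrative): sort in descending order with std::greater.

#include <algorithm>
#include <functional>
#include <vector>

int main()
{
  std::vector<int> v{3, 1, 4, 1, 5};
  std::sort(v.begin(), v.end(), std::greater<int>());
  // v == {5, 4, 3, 1, 1}; equivalent elements may be reordered,
  // use stable_sort if their relative order matters.
}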
4896
4897 template<typename _InputIterator1, typename _InputIterator2,
4898 typename _OutputIterator, typename _Compare>
4899 _GLIBCXX20_CONSTEXPR
4900 _OutputIterator
4901 __merge(_InputIterator1 __first1, _InputIterator1 __last1,
4902 _InputIterator2 __first2, _InputIterator2 __last2,
4903 _OutputIterator __result, _Compare __comp)
4904 {
4905 while (__first1 != __last1 && __first2 != __last2)
4906 {
4907 if (__comp(__first2, __first1))
4908 {
4909 *__result = *__first2;
4910 ++__first2;
4911 }
4912 else
4913 {
4914 *__result = *__first1;
4915 ++__first1;
4916 }
4917 ++__result;
4918 }
4919 return std::copy(__first2, __last2,
4920 std::copy(__first1, __last1, __result));
4921 }
4922
4923 /**
4924 * @brief Merges two sorted ranges.
4925 * @ingroup sorting_algorithms
4926 * @param __first1 An iterator.
4927 * @param __first2 Another iterator.
4928 * @param __last1 Another iterator.
4929 * @param __last2 Another iterator.
4930 * @param __result An iterator pointing to the start of the output range.
4931 * @return An output iterator equal to @p __result + (__last1 - __first1)
4932 * + (__last2 - __first2).
4933 *
4934 * Merges the ranges @p [__first1,__last1) and @p [__first2,__last2) into
4935 * the sorted range @p [__result, __result + (__last1-__first1) +
4936 * (__last2-__first2)). Both input ranges must be sorted, and the
4937 * output range must not overlap with either of the input ranges.
4938 * The sort is @e stable, that is, for equivalent elements in the
4939 * two ranges, elements from the first range will always come
4940 * before elements from the second.
4941 */
4942 template<typename _InputIterator1, typename _InputIterator2,
4943 typename _OutputIterator>
4944 _GLIBCXX20_CONSTEXPR
4945 inline _OutputIterator
4946 merge(_InputIterator1 __first1, _InputIterator1 __last1,
4947 _InputIterator2 __first2, _InputIterator2 __last2,
4948 _OutputIterator __result)
4949 {
4950 // concept requirements
4951 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
4952 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
4953 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4954 typename iterator_traits<_InputIterator1>::value_type>)
4955 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4956 typename iterator_traits<_InputIterator2>::value_type>)
4957 __glibcxx_function_requires(_LessThanOpConcept<
4958 typename iterator_traits<_InputIterator2>::value_type,
4959 typename iterator_traits<_InputIterator1>::value_type>)
4960 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
4961 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
4962 __glibcxx_requires_irreflexive2(__first1, __last1);
4963 __glibcxx_requires_irreflexive2(__first2, __last2);
4964
4965 return _GLIBCXX_STD_A::__merge(__first1, __last1,
4966 __first2, __last2, __result,
4967 __gnu_cxx::__ops::__iter_less_iter());
4968 }
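A minimal usage sketch of merge (values are illustrative), showing the documented stability: the 3 taken from the first range precedes the 3 taken from the second.

#include <algorithm>
#include <vector>

int main()
{
  std::vector<int> a{1, 3, 5}, b{2, 3, 6};
  std::vector<int> out(a.size() + b.size());
  std::merge(a.begin(), a.end(), b.begin(), b.end(), out.begin());
  // out == {1, 2, 3, 3, 5, 6}
}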
4969
4970 /**
4971 * @brief Merges two sorted ranges.
4972 * @ingroup sorting_algorithms
4973 * @param __first1 An iterator.
4974 * @param __first2 Another iterator.
4975 * @param __last1 Another iterator.
4976 * @param __last2 Another iterator.
4977 * @param __result An iterator pointing to the start of the output range.
4978 * @param __comp A functor to use for comparisons.
4979 * @return An output iterator equal to @p __result + (__last1 - __first1)
4980 * + (__last2 - __first2).
4981 *
4982 * Merges the ranges @p [__first1,__last1) and @p [__first2,__last2) into
4983 * the sorted range @p [__result, __result + (__last1-__first1) +
4984 * (__last2-__first2)). Both input ranges must be sorted, and the
4985 * output range must not overlap with either of the input ranges.
4986 * The sort is @e stable, that is, for equivalent elements in the
4987 * two ranges, elements from the first range will always come
4988 * before elements from the second.
4989 *
4990 * The comparison function should have the same effects on ordering as
4991 * the function used for the initial sort.
4992 */
4993 template<typename _InputIterator1, typename _InputIterator2,
4994 typename _OutputIterator, typename _Compare>
4995 _GLIBCXX20_CONSTEXPR
4996 inline _OutputIterator
4997 merge(_InputIterator1 __first1, _InputIterator1 __last1,
4998 _InputIterator2 __first2, _InputIterator2 __last2,
4999 _OutputIterator __result, _Compare __comp)
5000 {
5001 // concept requirements
5002 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5003 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5004 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5005 typename iterator_traits<_InputIterator1>::value_type>)
5006 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5007 typename iterator_traits<_InputIterator2>::value_type>)
5008 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5009 typename iterator_traits<_InputIterator2>::value_type,
5010 typename iterator_traits<_InputIterator1>::value_type>)
5011 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
5012 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
5013 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
5014 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
5015
5016 return _GLIBCXX_STD_A::__merge(__first1, __last1,
5017 __first2, __last2, __result,
5018 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5019 }
5020
5021 template<typename _RandomAccessIterator, typename _Compare>
5022 inline void
5023 __stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last,
5024 _Compare __comp)
5025 {
5026 typedef typename iterator_traits<_RandomAccessIterator>::value_type
5027 _ValueType;
5028 typedef typename iterator_traits<_RandomAccessIterator>::difference_type
5029 _DistanceType;
5030
5031 typedef _Temporary_buffer<_RandomAccessIterator, _ValueType> _TmpBuf;
5032 _TmpBuf __buf(__first, std::distance(__first, __last));
5033
5034 if (__buf.begin() == 0)
5035 std::__inplace_stable_sort(__first, __last, __comp);
5036 else
5037 std::__stable_sort_adaptive(__first, __last, __buf.begin(),
5038 _DistanceType(__buf.size()), __comp);
5039 }
5040
5041 /**
5042 * @brief Sort the elements of a sequence, preserving the relative order
5043 * of equivalent elements.
5044 * @ingroup sorting_algorithms
5045 * @param __first An iterator.
5046 * @param __last Another iterator.
5047 * @return Nothing.
5048 *
5049 * Sorts the elements in the range @p [__first,__last) in ascending order,
5050 * such that for each iterator @p i in the range @p [__first,__last-1),
5051 * @p *(i+1)<*i is false.
5052 *
5053 * The relative ordering of equivalent elements is preserved, so any two
5054 * elements @p x and @p y in the range @p [__first,__last) such that
5055 * @p x<y is false and @p y<x is false will have the same relative
5056 * ordering after calling @p stable_sort().
5057 */
5058 template<typename _RandomAccessIterator>
5059 inline void
5060 stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last)
5061 {
5062 // concept requirements
5063 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
5064 _RandomAccessIterator>)
5065 __glibcxx_function_requires(_LessThanComparableConcept<
5066 typename iterator_traits<_RandomAccessIterator>::value_type>)
5067 __glibcxx_requires_valid_range(__first, __last);
5068 __glibcxx_requires_irreflexive(__first, __last);
5069
5070 _GLIBCXX_STD_A::__stable_sort(__first, __last,
5071 __gnu_cxx::__ops::__iter_less_iter());
5072 }
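A minimal usage sketch of stable_sort (the record type and values are illustrative): sorting by the int key only, stability keeps {2, "b"} ahead of {2, "a"} because it appeared first.

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

int main()
{
  std::vector<std::pair<int, std::string>> v{{2, "b"}, {1, "a"}, {2, "a"}};
  std::stable_sort(v.begin(), v.end(),
                   [](const auto& x, const auto& y)
                   { return x.first < y.first; });
  // v == {{1, "a"}, {2, "b"}, {2, "a"}}
}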
5073
5074 /**
5075 * @brief Sort the elements of a sequence using a predicate for comparison,
5076 * preserving the relative order of equivalent elements.
5077 * @ingroup sorting_algorithms
5078 * @param __first An iterator.
5079 * @param __last Another iterator.
5080 * @param __comp A comparison functor.
5081 * @return Nothing.
5082 *
5083 * Sorts the elements in the range @p [__first,__last) in ascending order,
5084 * such that for each iterator @p i in the range @p [__first,__last-1),
5085 * @p __comp(*(i+1),*i) is false.
5086 *
5087 * The relative ordering of equivalent elements is preserved, so any two
5088 * elements @p x and @p y in the range @p [__first,__last) such that
5089 * @p __comp(x,y) is false and @p __comp(y,x) is false will have the same
5090 * relative ordering after calling @p stable_sort().
5091 */
5092 template<typename _RandomAccessIterator, typename _Compare>
5093 inline void
5094 stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last,
5095 _Compare __comp)
5096 {
5097 // concept requirements
5098 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
5099 _RandomAccessIterator>)
5100 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5101 typename iterator_traits<_RandomAccessIterator>::value_type,
5102 typename iterator_traits<_RandomAccessIterator>::value_type>)
5103 __glibcxx_requires_valid_range(__first, __last);
5104 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
5105
5106 _GLIBCXX_STD_A::__stable_sort(__first, __last,
5107 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5108 }
5109
5110 template<typename _InputIterator1, typename _InputIterator2,
5111 typename _OutputIterator,
5112 typename _Compare>
5113 _GLIBCXX20_CONSTEXPR
5114 _OutputIterator
5115 __set_union(_InputIterator1 __first1, _InputIterator1 __last1,
5116 _InputIterator2 __first2, _InputIterator2 __last2,
5117 _OutputIterator __result, _Compare __comp)
5118 {
5119 while (__first1 != __last1 && __first2 != __last2)
5120 {
5121 if (__comp(__first1, __first2))
5122 {
5123 *__result = *__first1;
5124 ++__first1;
5125 }
5126 else if (__comp(__first2, __first1))
5127 {
5128 *__result = *__first2;
5129 ++__first2;
5130 }
5131 else
5132 {
5133 *__result = *__first1;
5134 ++__first1;
5135 ++__first2;
5136 }
5137 ++__result;
5138 }
5139 return std::copy(__first2, __last2,
5140 std::copy(__first1, __last1, __result));
5141 }
5142
5143 /**
5144 * @brief Return the union of two sorted ranges.
5145 * @ingroup set_algorithms
5146 * @param __first1 Start of first range.
5147 * @param __last1 End of first range.
5148 * @param __first2 Start of second range.
5149 * @param __last2 End of second range.
5150 * @param __result Start of output range.
5151 * @return End of the output range.
5152 * @ingroup set_algorithms
5153 *
5154 * This operation iterates over both ranges, copying elements present in
5155 * each range in order to the output range. Iterators increment for each
5156 * range. When the current element of one range is less than the other,
5157 * that element is copied and the iterator advanced. If an element is
5158 * contained in both ranges, the element from the first range is copied and
5159 * both ranges advance. The output range may not overlap either input
5160 * range.
5161 */
5162 template<typename _InputIterator1, typename _InputIterator2,
5163 typename _OutputIterator>
5164 _GLIBCXX20_CONSTEXPR
5165 inline _OutputIterator
5166 set_union(_InputIterator1 __first1, _InputIterator1 __last1,
5167 _InputIterator2 __first2, _InputIterator2 __last2,
5168 _OutputIterator __result)
5169 {
5170 // concept requirements
5171 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5172 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5173 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5174 typename iterator_traits<_InputIterator1>::value_type>)
5175 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5176 typename iterator_traits<_InputIterator2>::value_type>)
5177 __glibcxx_function_requires(_LessThanOpConcept<
5178 typename iterator_traits<_InputIterator1>::value_type,
5179 typename iterator_traits<_InputIterator2>::value_type>)
5180 __glibcxx_function_requires(_LessThanOpConcept<
5181 typename iterator_traits<_InputIterator2>::value_type,
5182 typename iterator_traits<_InputIterator1>::value_type>)
5183 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
5184 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
5185 __glibcxx_requires_irreflexive2(__first1, __last1);
5186 __glibcxx_requires_irreflexive2(__first2, __last2);
5187
5188 return _GLIBCXX_STD_A::__set_union(__first1, __last1,
5189 __first2, __last2, __result,
5190 __gnu_cxx::__ops::__iter_less_iter());
5191 }
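A minimal usage sketch of set_union (values are illustrative); elements present in both sorted inputs appear once, taken from the first range.

#include <algorithm>
#include <iterator>
#include <vector>

int main()
{
  std::vector<int> a{1, 2, 4}, b{2, 3, 4};
  std::vector<int> out;
  std::set_union(a.begin(), a.end(), b.begin(), b.end(),
                 std::back_inserter(out));
  // out == {1, 2, 3, 4}
}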
5192
5193 /**
5194 * @brief Return the union of two sorted ranges using a comparison functor.
5195 * @ingroup set_algorithms
5196 * @param __first1 Start of first range.
5197 * @param __last1 End of first range.
5198 * @param __first2 Start of second range.
5199 * @param __last2 End of second range.
5200 * @param __result Start of output range.
5201 * @param __comp The comparison functor.
5202 * @return End of the output range.
5203 * @ingroup set_algorithms
5204 *
5205 * This operation iterates over both ranges, copying elements present in
5206 * each range in order to the output range. Iterators increment for each
5207 * range. When the current element of one range is less than the other
5208 * according to @p __comp, that element is copied and the iterator advanced.
5209 * If an equivalent element according to @p __comp is contained in both
5210 * ranges, the element from the first range is copied and both ranges
5211 * advance. The output range may not overlap either input range.
5212 */
5213 template<typename _InputIterator1, typename _InputIterator2,
5214 typename _OutputIterator, typename _Compare>
5215 _GLIBCXX20_CONSTEXPR
5216 inline _OutputIterator
5217 set_union(_InputIterator1 __first1, _InputIterator1 __last1,
5218 _InputIterator2 __first2, _InputIterator2 __last2,
5219 _OutputIterator __result, _Compare __comp)
5220 {
5221 // concept requirements
5222 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5223 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5224 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5225 typename iterator_traits<_InputIterator1>::value_type>)
5226 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5227 typename iterator_traits<_InputIterator2>::value_type>)
5228 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5229 typename iterator_traits<_InputIterator1>::value_type,
5230 typename iterator_traits<_InputIterator2>::value_type>)
5231 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5232 typename iterator_traits<_InputIterator2>::value_type,
5233 typename iterator_traits<_InputIterator1>::value_type>)
5234 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
5235 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
5236 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
5237 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
5238
5239 return _GLIBCXX_STD_A::__set_union(__first1, __last1,
5240 __first2, __last2, __result,
5241 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5242 }
5243
5244 template<typename _InputIterator1, typename _InputIterator2,
5245 typename _OutputIterator,
5246 typename _Compare>
5247 _GLIBCXX20_CONSTEXPR
5248 _OutputIterator
5249 __set_intersection(_InputIterator1 __first1, _InputIterator1 __last1,
5250 _InputIterator2 __first2, _InputIterator2 __last2,
5251 _OutputIterator __result, _Compare __comp)
5252 {
5253 while (__first1 != __last1 && __first2 != __last2)
5254 if (__comp(__first1, __first2))
5255 ++__first1;
5256 else if (__comp(__first2, __first1))
5257 ++__first2;
5258 else
5259 {
5260 *__result = *__first1;
5261 ++__first1;
5262 ++__first2;
5263 ++__result;
5264 }
5265 return __result;
5266 }
5267
5268 /**
5269 * @brief Return the intersection of two sorted ranges.
5270 * @ingroup set_algorithms
5271 * @param __first1 Start of first range.
5272 * @param __last1 End of first range.
5273 * @param __first2 Start of second range.
5274 * @param __last2 End of second range.
5275 * @param __result Start of output range.
5276 * @return End of the output range.
5277 * @ingroup set_algorithms
5278 *
5279 * This operation iterates over both ranges, copying elements present in
5280 * both ranges in order to the output range. Iterators increment for each
5281 * range. When the current element of one range is less than the other,
5282 * that iterator advances. If an element is contained in both ranges, the
5283 * element from the first range is copied and both ranges advance. The
5284 * output range may not overlap either input range.
5285 */
5286 template<typename _InputIterator1, typename _InputIterator2,
5287 typename _OutputIterator>
5288 _GLIBCXX20_CONSTEXPR
5289 inline _OutputIterator
5290 set_intersection(_InputIterator1 __first1, _InputIterator1 __last1,
5291 _InputIterator2 __first2, _InputIterator2 __last2,
5292 _OutputIterator __result)
5293 {
5294 // concept requirements
5295 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5296 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5297 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5298 typename iterator_traits<_InputIterator1>::value_type>)
5299 __glibcxx_function_requires(_LessThanOpConcept<
5300 typename iterator_traits<_InputIterator1>::value_type,
5301 typename iterator_traits<_InputIterator2>::value_type>)
5302 __glibcxx_function_requires(_LessThanOpConcept<
5303 typename iterator_traits<_InputIterator2>::value_type,
5304 typename iterator_traits<_InputIterator1>::value_type>)
5305 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
5306 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
5307 __glibcxx_requires_irreflexive2(__first1, __last1);
5308 __glibcxx_requires_irreflexive2(__first2, __last2);
5309
5310 return _GLIBCXX_STD_A::__set_intersection(__first1, __last1,
5311 __first2, __last2, __result,
5312 __gnu_cxx::__ops::__iter_less_iter());
5313 }
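A minimal usage sketch of set_intersection (values are illustrative): only elements present in both sorted inputs are copied.

#include <algorithm>
#include <iterator>
#include <vector>

int main()
{
  std::vector<int> a{1, 2, 4}, b{2, 3, 4};
  std::vector<int> out;
  std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                        std::back_inserter(out));
  // out == {2, 4}
}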
5314
5315 /**
5316 * @brief Return the intersection of two sorted ranges using comparison
5317 * functor.
5318 * @ingroup set_algorithms
5319 * @param __first1 Start of first range.
5320 * @param __last1 End of first range.
5321 * @param __first2 Start of second range.
5322 * @param __last2 End of second range.
5323 * @param __result Start of output range.
5324 * @param __comp The comparison functor.
5325 * @return End of the output range.
5326 * @ingroup set_algorithms
5327 *
5328 * This operation iterates over both ranges, copying elements present in
5329 * both ranges in order to the output range. Iterators increment for each
5330 * range. When the current element of one range is less than the other
5331 * according to @p __comp, that iterator advances. If an element is
5332 * contained in both ranges according to @p __comp, the element from the
5333 * first range is copied and both ranges advance. The output range may not
5334 * overlap either input range.
5335 */
5336 template<typename _InputIterator1, typename _InputIterator2,
5337 typename _OutputIterator, typename _Compare>
5338 _GLIBCXX20_CONSTEXPR
5339 inline _OutputIterator
5340 set_intersection(_InputIterator1 __first1, _InputIterator1 __last1,
5341 _InputIterator2 __first2, _InputIterator2 __last2,
5342 _OutputIterator __result, _Compare __comp)
5343 {
5344 // concept requirements
5345 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5346 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5347 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5348 typename iterator_traits<_InputIterator1>::value_type>)
5349 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5350 typename iterator_traits<_InputIterator1>::value_type,
5351 typename iterator_traits<_InputIterator2>::value_type>)
5352 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5353 typename iterator_traits<_InputIterator2>::value_type,
5354 typename iterator_traits<_InputIterator1>::value_type>)
5355 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
5356 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
5357 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
5358 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
5359
5360 return _GLIBCXX_STD_A::__set_intersection(__first1, __last1,
5361 __first2, __last2, __result,
5362 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5363 }
5364
5365 template<typename _InputIterator1, typename _InputIterator2,
5366 typename _OutputIterator,
5367 typename _Compare>
5368 _GLIBCXX20_CONSTEXPR
5369 _OutputIterator
5370 __set_difference(_InputIterator1 __first1, _InputIterator1 __last1,
5371 _InputIterator2 __first2, _InputIterator2 __last2,
5372 _OutputIterator __result, _Compare __comp)
5373 {
5374 while (__first1 != __last1 && __first2 != __last2)
5375 if (__comp(__first1, __first2))
5376 {
5377 *__result = *__first1;
5378 ++__first1;
5379 ++__result;
5380 }
5381 else if (__comp(__first2, __first1))
5382 ++__first2;
5383 else
5384 {
5385 ++__first1;
5386 ++__first2;
5387 }
5388 return std::copy(__first1, __last1, __result);
5389 }
5390
5391 /**
5392 * @brief Return the difference of two sorted ranges.
5393 * @ingroup set_algorithms
5394 * @param __first1 Start of first range.
5395 * @param __last1 End of first range.
5396 * @param __first2 Start of second range.
5397 * @param __last2 End of second range.
5398 * @param __result Start of output range.
5399 * @return End of the output range.
5400 * @ingroup set_algorithms
5401 *
5402 * This operation iterates over both ranges, copying elements present in
5403 * the first range but not the second in order to the output range.
5404 * Iterators increment for each range. When the current element of the
5405 * first range is less than the second, that element is copied and the
5406 * iterator advances. If the current element of the second range is less,
5407 * the iterator advances, but no element is copied. If an element is
5408 * contained in both ranges, no elements are copied and both ranges
5409 * advance. The output range may not overlap either input range.
5410 */
5411 template<typename _InputIterator1, typename _InputIterator2,
5412 typename _OutputIterator>
5413 _GLIBCXX20_CONSTEXPR
5414 inline _OutputIterator
5415 set_difference(_InputIterator1 __first1, _InputIterator1 __last1,
5416 _InputIterator2 __first2, _InputIterator2 __last2,
5417 _OutputIterator __result)
5418 {
5419 // concept requirements
5420 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5421 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5422 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5423 typename iterator_traits<_InputIterator1>::value_type>)
5424 __glibcxx_function_requires(_LessThanOpConcept<
5425 typename iterator_traits<_InputIterator1>::value_type,
5426 typename iterator_traits<_InputIterator2>::value_type>)
5427 __glibcxx_function_requires(_LessThanOpConcept<
5428 typename iterator_traits<_InputIterator2>::value_type,
5429 typename iterator_traits<_InputIterator1>::value_type>)
5430 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
5431 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
5432 __glibcxx_requires_irreflexive2(__first1, __last1);
5433 __glibcxx_requires_irreflexive2(__first2, __last2);
5434
5435 return _GLIBCXX_STD_A::__set_difference(__first1, __last1,
5436 __first2, __last2, __result,
5437 __gnu_cxx::__ops::__iter_less_iter());
5438 }
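A minimal usage sketch of set_difference (values are illustrative): elements of the first sorted range that are absent from the second.

#include <algorithm>
#include <iterator>
#include <vector>

int main()
{
  std::vector<int> a{1, 2, 4}, b{2, 3};
  std::vector<int> out;
  std::set_difference(a.begin(), a.end(), b.begin(), b.end(),
                      std::back_inserter(out));
  // out == {1, 4}
}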
5439
5440 /**
5441 * @brief Return the difference of two sorted ranges using comparison
5442 * functor.
5443 * @ingroup set_algorithms
5444 * @param __first1 Start of first range.
5445 * @param __last1 End of first range.
5446 * @param __first2 Start of second range.
5447 * @param __last2 End of second range.
5448 * @param __result Start of output range.
5449 * @param __comp The comparison functor.
5450 * @return End of the output range.
5451 * @ingroup set_algorithms
5452 *
5453 * This operation iterates over both ranges, copying elements present in
5454 * the first range but not the second in order to the output range.
5455 * Iterators increment for each range. When the current element of the
5456 * first range is less than the second according to @p __comp, that element
5457 * is copied and the iterator advances. If the current element of the
5458 * second range is less, no element is copied and the iterator advances.
5459 * If an element is contained in both ranges according to @p __comp, no
5460 * elements are copied and both ranges advance. The output range may not
5461 * overlap either input range.
5462 */
5463 template<typename _InputIterator1, typename _InputIterator2,
5464 typename _OutputIterator, typename _Compare>
5465 _GLIBCXX20_CONSTEXPR
5466 inline _OutputIterator
5467 set_difference(_InputIterator1 __first1, _InputIterator1 __last1,
5468 _InputIterator2 __first2, _InputIterator2 __last2,
5469 _OutputIterator __result, _Compare __comp)
5470 {
5471 // concept requirements
5472 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5473 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5474 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5475 typename iterator_traits<_InputIterator1>::value_type>)
5476 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5477 typename iterator_traits<_InputIterator1>::value_type,
5478 typename iterator_traits<_InputIterator2>::value_type>)
5479 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5480 typename iterator_traits<_InputIterator2>::value_type,
5481 typename iterator_traits<_InputIterator1>::value_type>)
5482 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
5483 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
5484 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
5485 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
5486
5487 return _GLIBCXX_STD_A::__set_difference(__first1, __last1,
5488 __first2, __last2, __result,
5489 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5490 }
5491
5492 template<typename _InputIterator1, typename _InputIterator2,
5493 typename _OutputIterator,
5494 typename _Compare>
5495 _GLIBCXX20_CONSTEXPR
5496 _OutputIterator
5497 __set_symmetric_difference(_InputIterator1 __first1,
5498 _InputIterator1 __last1,
5499 _InputIterator2 __first2,
5500 _InputIterator2 __last2,
5501 _OutputIterator __result,
5502 _Compare __comp)
5503 {
5504 while (__first1 != __last1 && __first2 != __last2)
5505 if (__comp(__first1, __first2))
5506 {
5507 *__result = *__first1;
5508 ++__first1;
5509 ++__result;
5510 }
5511 else if (__comp(__first2, __first1))
5512 {
5513 *__result = *__first2;
5514 ++__first2;
5515 ++__result;
5516 }
5517 else
5518 {
5519 ++__first1;
5520 ++__first2;
5521 }
5522 return std::copy(__first2, __last2,
5523 std::copy(__first1, __last1, __result));
5524 }
5525
5526 /**
5527 * @brief Return the symmetric difference of two sorted ranges.
5528 * @ingroup set_algorithms
5529 * @param __first1 Start of first range.
5530 * @param __last1 End of first range.
5531 * @param __first2 Start of second range.
5532 * @param __last2 End of second range.
5533 * @param __result Start of output range.
5534 * @return End of the output range.
5535 * @ingroup set_algorithms
5536 *
5537 * This operation iterates over both ranges, copying elements present in
5538 * one range but not the other in order to the output range. Iterators
5539 * increment for each range. When the current element of one range is less
5540 * than the other, that element is copied and the iterator advances. If an
5541 * element is contained in both ranges, no elements are copied and both
5542 * ranges advance. The output range may not overlap either input range.
5543 */
5544 template<typename _InputIterator1, typename _InputIterator2,
5545 typename _OutputIterator>
5546 _GLIBCXX20_CONSTEXPR
5547 inline _OutputIterator
5548 set_symmetric_difference(_InputIterator1 __first1, _InputIterator1 __last1,
5549 _InputIterator2 __first2, _InputIterator2 __last2,
5550 _OutputIterator __result)
5551 {
5552 // concept requirements
5553 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5554 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5555 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5556 typename iterator_traits<_InputIterator1>::value_type>)
5557 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5558 typename iterator_traits<_InputIterator2>::value_type>)
5559 __glibcxx_function_requires(_LessThanOpConcept<
5560 typename iterator_traits<_InputIterator1>::value_type,
5561 typename iterator_traits<_InputIterator2>::value_type>)
5562 __glibcxx_function_requires(_LessThanOpConcept<
5563 typename iterator_traits<_InputIterator2>::value_type,
5564 typename iterator_traits<_InputIterator1>::value_type>)
5565 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
5566 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
5567 __glibcxx_requires_irreflexive2(__first1, __last1);
5568 __glibcxx_requires_irreflexive2(__first2, __last2);
5569
5570 return _GLIBCXX_STD_A::__set_symmetric_difference(__first1, __last1,
5571 __first2, __last2, __result,
5572 __gnu_cxx::__ops::__iter_less_iter());
5573 }
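A minimal usage sketch of set_symmetric_difference (values are illustrative): elements present in exactly one of the two sorted inputs.

#include <algorithm>
#include <iterator>
#include <vector>

int main()
{
  std::vector<int> a{1, 2, 4}, b{2, 3};
  std::vector<int> out;
  std::set_symmetric_difference(a.begin(), a.end(), b.begin(), b.end(),
                                std::back_inserter(out));
  // out == {1, 3, 4}
}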
5574
5575 /**
5576 * @brief Return the symmetric difference of two sorted ranges using
5577 * comparison functor.
5578 * @ingroup set_algorithms
5579 * @param __first1 Start of first range.
5580 * @param __last1 End of first range.
5581 * @param __first2 Start of second range.
5582 * @param __last2 End of second range.
5583 * @param __result Start of output range.
5584 * @param __comp The comparison functor.
5585 * @return End of the output range.
5586 * @ingroup set_algorithms
5587 *
5588 * This operation iterates over both ranges, copying elements present in
5589 * one range but not the other in order to the output range. Iterators
5590 * increment for each range. When the current element of one range is less
5591 * than the other according to @p comp, that element is copied and the
5592 * iterator advances. If an element is contained in both ranges according
5593 * to @p __comp, no elements are copied and both ranges advance. The output
5594 * range may not overlap either input range.
5595 */
5596 template<typename _InputIterator1, typename _InputIterator2,
5597 typename _OutputIterator, typename _Compare>
5598 _GLIBCXX20_CONSTEXPR
5599 inline _OutputIterator
5600 set_symmetric_difference(_InputIterator1 __first1, _InputIterator1 __last1,
5601 _InputIterator2 __first2, _InputIterator2 __last2,
5602 _OutputIterator __result,
5603 _Compare __comp)
5604 {
5605 // concept requirements
5606 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5607 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5608 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5609 typename iterator_traits<_InputIterator1>::value_type>)
5610 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5611 typename iterator_traits<_InputIterator2>::value_type>)
5612 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5613 typename iterator_traits<_InputIterator1>::value_type,
5614 typename iterator_traits<_InputIterator2>::value_type>)
5615 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5616 typename iterator_traits<_InputIterator2>::value_type,
5617 typename iterator_traits<_InputIterator1>::value_type>)
5618 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
5619 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
5620 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
5621 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
5622
5623 return _GLIBCXX_STD_A::__set_symmetric_difference(__first1, __last1,
5624 __first2, __last2, __result,
5625 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5626 }
5627
5628 template<typename _ForwardIterator, typename _Compare>
5629 _GLIBCXX14_CONSTEXPR
5630 _ForwardIterator
5631 __min_element(_ForwardIterator __first, _ForwardIterator __last,
5632 _Compare __comp)
5633 {
5634 if (__first == __last)
5635 return __first;
5636 _ForwardIterator __result = __first;
5637 while (++__first != __last)
5638 if (__comp(__first, __result))
5639 __result = __first;
5640 return __result;
5641 }
5642
5643 /**
5644 * @brief Return the minimum element in a range.
5645 * @ingroup sorting_algorithms
5646 * @param __first Start of range.
5647 * @param __last End of range.
5648 * @return Iterator referencing the first instance of the smallest value.
5649 */
5650 template<typename _ForwardIterator>
5651 _GLIBCXX14_CONSTEXPR
5652 _ForwardIterator
5653 inline min_element(_ForwardIterator __first, _ForwardIterator __last)
5654 {
5655 // concept requirements
5656 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
5657 __glibcxx_function_requires(_LessThanComparableConcept<
5658 typename iterator_traits<_ForwardIterator>::value_type>)
5659 __glibcxx_requires_valid_range(__first, __last);
5660 __glibcxx_requires_irreflexive(__first, __last);
5661
5662 return _GLIBCXX_STD_A::__min_element(__first, __last,
5663 __gnu_cxx::__ops::__iter_less_iter());
5664 }
5665
5666 /**
5667 * @brief Return the minimum element in a range using comparison functor.
5668 * @ingroup sorting_algorithms
5669 * @param __first Start of range.
5670 * @param __last End of range.
5671 * @param __comp Comparison functor.
5672 * @return Iterator referencing the first instance of the smallest value
5673 * according to __comp.
5674 */
5675 template<typename _ForwardIterator, typename _Compare>
5676 _GLIBCXX14_CONSTEXPR
5677 inline _ForwardIterator
5678 min_element(_ForwardIterator __first, _ForwardIterator __last,
5679 _Compare __comp)
5680 {
5681 // concept requirements
5682 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
5683 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5684 typename iterator_traits<_ForwardIterator>::value_type,
5685 typename iterator_traits<_ForwardIterator>::value_type>)
5686 __glibcxx_requires_valid_range(__first, __last);
5687 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
5688
5689 return _GLIBCXX_STD_A::__min_element(__first, __last,
5690 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5691 }
5692
5693 template<typename _ForwardIterator, typename _Compare>
5694 _GLIBCXX14_CONSTEXPR
5695 _ForwardIterator
5696 __max_element(_ForwardIterator __first, _ForwardIterator __last,
5697 _Compare __comp)
5698 {
5699 if (__first == __last) return __first;
5700 _ForwardIterator __result = __first;
5701 while (++__first != __last)
5702 if (__comp(__result, __first))
5703 __result = __first;
5704 return __result;
5705 }
5706
5707 /**
5708 * @brief Return the maximum element in a range.
5709 * @ingroup sorting_algorithms
5710 * @param __first Start of range.
5711 * @param __last End of range.
5712 * @return Iterator referencing the first instance of the largest value.
5713 */
5714 template<typename _ForwardIterator>
5715 _GLIBCXX14_CONSTEXPR
5716 inline _ForwardIterator
5717 max_element(_ForwardIterator __first, _ForwardIterator __last)
5718 {
5719 // concept requirements
5720 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
5721 __glibcxx_function_requires(_LessThanComparableConcept<
5722 typename iterator_traits<_ForwardIterator>::value_type>)
5723 __glibcxx_requires_valid_range(__first, __last);
5724 __glibcxx_requires_irreflexive(__first, __last);
5725
5726 return _GLIBCXX_STD_A::__max_element(__first, __last,
5727 __gnu_cxx::__ops::__iter_less_iter());
5728 }
5729
5730 /**
5731 * @brief Return the maximum element in a range using comparison functor.
5732 * @ingroup sorting_algorithms
5733 * @param __first Start of range.
5734 * @param __last End of range.
5735 * @param __comp Comparison functor.
5736 * @return Iterator referencing the first instance of the largest value
5737 * according to __comp.
5738 */
5739 template<typename _ForwardIterator, typename _Compare>
5740 _GLIBCXX14_CONSTEXPR
5741 inline _ForwardIterator
5742 max_element(_ForwardIterator __first, _ForwardIterator __last,
5743 _Compare __comp)
5744 {
5745 // concept requirements
5746 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
5747 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5748 typename iterator_traits<_ForwardIterator>::value_type,
5749 typename iterator_traits<_ForwardIterator>::value_type>)
5750 __glibcxx_requires_valid_range(__first, __last);
5751 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
5752
5753 return _GLIBCXX_STD_A::__max_element(__first, __last,
5754 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5755 }
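A minimal usage sketch covering both min_element and max_element (values are illustrative); with a comparator, "largest" follows the predicate, so an inverted ordering inverts the result.

#include <algorithm>
#include <vector>

int main()
{
  std::vector<int> v{3, 1, 4, 1, 5};
  auto mn = std::min_element(v.begin(), v.end());  // points at the first 1
  auto mx = std::max_element(v.begin(), v.end());  // points at the 5
  auto inv = std::max_element(v.begin(), v.end(),
                              [](int a, int b) { return a > b; });
  // inv also points at the first 1: under the inverted ordering the
  // smallest value is the "maximum".
  (void)mn; (void)mx; (void)inv;
}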
5756
5757#if __cplusplus >= 201402L
5758 /// Reservoir sampling algorithm.
5759 template<typename _InputIterator, typename _RandomAccessIterator,
5760 typename _Size, typename _UniformRandomBitGenerator>
5761 _RandomAccessIterator
5762 __sample(_InputIterator __first, _InputIterator __last, input_iterator_tag,
5763 _RandomAccessIterator __out, random_access_iterator_tag,
5764 _Size __n, _UniformRandomBitGenerator&& __g)
5765 {
5766 using __distrib_type = uniform_int_distribution<_Size>;
5767 using __param_type = typename __distrib_type::param_type;
5768 __distrib_type __d{};
5769 _Size __sample_sz = 0;
5770 while (__first != __last && __sample_sz != __n)
5771 {
5772 __out[__sample_sz++] = *__first;
5773 ++__first;
5774 }
5775 for (auto __pop_sz = __sample_sz; __first != __last;
5776 ++__first, (void) ++__pop_sz)
5777 {
5778 const auto __k = __d(__g, __param_type{0, __pop_sz});
5779 if (__k < __n)
5780 __out[__k] = *__first;
5781 }
5782 return __out + __sample_sz;
5783 }
5784
5785 /// Selection sampling algorithm.
5786 template<typename _ForwardIterator, typename _OutputIterator, typename _Cat,
5787 typename _Size, typename _UniformRandomBitGenerator>
5788 _OutputIterator
5789 __sample(_ForwardIterator __first, _ForwardIterator __last,
5790 forward_iterator_tag,
5791 _OutputIterator __out, _Cat,
5792 _Size __n, _UniformRandomBitGenerator&& __g)
5793 {
5794 using __distrib_type = uniform_int_distribution<_Size>;
5795 using __param_type = typename __distrib_type::param_type;
5796 using _USize = make_unsigned_t<_Size>;
5797 using _Gen = remove_reference_t<_UniformRandomBitGenerator>;
5798 using __uc_type = common_type_t<typename _Gen::result_type, _USize>;
5799
5800 if (__first == __last)
5801 return __out;
5802
5803 __distrib_type __d{};
5804 _Size __unsampled_sz = std::distance(__first, __last);
5805 __n = std::min(__n, __unsampled_sz);
5806
5807 // If possible, we use __gen_two_uniform_ints to efficiently produce
5808 // two random numbers using a single distribution invocation:
5809
5810 const __uc_type __urngrange = __g.max() - __g.min();
5811 if (__urngrange / __uc_type(__unsampled_sz) >= __uc_type(__unsampled_sz))
5812 // I.e. (__urngrange >= __unsampled_sz * __unsampled_sz) but without
5813 // wrapping issues.
5814 {
5815 while (__n != 0 && __unsampled_sz >= 2)
5816 {
5817 const pair<_Size, _Size> __p =
5818 __gen_two_uniform_ints(__unsampled_sz, __unsampled_sz - 1, __g);
5819
5820 --__unsampled_sz;
5821 if (__p.first < __n)
5822 {
5823 *__out++ = *__first;
5824 --__n;
5825 }
5826
5827 ++__first;
5828
5829 if (__n == 0) break;
5830
5831 --__unsampled_sz;
5832 if (__p.second < __n)
5833 {
5834 *__out++ = *__first;
5835 --__n;
5836 }
5837
5838 ++__first;
5839 }
5840 }
5841
5842 // The loop above is otherwise equivalent to this one-at-a-time version:
5843
5844 for (; __n != 0; ++__first)
5845 if (__d(__g, __param_type{0, --__unsampled_sz}) < __n)
5846 {
5847 *__out++ = *__first;
5848 --__n;
5849 }
5850 return __out;
5851 }
5852
5853#if __cplusplus > 201402L
5854#define __cpp_lib_sample 201603
5855 /// Take a random sample from a population.
5856 template<typename _PopulationIterator, typename _SampleIterator,
5857 typename _Distance, typename _UniformRandomBitGenerator>
5858 _SampleIterator
5859 sample(_PopulationIterator __first, _PopulationIterator __last,
5860 _SampleIterator __out, _Distance __n,
5861 _UniformRandomBitGenerator&& __g)
5862 {
5863 using __pop_cat = typename
5864 std::iterator_traits<_PopulationIterator>::iterator_category;
5865 using __samp_cat = typename
5866 std::iterator_traits<_SampleIterator>::iterator_category;
5867
5868 static_assert(
5869 __or_<is_convertible<__pop_cat, forward_iterator_tag>,
5870 is_convertible<__samp_cat, random_access_iterator_tag>>::value,
5871 "output range must use a RandomAccessIterator when input range"
5872 " does not meet the ForwardIterator requirements");
5873
5874 static_assert(is_integral<_Distance>::value,
5875 "sample size must be an integer type");
5876
5877 typename iterator_traits<_PopulationIterator>::difference_type __d = __n;
5878 return _GLIBCXX_STD_A::
5879 __sample(__first, __last, __pop_cat{}, __out, __samp_cat{}, __d,
5880 std::forward<_UniformRandomBitGenerator>(__g));
5881 }
5882#endif // C++17
5883#endif // C++14
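A minimal usage sketch of the public C++17 entry point above (population values and the seed are illustrative); with a forward-iterator population the selection-sampling path is used and the relative order of the drawn elements is preserved.

#include <algorithm>
#include <iterator>
#include <random>
#include <vector>

int main()
{
  std::vector<int> pop{1, 2, 3, 4, 5, 6, 7, 8};
  std::vector<int> out;
  std::mt19937 gen{42};
  // Draw 3 elements without replacement.
  std::sample(pop.begin(), pop.end(), std::back_inserter(out), 3, gen);
}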
5884
5885_GLIBCXX_END_NAMESPACE_ALGO
5886_GLIBCXX_END_NAMESPACE_VERSION
5887} // namespace std
5888
5889#endif /* _STL_ALGO_H */

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/Analysis/TargetLibraryInfo.h

1//===-- TargetLibraryInfo.h - Library information ---------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_ANALYSIS_TARGETLIBRARYINFO_H
10#define LLVM_ANALYSIS_TARGETLIBRARYINFO_H
11
12#include "llvm/ADT/BitVector.h"
13#include "llvm/ADT/DenseMap.h"
14#include "llvm/ADT/Optional.h"
15#include "llvm/IR/Function.h"
16#include "llvm/IR/InstrTypes.h"
17#include "llvm/IR/Module.h"
18#include "llvm/IR/PassManager.h"
19#include "llvm/Pass.h"
20
21namespace llvm {
22template <typename T> class ArrayRef;
23class Triple;
24
25/// Describes a possible vectorization of a function.
26/// Function 'VectorFnName' is equivalent to 'ScalarFnName' vectorized
27/// by a factor 'VectorizationFactor'.
28struct VecDesc {
29 StringRef ScalarFnName;
30 StringRef VectorFnName;
31 ElementCount VectorizationFactor;
32};
33
34 enum LibFunc : unsigned {
35#define TLI_DEFINE_ENUM
36#include "llvm/Analysis/TargetLibraryInfo.def"
37
38 NumLibFuncs,
39 NotLibFunc
40 };
41
42/// Implementation of the target library information.
43///
44/// This class constructs tables that hold the target library information and
45/// make it available. However, it is somewhat expensive to compute and only
46/// depends on the triple. So users typically interact with the \c
47/// TargetLibraryInfo wrapper below.
48class TargetLibraryInfoImpl {
49 friend class TargetLibraryInfo;
50
51 unsigned char AvailableArray[(NumLibFuncs+3)/4];
52 llvm::DenseMap<unsigned, std::string> CustomNames;
53 static StringLiteral const StandardNames[NumLibFuncs];
54 bool ShouldExtI32Param, ShouldExtI32Return, ShouldSignExtI32Param;
55 unsigned SizeOfInt;
56
57 enum AvailabilityState {
58 StandardName = 3, // (memset to all ones)
59 CustomName = 1,
60 Unavailable = 0 // (memset to all zeros)
61 };
62 void setState(LibFunc F, AvailabilityState State) {
63 AvailableArray[F/4] &= ~(3 << 2*(F&3));
64 AvailableArray[F/4] |= State << 2*(F&3);
65 }
66 AvailabilityState getState(LibFunc F) const {
67 return static_cast<AvailabilityState>((AvailableArray[F/4] >> 2*(F&3)) & 3);
68 }
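setState and getState pack one 2-bit AvailabilityState per LibFunc, four entries per byte. A standalone sketch of the same arithmetic (the array and values here are illustrative, not LLVM API):

#include <cassert>

int main()
{
  unsigned char avail[4] = {0};
  unsigned F = 5;      // example function index: byte F/4 == 1, slot F&3 == 1
  unsigned state = 3;  // StandardName
  avail[F / 4] &= ~(3u << 2 * (F & 3));  // clear the function's 2-bit slot
  avail[F / 4] |= state << 2 * (F & 3);  // store the new state
  assert(((avail[F / 4] >> 2 * (F & 3)) & 3) == state);
}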
69
70 /// Vectorization descriptors - sorted by ScalarFnName.
71 std::vector<VecDesc> VectorDescs;
72 /// Scalarization descriptors - same content as VectorDescs but sorted based
73 /// on VectorFnName rather than ScalarFnName.
74 std::vector<VecDesc> ScalarDescs;
75
76 /// Return true if the function type FTy is valid for the library function
77 /// F, regardless of whether the function is available.
78 bool isValidProtoForLibFunc(const FunctionType &FTy, LibFunc F,
79 const DataLayout *DL) const;
80
81public:
82 /// List of known vector-functions libraries.
83 ///
84 /// The vector-functions library defines which functions are vectorizable
85 /// and with which factor. The library can be specified by the frontend
86 /// or by a command-line option, and is then used by
87 /// addVectorizableFunctionsFromVecLib to fill in the tables of
88 /// vectorizable functions.
89 enum VectorLibrary {
90 NoLibrary, // Don't use any vector library.
91 Accelerate, // Use Accelerate framework.
92 DarwinLibSystemM, // Use Darwin's libsystem_m.
93 LIBMVEC_X86, // GLIBC Vector Math library.
94 MASSV, // IBM MASS vector library.
95 SVML // Intel short vector math library.
96 };
97
98 TargetLibraryInfoImpl();
99 explicit TargetLibraryInfoImpl(const Triple &T);
100
101 // Provide value semantics.
102 TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI);
103 TargetLibraryInfoImpl(TargetLibraryInfoImpl &&TLI);
104 TargetLibraryInfoImpl &operator=(const TargetLibraryInfoImpl &TLI);
105 TargetLibraryInfoImpl &operator=(TargetLibraryInfoImpl &&TLI);
106
107 /// Searches for a particular function name.
108 ///
109 /// If it is one of the known library functions, return true and set F to the
110 /// corresponding value.
111 bool getLibFunc(StringRef funcName, LibFunc &F) const;
112
113 /// Searches for a particular function name, also checking that its type is
114 /// valid for the library function matching that name.
115 ///
116 /// If it is one of the known library functions, return true and set F to the
117 /// corresponding value.
118 bool getLibFunc(const Function &FDecl, LibFunc &F) const;
119
120 /// Forces a function to be marked as unavailable.
121 void setUnavailable(LibFunc F) {
122 setState(F, Unavailable);
123 }
124
125 /// Forces a function to be marked as available.
126 void setAvailable(LibFunc F) {
127 setState(F, StandardName);
128 }
129
130 /// Forces a function to be marked as available and provides an alternate
131 /// name that must be used.
132 void setAvailableWithName(LibFunc F, StringRef Name) {
133 if (StandardNames[F] != Name) {
134 setState(F, CustomName);
135 CustomNames[F] = std::string(Name);
136      assert(CustomNames.find(F) != CustomNames.end());
137 } else {
138 setState(F, StandardName);
139 }
140 }
141
142 /// Disables all builtins.
143 ///
144 /// This can be used for options like -fno-builtin.
145 void disableAllFunctions();
146
147 /// Add a set of scalar -> vector mappings, queryable via
148 /// getVectorizedFunction and getScalarizedFunction.
149 void addVectorizableFunctions(ArrayRef<VecDesc> Fns);
150
151 /// Calls addVectorizableFunctions with a known preset of functions for the
152 /// given vector library.
153 void addVectorizableFunctionsFromVecLib(enum VectorLibrary VecLib);
154
155 /// Return true if the function F has a vector equivalent with vectorization
156 /// factor VF.
157 bool isFunctionVectorizable(StringRef F, const ElementCount &VF) const {
158 return !getVectorizedFunction(F, VF).empty();
159 }
160
161 /// Return true if the function F has a vector equivalent with any
162 /// vectorization factor.
163 bool isFunctionVectorizable(StringRef F) const;
164
165 /// Return the name of the equivalent of F, vectorized with factor VF. If no
166 /// such mapping exists, return the empty string.
167 StringRef getVectorizedFunction(StringRef F, const ElementCount &VF) const;
168
169 /// Set to true iff i32 parameters to library functions should have signext
170 /// or zeroext attributes if they correspond to C-level int or unsigned int,
171 /// respectively.
172 void setShouldExtI32Param(bool Val) {
173 ShouldExtI32Param = Val;
174 }
175
176 /// Set to true iff i32 results from library functions should have signext
177 /// or zeroext attributes if they correspond to C-level int or unsigned int,
178 /// respectively.
179 void setShouldExtI32Return(bool Val) {
180 ShouldExtI32Return = Val;
181 }
182
183 /// Set to true iff i32 parameters to library functions should have signext
184 /// attribute if they correspond to C-level int or unsigned int.
185 void setShouldSignExtI32Param(bool Val) {
186 ShouldSignExtI32Param = Val;
187 }
188
189 /// Returns the size of the wchar_t type in bytes or 0 if the size is unknown.
190 /// This queries the 'wchar_size' metadata.
191 unsigned getWCharSize(const Module &M) const;
192
193 /// Get size of a C-level int or unsigned int, in bits.
194 unsigned getIntSize() const {
195 return SizeOfInt;
196 }
197
198 /// Initialize the C-level size of an integer.
199 void setIntSize(unsigned Bits) {
200 SizeOfInt = Bits;
201 }
202
203 /// Returns the largest vectorization factor used in the list of
204 /// vector functions.
205 void getWidestVF(StringRef ScalarF, ElementCount &FixedVF,
206 ElementCount &Scalable) const;
207
208 /// Returns true if call site / callee has cdecl-compatible calling
209 /// conventions.
210 static bool isCallingConvCCompatible(CallBase *CI);
211 static bool isCallingConvCCompatible(Function *Callee);
212};
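
A baseline impl like the one declared above is typically configured once per module. The following is a hedged sketch, not part of the header: the triple string and the LibFunc choices are illustrative assumptions, not taken from this report.

#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
using namespace llvm;

// Hypothetical baseline configuration; triple and LibFunc picks are assumed.
TargetLibraryInfoImpl makeBaselineImpl() {
  TargetLibraryInfoImpl Impl(Triple("x86_64-pc-linux-gnu"));
  Impl.setUnavailable(LibFunc_memcpy);                // mimics -fno-builtin-memcpy
  Impl.setAvailableWithName(LibFunc_sqrt, "my_sqrt"); // forces the CustomName path
  return Impl;
}
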
213
214/// Provides information about what library functions are available for
215/// the current target.
216///
217/// This both allows optimizations to handle them specially and frontends to
218/// disable such optimizations through -fno-builtin etc.
219class TargetLibraryInfo {
220 friend class TargetLibraryAnalysis;
221 friend class TargetLibraryInfoWrapperPass;
222
223 /// The global (module level) TLI info.
224 const TargetLibraryInfoImpl *Impl;
225
226 /// Support for -fno-builtin* options as function attributes; overrides
227 /// information in the global TargetLibraryInfoImpl.
228 BitVector OverrideAsUnavailable;
229
230public:
231 explicit TargetLibraryInfo(const TargetLibraryInfoImpl &Impl,
232 Optional<const Function *> F = None)
233 : Impl(&Impl), OverrideAsUnavailable(NumLibFuncs) {
234 if (!F)
235 return;
236 if ((*F)->hasFnAttribute("no-builtins"))
237 disableAllFunctions();
238 else {
239 // Disable individual libc/libm calls in TargetLibraryInfo.
240 LibFunc LF;
241 AttributeSet FnAttrs = (*F)->getAttributes().getFnAttributes();
242 for (const Attribute &Attr : FnAttrs) {
243 if (!Attr.isStringAttribute())
244 continue;
245 auto AttrStr = Attr.getKindAsString();
246 if (!AttrStr.consume_front("no-builtin-"))
247 continue;
248 if (getLibFunc(AttrStr, LF))
249 setUnavailable(LF);
250 }
251 }
252 }
253
254 // Provide value semantics.
255 TargetLibraryInfo(const TargetLibraryInfo &TLI)
256 : Impl(TLI.Impl), OverrideAsUnavailable(TLI.OverrideAsUnavailable) {}
257 TargetLibraryInfo(TargetLibraryInfo &&TLI)
258 : Impl(TLI.Impl), OverrideAsUnavailable(TLI.OverrideAsUnavailable) {}
259 TargetLibraryInfo &operator=(const TargetLibraryInfo &TLI) {
260 Impl = TLI.Impl;
261 OverrideAsUnavailable = TLI.OverrideAsUnavailable;
262 return *this;
263 }
264 TargetLibraryInfo &operator=(TargetLibraryInfo &&TLI) {
265 Impl = TLI.Impl;
266 OverrideAsUnavailable = TLI.OverrideAsUnavailable;
267 return *this;
268 }
269
270 /// Determine whether a callee with the given TLI can be inlined into a
271 /// caller with this TLI, based on 'nobuiltin' attributes. When requested,
272 /// allow inlining into a caller with a superset of the callee's nobuiltin
273 /// attributes, which is conservatively correct.
274 bool areInlineCompatible(const TargetLibraryInfo &CalleeTLI,
275 bool AllowCallerSuperset) const {
276 if (!AllowCallerSuperset)
277 return OverrideAsUnavailable == CalleeTLI.OverrideAsUnavailable;
278 BitVector B = OverrideAsUnavailable;
279 B |= CalleeTLI.OverrideAsUnavailable;
280 // We can inline if the union of the caller and callee's nobuiltin
281 // attributes is no stricter than the caller's nobuiltin attributes.
282 return B == OverrideAsUnavailable;
283 }
284
285 /// Searches for a particular function name.
286 ///
287 /// If it is one of the known library functions, return true and set F to the
288 /// corresponding value.
289 bool getLibFunc(StringRef funcName, LibFunc &F) const {
290 return Impl->getLibFunc(funcName, F);
291 }
292
293 bool getLibFunc(const Function &FDecl, LibFunc &F) const {
294 return Impl->getLibFunc(FDecl, F);
85
Value assigned to 'LF', which participates in a condition later
295 }
296
297 /// If a callbase does not have the 'nobuiltin' attribute, return true if the
298 /// called function is a known library function and set F to that function.
299 bool getLibFunc(const CallBase &CB, LibFunc &F) const {
300 return !CB.isNoBuiltin() && CB.getCalledFunction() &&
87
Returning value, which participates in a condition later
301 getLibFunc(*(CB.getCalledFunction()), F);
84
Calling 'TargetLibraryInfo::getLibFunc'
86
Returning from 'TargetLibraryInfo::getLibFunc'
302 }
303
304 /// Disables all builtins.
305 ///
306 /// This can be used for options like -fno-builtin.
307 void disableAllFunctions() LLVM_ATTRIBUTE_UNUSED {
308 OverrideAsUnavailable.set();
309 }
310
311 /// Forces a function to be marked as unavailable.
312 void setUnavailable(LibFunc F) LLVM_ATTRIBUTE_UNUSED {
313 OverrideAsUnavailable.set(F);
314 }
315
316 TargetLibraryInfoImpl::AvailabilityState getState(LibFunc F) const {
317 if (OverrideAsUnavailable[F])
92
Calling 'BitVector::operator[]'
96
Returning from 'BitVector::operator[]'
97
Taking false branch
318 return TargetLibraryInfoImpl::Unavailable;
319 return Impl->getState(F);
320 }
321
322 /// Tests whether a library function is available.
323 bool has(LibFunc F) const {
324 return getState(F) != TargetLibraryInfoImpl::Unavailable;
91
Calling 'TargetLibraryInfo::getState'
98
Returning from 'TargetLibraryInfo::getState'
325 }
326 bool isFunctionVectorizable(StringRef F, const ElementCount &VF) const {
327 return Impl->isFunctionVectorizable(F, VF);
328 }
329 bool isFunctionVectorizable(StringRef F) const {
330 return Impl->isFunctionVectorizable(F);
331 }
332 StringRef getVectorizedFunction(StringRef F, const ElementCount &VF) const {
333 return Impl->getVectorizedFunction(F, VF);
334 }
335
336 /// Tests if the function is both available and a candidate for optimized code
337 /// generation.
338 bool hasOptimizedCodeGen(LibFunc F) const {
339 if (getState(F) == TargetLibraryInfoImpl::Unavailable)
340 return false;
341 switch (F) {
342 default: break;
343 case LibFunc_copysign: case LibFunc_copysignf: case LibFunc_copysignl:
344 case LibFunc_fabs: case LibFunc_fabsf: case LibFunc_fabsl:
345 case LibFunc_sin: case LibFunc_sinf: case LibFunc_sinl:
346 case LibFunc_cos: case LibFunc_cosf: case LibFunc_cosl:
347 case LibFunc_sqrt: case LibFunc_sqrtf: case LibFunc_sqrtl:
348 case LibFunc_sqrt_finite: case LibFunc_sqrtf_finite:
349 case LibFunc_sqrtl_finite:
350 case LibFunc_fmax: case LibFunc_fmaxf: case LibFunc_fmaxl:
351 case LibFunc_fmin: case LibFunc_fminf: case LibFunc_fminl:
352 case LibFunc_floor: case LibFunc_floorf: case LibFunc_floorl:
353 case LibFunc_nearbyint: case LibFunc_nearbyintf: case LibFunc_nearbyintl:
354 case LibFunc_ceil: case LibFunc_ceilf: case LibFunc_ceill:
355 case LibFunc_rint: case LibFunc_rintf: case LibFunc_rintl:
356 case LibFunc_round: case LibFunc_roundf: case LibFunc_roundl:
357 case LibFunc_trunc: case LibFunc_truncf: case LibFunc_truncl:
358 case LibFunc_log2: case LibFunc_log2f: case LibFunc_log2l:
359 case LibFunc_exp2: case LibFunc_exp2f: case LibFunc_exp2l:
360 case LibFunc_memcpy: case LibFunc_memset: case LibFunc_memmove:
361 case LibFunc_memcmp: case LibFunc_bcmp: case LibFunc_strcmp:
362 case LibFunc_strcpy: case LibFunc_stpcpy: case LibFunc_strlen:
363 case LibFunc_strnlen: case LibFunc_memchr: case LibFunc_mempcpy:
364 return true;
365 }
366 return false;
367 }
368
369 StringRef getName(LibFunc F) const {
370 auto State = getState(F);
371 if (State == TargetLibraryInfoImpl::Unavailable)
372 return StringRef();
373 if (State == TargetLibraryInfoImpl::StandardName)
374 return Impl->StandardNames[F];
375 assert(State == TargetLibraryInfoImpl::CustomName);
376 return Impl->CustomNames.find(F)->second;
377 }
378
379 /// Returns extension attribute kind to be used for i32 parameters
380 /// corresponding to C-level int or unsigned int. May be zeroext, signext,
381 /// or none.
382 Attribute::AttrKind getExtAttrForI32Param(bool Signed = true) const {
383 if (Impl->ShouldExtI32Param)
384 return Signed ? Attribute::SExt : Attribute::ZExt;
385 if (Impl->ShouldSignExtI32Param)
386 return Attribute::SExt;
387 return Attribute::None;
388 }
389
390 /// Returns extension attribute kind to be used for i32 return values
391 /// corresponding to C-level int or unsigned int. May be zeroext, signext,
392 /// or none.
393 Attribute::AttrKind getExtAttrForI32Return(bool Signed = true) const {
394 if (Impl->ShouldExtI32Return)
395 return Signed ? Attribute::SExt : Attribute::ZExt;
396 return Attribute::None;
397 }
398
399 /// \copydoc TargetLibraryInfoImpl::getWCharSize()
400 unsigned getWCharSize(const Module &M) const {
401 return Impl->getWCharSize(M);
402 }
403
404 /// \copydoc TargetLibraryInfoImpl::getIntSize()
405 unsigned getIntSize() const {
406 return Impl->getIntSize();
407 }
408
409 /// Handle invalidation from the pass manager.
410 ///
411 /// If we try to invalidate this info, just return false. It cannot become
412 /// invalid even if the module or function changes.
413 bool invalidate(Module &, const PreservedAnalyses &,
414 ModuleAnalysisManager::Invalidator &) {
415 return false;
416 }
417 bool invalidate(Function &, const PreservedAnalyses &,
418 FunctionAnalysisManager::Invalidator &) {
419 return false;
420 }
421 /// Returns the largest vectorization factor used in the list of
422 /// vector functions.
423 void getWidestVF(StringRef ScalarF, ElementCount &FixedVF,
424 ElementCount &ScalableVF) const {
425 Impl->getWidestVF(ScalarF, FixedVF, ScalableVF);
426 }
427
428 /// Check if the function "F" is listed in a library known to LLVM.
429 bool isKnownVectorFunctionInLibrary(StringRef F) const {
430 return this->isFunctionVectorizable(F);
431 }
432};
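
Taken together, the accessors above implement the guarded lookup that analyzer events 84-87 and 91-98 walk through. A minimal sketch of that pattern, assuming a call `CB` and a `TLI` already in scope:

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Sketch only: true iff CB is a recognized, available library call.
static bool isUsableLibCall(const CallBase &CB, const TargetLibraryInfo &TLI,
                            LibFunc &LF) {
  // getLibFunc(CallBase) already rejects 'nobuiltin' and indirect calls;
  // has(LF) additionally honors per-function overrides and the global state.
  return TLI.getLibFunc(CB, LF) && TLI.has(LF);
}
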
433
434/// Analysis pass providing the \c TargetLibraryInfo.
435///
436/// Note that this pass's result cannot be invalidated, it is immutable for the
437/// life of the module.
438class TargetLibraryAnalysis : public AnalysisInfoMixin<TargetLibraryAnalysis> {
439public:
440 typedef TargetLibraryInfo Result;
441
442 /// Default construct the library analysis.
443 ///
444 /// This will use the module's triple to construct the library info for that
445 /// module.
446 TargetLibraryAnalysis() {}
447
448 /// Construct a library analysis with baseline Module-level info.
449 ///
450 /// This will be supplemented with Function-specific info in the Result.
451 TargetLibraryAnalysis(TargetLibraryInfoImpl BaselineInfoImpl)
452 : BaselineInfoImpl(std::move(BaselineInfoImpl)) {}
453
454 TargetLibraryInfo run(const Function &F, FunctionAnalysisManager &);
455
456private:
457 friend AnalysisInfoMixin<TargetLibraryAnalysis>;
458 static AnalysisKey Key;
459
460 Optional<TargetLibraryInfoImpl> BaselineInfoImpl;
461};
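
Under the new pass manager this analysis is consumed through a FunctionAnalysisManager. A hedged sketch; the registration step and function names are generic assumptions, not from this report:

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

void queryTLI(Function &F, FunctionAnalysisManager &FAM) {
  // Assumes the analysis was registered beforehand, e.g.
  //   FAM.registerPass([] { return TargetLibraryAnalysis(); });
  const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
  (void)TLI.getIntSize(); // bit width of a C 'int' on this target
}
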
462
463class TargetLibraryInfoWrapperPass : public ImmutablePass {
464 TargetLibraryAnalysis TLA;
465 Optional<TargetLibraryInfo> TLI;
466
467 virtual void anchor();
468
469public:
470 static char ID;
471 TargetLibraryInfoWrapperPass();
472 explicit TargetLibraryInfoWrapperPass(const Triple &T);
473 explicit TargetLibraryInfoWrapperPass(const TargetLibraryInfoImpl &TLI);
474
475 TargetLibraryInfo &getTLI(const Function &F) {
476 FunctionAnalysisManager DummyFAM;
477 TLI = TLA.run(F, DummyFAM);
478 return *TLI;
479 }
480};
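
Legacy-pass clients go through the wrapper instead. A hedged sketch of a minimal FunctionPass using it; the pass name and body are illustrative only:

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
struct TLIDemoPass : FunctionPass {
  static char ID;
  TLIDemoPass() : FunctionPass(ID) {}
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.setPreservesAll();
  }
  bool runOnFunction(Function &F) override {
    TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    (void)TLI;
    return false; // read-only: no IR changes
  }
};
} // namespace
char TLIDemoPass::ID = 0;
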
481
482} // end namespace llvm
483
484#endif

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/BitVector.h

1//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the BitVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_BITVECTOR_H
14#define LLVM_ADT_BITVECTOR_H
15
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/iterator_range.h"
19#include "llvm/Support/MathExtras.h"
20#include <algorithm>
21#include <cassert>
22#include <climits>
23#include <cstdint>
24#include <cstdlib>
25#include <cstring>
26#include <utility>
27
28namespace llvm {
29
30/// ForwardIterator for the bits that are set.
31/// Iterators get invalidated when resize / reserve is called.
32template <typename BitVectorT> class const_set_bits_iterator_impl {
33 const BitVectorT &Parent;
34 int Current = 0;
35
36 void advance() {
37 assert(Current != -1 && "Trying to advance past end.");
38 Current = Parent.find_next(Current);
39 }
40
41public:
42 const_set_bits_iterator_impl(const BitVectorT &Parent, int Current)
43 : Parent(Parent), Current(Current) {}
44 explicit const_set_bits_iterator_impl(const BitVectorT &Parent)
45 : const_set_bits_iterator_impl(Parent, Parent.find_first()) {}
46 const_set_bits_iterator_impl(const const_set_bits_iterator_impl &) = default;
47
48 const_set_bits_iterator_impl operator++(int) {
49 auto Prev = *this;
50 advance();
51 return Prev;
52 }
53
54 const_set_bits_iterator_impl &operator++() {
55 advance();
56 return *this;
57 }
58
59 unsigned operator*() const { return Current; }
60
61 bool operator==(const const_set_bits_iterator_impl &Other) const {
62 assert(&Parent == &Other.Parent &&
63 "Comparing iterators from different BitVectors");
64 return Current == Other.Current;
65 }
66
67 bool operator!=(const const_set_bits_iterator_impl &Other) const {
68 assert(&Parent == &Other.Parent &&
69 "Comparing iterators from different BitVectors");
70 return Current != Other.Current;
71 }
72};
73
74class BitVector {
75 typedef uintptr_t BitWord;
76
77 enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
78
79 static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
80 "Unsupported word size");
81
82 using Storage = SmallVector<BitWord>;
83
84 Storage Bits; // Actual bits.
85 unsigned Size; // Size of bitvector in bits.
86
87public:
88 typedef unsigned size_type;
89
90 // Encapsulation of a single bit.
91 class reference {
92
93 BitWord *WordRef;
94 unsigned BitPos;
95
96 public:
97 reference(BitVector &b, unsigned Idx) {
98 WordRef = &b.Bits[Idx / BITWORD_SIZE];
99 BitPos = Idx % BITWORD_SIZE;
100 }
101
102 reference() = delete;
103 reference(const reference&) = default;
104
105 reference &operator=(reference t) {
106 *this = bool(t);
107 return *this;
108 }
109
110 reference& operator=(bool t) {
111 if (t)
112 *WordRef |= BitWord(1) << BitPos;
113 else
114 *WordRef &= ~(BitWord(1) << BitPos);
115 return *this;
116 }
117
118 operator bool() const {
119 return ((*WordRef) & (BitWord(1) << BitPos)) != 0;
120 }
121 };
122
123 typedef const_set_bits_iterator_impl<BitVector> const_set_bits_iterator;
124 typedef const_set_bits_iterator set_iterator;
125
126 const_set_bits_iterator set_bits_begin() const {
127 return const_set_bits_iterator(*this);
128 }
129 const_set_bits_iterator set_bits_end() const {
130 return const_set_bits_iterator(*this, -1);
131 }
132 iterator_range<const_set_bits_iterator> set_bits() const {
133 return make_range(set_bits_begin(), set_bits_end());
134 }
135
136 /// BitVector default ctor - Creates an empty bitvector.
137 BitVector() : Size(0) {}
138
139 /// BitVector ctor - Creates a bitvector of specified number of bits. All
140 /// bits are initialized to the specified value.
141 explicit BitVector(unsigned s, bool t = false)
142 : Bits(NumBitWords(s), 0 - (BitWord)t), Size(s) {
143 if (t)
144 clear_unused_bits();
145 }
146
147 /// empty - Tests whether there are no bits in this bitvector.
148 bool empty() const { return Size == 0; }
149
150 /// size - Returns the number of bits in this bitvector.
151 size_type size() const { return Size; }
152
153 /// count - Returns the number of bits which are set.
154 size_type count() const {
155 unsigned NumBits = 0;
156 for (auto Bit : Bits)
157 NumBits += countPopulation(Bit);
158 return NumBits;
159 }
160
161 /// any - Returns true if any bit is set.
162 bool any() const {
163 return any_of(Bits, [](BitWord Bit) { return Bit != 0; });
164 }
165
166 /// all - Returns true if all bits are set.
167 bool all() const {
168 for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
169 if (Bits[i] != ~BitWord(0))
170 return false;
171
172 // If bits remain, check that they are ones. The unused bits are always zero.
173 if (unsigned Remainder = Size % BITWORD_SIZE)
174 return Bits[Size / BITWORD_SIZE] == (BitWord(1) << Remainder) - 1;
175
176 return true;
177 }
178
179 /// none - Returns true if none of the bits are set.
180 bool none() const {
181 return !any();
182 }
183
184 /// find_first_in - Returns the index of the first set / unset bit,
185 /// depending on \p Set, in the range [Begin, End).
186 /// Returns -1 if all bits in the range are unset / set.
187 int find_first_in(unsigned Begin, unsigned End, bool Set = true) const {
188 assert(Begin <= End && End <= Size);
189 if (Begin == End)
190 return -1;
191
192 unsigned FirstWord = Begin / BITWORD_SIZE;
193 unsigned LastWord = (End - 1) / BITWORD_SIZE;
194
195 // Check subsequent words.
196 // The code below is based on search for the first _set_ bit. If
197 // we're searching for the first _unset_, we just take the
198 // complement of each word before we use it and apply
199 // the same method.
200 for (unsigned i = FirstWord; i <= LastWord; ++i) {
201 BitWord Copy = Bits[i];
202 if (!Set)
203 Copy = ~Copy;
204
205 if (i == FirstWord) {
206 unsigned FirstBit = Begin % BITWORD_SIZE;
207 Copy &= maskTrailingZeros<BitWord>(FirstBit);
208 }
209
210 if (i == LastWord) {
211 unsigned LastBit = (End - 1) % BITWORD_SIZE;
212 Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
213 }
214 if (Copy != 0)
215 return i * BITWORD_SIZE + countTrailingZeros(Copy);
216 }
217 return -1;
218 }
219
220 /// find_last_in - Returns the index of the last set bit in the range
221 /// [Begin, End). Returns -1 if all bits in the range are unset.
222 int find_last_in(unsigned Begin, unsigned End) const {
223 assert(Begin <= End && End <= Size);
224 if (Begin == End)
225 return -1;
226
227 unsigned LastWord = (End - 1) / BITWORD_SIZE;
228 unsigned FirstWord = Begin / BITWORD_SIZE;
229
230 for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
231 unsigned CurrentWord = i - 1;
232
233 BitWord Copy = Bits[CurrentWord];
234 if (CurrentWord == LastWord) {
235 unsigned LastBit = (End - 1) % BITWORD_SIZE;
236 Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
237 }
238
239 if (CurrentWord == FirstWord) {
240 unsigned FirstBit = Begin % BITWORD_SIZE;
241 Copy &= maskTrailingZeros<BitWord>(FirstBit);
242 }
243
244 if (Copy != 0)
245 return (CurrentWord + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1;
246 }
247
248 return -1;
249 }
250
251 /// find_first_unset_in - Returns the index of the first unset bit in the
252 /// range [Begin, End). Returns -1 if all bits in the range are set.
253 int find_first_unset_in(unsigned Begin, unsigned End) const {
254 return find_first_in(Begin, End, /* Set = */ false);
255 }
256
257 /// find_last_unset_in - Returns the index of the last unset bit in the
258 /// range [Begin, End). Returns -1 if all bits in the range are set.
259 int find_last_unset_in(unsigned Begin, unsigned End) const {
260 assert(Begin <= End && End <= Size);
261 if (Begin == End)
262 return -1;
263
264 unsigned LastWord = (End - 1) / BITWORD_SIZE;
265 unsigned FirstWord = Begin / BITWORD_SIZE;
266
267 for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
268 unsigned CurrentWord = i - 1;
269
270 BitWord Copy = Bits[CurrentWord];
271 if (CurrentWord == LastWord) {
272 unsigned LastBit = (End - 1) % BITWORD_SIZE;
273 Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
274 }
275
276 if (CurrentWord == FirstWord) {
277 unsigned FirstBit = Begin % BITWORD_SIZE;
278 Copy |= maskTrailingOnes<BitWord>(FirstBit);
279 }
280
281 if (Copy != ~BitWord(0)) {
282 unsigned Result =
283 (CurrentWord + 1) * BITWORD_SIZE - countLeadingOnes(Copy) - 1;
284 return Result < Size ? Result : -1;
285 }
286 }
287 return -1;
288 }
289
290 /// find_first - Returns the index of the first set bit, -1 if none
291 /// of the bits are set.
292 int find_first() const { return find_first_in(0, Size); }
293
294 /// find_last - Returns the index of the last set bit, -1 if none of the bits
295 /// are set.
296 int find_last() const { return find_last_in(0, Size); }
297
298 /// find_next - Returns the index of the next set bit following the
299 /// "Prev" bit. Returns -1 if the next set bit is not found.
300 int find_next(unsigned Prev) const { return find_first_in(Prev + 1, Size); }
301
302 /// find_prev - Returns the index of the first set bit that precedes
303 /// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
304 int find_prev(unsigned PriorTo) const { return find_last_in(0, PriorTo); }
305
306 /// find_first_unset - Returns the index of the first unset bit, -1 if all
307 /// of the bits are set.
308 int find_first_unset() const { return find_first_unset_in(0, Size); }
309
310 /// find_next_unset - Returns the index of the next unset bit following the
311 /// "Prev" bit. Returns -1 if all remaining bits are set.
312 int find_next_unset(unsigned Prev) const {
313 return find_first_unset_in(Prev + 1, Size);
314 }
315
316 /// find_last_unset - Returns the index of the last unset bit, -1 if all of
317 /// the bits are set.
318 int find_last_unset() const { return find_last_unset_in(0, Size); }
319
320 /// find_prev_unset - Returns the index of the first unset bit that precedes
321 /// the bit at \p PriorTo. Returns -1 if all previous bits are set.
322 int find_prev_unset(unsigned PriorTo) {
323 return find_last_unset_in(0, PriorTo);
324 }
325
326 /// clear - Removes all bits from the bitvector.
327 void clear() {
328 Size = 0;
329 Bits.clear();
330 }
331
332 /// resize - Grow or shrink the bitvector.
333 void resize(unsigned N, bool t = false) {
334 set_unused_bits(t);
335 Size = N;
336 Bits.resize(NumBitWords(N), 0 - BitWord(t));
337 clear_unused_bits();
338 }
339
340 void reserve(unsigned N) { Bits.reserve(NumBitWords(N)); }
341
342 // Set, reset, flip
343 BitVector &set() {
344 init_words(true);
345 clear_unused_bits();
346 return *this;
347 }
348
349 BitVector &set(unsigned Idx) {
350 assert(Idx < Size && "access in bound");
351 Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
352 return *this;
353 }
354
355 /// set - Efficiently set a range of bits in [I, E)
356 BitVector &set(unsigned I, unsigned E) {
357 assert(I <= E && "Attempted to set backwards range!");
358 assert(E <= size() && "Attempted to set out-of-bounds range!");
359
360 if (I == E) return *this;
361
362 if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
363 BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
364 BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
365 BitWord Mask = EMask - IMask;
366 Bits[I / BITWORD_SIZE] |= Mask;
367 return *this;
368 }
369
370 BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
371 Bits[I / BITWORD_SIZE] |= PrefixMask;
372 I = alignTo(I, BITWORD_SIZE);
373
374 for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
375 Bits[I / BITWORD_SIZE] = ~BitWord(0);
376
377 BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
378 if (I < E)
379 Bits[I / BITWORD_SIZE] |= PostfixMask;
380
381 return *this;
382 }
383
384 BitVector &reset() {
385 init_words(false);
386 return *this;
387 }
388
389 BitVector &reset(unsigned Idx) {
390 Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
391 return *this;
392 }
393
394 /// reset - Efficiently reset a range of bits in [I, E)
395 BitVector &reset(unsigned I, unsigned E) {
396 assert(I <= E && "Attempted to reset backwards range!");
397 assert(E <= size() && "Attempted to reset out-of-bounds range!");
398
399 if (I == E) return *this;
400
401 if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
402 BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
403 BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
404 BitWord Mask = EMask - IMask;
405 Bits[I / BITWORD_SIZE] &= ~Mask;
406 return *this;
407 }
408
409 BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
410 Bits[I / BITWORD_SIZE] &= ~PrefixMask;
411 I = alignTo(I, BITWORD_SIZE);
412
413 for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
414 Bits[I / BITWORD_SIZE] = BitWord(0);
415
416 BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
417 if (I < E)
418 Bits[I / BITWORD_SIZE] &= ~PostfixMask;
419
420 return *this;
421 }
422
423 BitVector &flip() {
424 for (auto &Bit : Bits)
425 Bit = ~Bit;
426 clear_unused_bits();
427 return *this;
428 }
429
430 BitVector &flip(unsigned Idx) {
431 Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
432 return *this;
433 }
434
435 // Indexing.
436 reference operator[](unsigned Idx) {
437 assert (Idx < Size && "Out-of-bounds Bit access.");
438 return reference(*this, Idx);
439 }
440
441 bool operator[](unsigned Idx) const {
442 assert (Idx < Size && "Out-of-bounds Bit access.");
93
Assuming 'Idx' is < field 'Size'
94
'?' condition is true
443 BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
444 return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
95
Assuming the condition is false
445 }
446
447 bool test(unsigned Idx) const {
448 return (*this)[Idx];
449 }
450
451 // Push single bit to end of vector.
452 void push_back(bool Val) {
453 unsigned OldSize = Size;
454 unsigned NewSize = Size + 1;
455
456 // Resize, which will insert zeros.
457 // If we already fit, the unused bits are already zero.
458 if (NewSize > getBitCapacity())
459 resize(NewSize, false);
460 else
461 Size = NewSize;
462
463 // If true, set single bit.
464 if (Val)
465 set(OldSize);
466 }
467
468 /// Test if any common bits are set.
469 bool anyCommon(const BitVector &RHS) const {
470 unsigned ThisWords = Bits.size();
471 unsigned RHSWords = RHS.Bits.size();
472 for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
473 if (Bits[i] & RHS.Bits[i])
474 return true;
475 return false;
476 }
477
478 // Comparison operators.
479 bool operator==(const BitVector &RHS) const {
480 if (size() != RHS.size())
481 return false;
482 unsigned NumWords = Bits.size();
483 return std::equal(Bits.begin(), Bits.begin() + NumWords, RHS.Bits.begin());
484 }
485
486 bool operator!=(const BitVector &RHS) const { return !(*this == RHS); }
487
488 /// Intersection, union, disjoint union.
489 BitVector &operator&=(const BitVector &RHS) {
490 unsigned ThisWords = Bits.size();
491 unsigned RHSWords = RHS.Bits.size();
492 unsigned i;
493 for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
494 Bits[i] &= RHS.Bits[i];
495
496 // Any bits that are just in this bitvector become zero, because they aren't
497 // in the RHS bit vector. Any words only in RHS are ignored because they
498 // are already zero in the LHS.
499 for (; i != ThisWords; ++i)
500 Bits[i] = 0;
501
502 return *this;
503 }
504
505 /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
506 BitVector &reset(const BitVector &RHS) {
507 unsigned ThisWords = Bits.size();
508 unsigned RHSWords = RHS.Bits.size();
509 for (unsigned i = 0; i != std::min(ThisWords, RHSWords); ++i)
510 Bits[i] &= ~RHS.Bits[i];
511 return *this;
512 }
513
514 /// test - Check if (This - RHS) is zero.
515 /// This is the same as reset(RHS) and any().
516 bool test(const BitVector &RHS) const {
517 unsigned ThisWords = Bits.size();
518 unsigned RHSWords = RHS.Bits.size();
519 unsigned i;
520 for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
521 if ((Bits[i] & ~RHS.Bits[i]) != 0)
522 return true;
523
524 for (; i != ThisWords ; ++i)
525 if (Bits[i] != 0)
526 return true;
527
528 return false;
529 }
530
531 template <class F, class... ArgTys>
532 static BitVector &apply(F &&f, BitVector &Out, BitVector const &Arg,
533 ArgTys const &...Args) {
534 assert(llvm::all_of(
535 std::initializer_list<unsigned>{Args.size()...},
536 [&Arg](auto const &BV) { return Arg.size() == BV; }) &&
537 "consistent sizes");
538 Out.resize(Arg.size());
539 for (size_t i = 0, e = Arg.Bits.size(); i != e; ++i)
540 Out.Bits[i] = f(Arg.Bits[i], Args.Bits[i]...);
541 Out.clear_unused_bits();
542 return Out;
543 }
544
545 BitVector &operator|=(const BitVector &RHS) {
546 if (size() < RHS.size())
547 resize(RHS.size());
548 for (size_t i = 0, e = RHS.Bits.size(); i != e; ++i)
549 Bits[i] |= RHS.Bits[i];
550 return *this;
551 }
552
553 BitVector &operator^=(const BitVector &RHS) {
554 if (size() < RHS.size())
555 resize(RHS.size());
556 for (size_t i = 0, e = RHS.Bits.size(); i != e; ++i)
557 Bits[i] ^= RHS.Bits[i];
558 return *this;
559 }
560
561 BitVector &operator>>=(unsigned N) {
562 assert(N <= Size);
563 if (LLVM_UNLIKELY(empty() || N == 0))
564 return *this;
565
566 unsigned NumWords = Bits.size();
567 assert(NumWords >= 1);
568
569 wordShr(N / BITWORD_SIZE);
570
571 unsigned BitDistance = N % BITWORD_SIZE;
572 if (BitDistance == 0)
573 return *this;
574
575 // When the shift size is not a multiple of the word size, then we have
576 // a tricky situation where each word in succession needs to extract some
577 // of the bits from the next word and or them into this word while
578 // shifting this word to make room for the new bits. This has to be done
579 // for every word in the array.
580
581 // Since we're shifting each word right, some bits will fall off the end
582 // of each word to the right, and empty space will be created on the left.
583 // The final word in the array will lose bits permanently, so starting at
584 // the beginning, work forwards shifting each word to the right, and
585 // OR'ing in the bits from the end of the next word to the beginning of
586 // the current word.
587
588 // Example:
589 // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting right
590 // by 4 bits.
591 // Step 1: Word[0] >>= 4 ; 0x0ABBCCDD
592 // Step 2: Word[0] |= 0x10000000 ; 0x1ABBCCDD
593 // Step 3: Word[1] >>= 4 ; 0x0EEFF001
594 // Step 4: Word[1] |= 0x50000000 ; 0x5EEFF001
595 // Step 5: Word[2] >>= 4 ; 0x02334455
596 // Result: { 0x1ABBCCDD, 0x5EEFF001, 0x02334455 }
597 const BitWord Mask = maskTrailingOnes<BitWord>(BitDistance);
598 const unsigned LSH = BITWORD_SIZE - BitDistance;
599
600 for (unsigned I = 0; I < NumWords - 1; ++I) {
601 Bits[I] >>= BitDistance;
602 Bits[I] |= (Bits[I + 1] & Mask) << LSH;
603 }
604
605 Bits[NumWords - 1] >>= BitDistance;
606
607 return *this;
608 }
609
610 BitVector &operator<<=(unsigned N) {
611 assert(N <= Size);
612 if (LLVM_UNLIKELY(empty() || N == 0))
613 return *this;
614
615 unsigned NumWords = Bits.size();
616 assert(NumWords >= 1);
617
618 wordShl(N / BITWORD_SIZE);
619
620 unsigned BitDistance = N % BITWORD_SIZE;
621 if (BitDistance == 0)
622 return *this;
623
624 // When the shift size is not a multiple of the word size, then we have
625 // a tricky situation where each word in succession needs to extract some
626 // of the bits from the previous word and or them into this word while
627 // shifting this word to make room for the new bits. This has to be done
628 // for every word in the array. This is similar to the algorithm outlined
629 // in operator>>=, but backwards.
630
631 // Since we're shifting each word left, some bits will fall off the end
632 // of each word to the left, and empty space will be created on the right.
633 // The first word in the array will lose bits permanently, so starting at
634 // the end, work backwards shifting each word to the left, and OR'ing
635 // in the bits from the end of the next word to the beginning of the
636 // current word.
637
638 // Example:
639 // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting left
640 // by 4 bits.
641 // Step 1: Word[2] <<= 4 ; 0x23344550
642 // Step 2: Word[2] |= 0x0000000E ; 0x2334455E
643 // Step 3: Word[1] <<= 4 ; 0xEFF00110
644 // Step 4: Word[1] |= 0x0000000A ; 0xEFF0011A
645 // Step 5: Word[0] <<= 4 ; 0xABBCCDD0
646 // Result: { 0xABBCCDD0, 0xEFF0011A, 0x2334455E }
647 const BitWord Mask = maskLeadingOnes<BitWord>(BitDistance);
648 const unsigned RSH = BITWORD_SIZE - BitDistance;
649
650 for (int I = NumWords - 1; I > 0; --I) {
651 Bits[I] <<= BitDistance;
652 Bits[I] |= (Bits[I - 1] & Mask) >> RSH;
653 }
654 Bits[0] <<= BitDistance;
655 clear_unused_bits();
656
657 return *this;
658 }
659
660 void swap(BitVector &RHS) {
661 std::swap(Bits, RHS.Bits);
662 std::swap(Size, RHS.Size);
663 }
664
665 void invalid() {
666 assert(!Size && Bits.empty());
667 Size = (unsigned)-1;
668 }
669 bool isInvalid() const { return Size == (unsigned)-1; }
670
671 ArrayRef<BitWord> getData() const { return {&Bits[0], Bits.size()}; }
672
673 //===--------------------------------------------------------------------===//
674 // Portable bit mask operations.
675 //===--------------------------------------------------------------------===//
676 //
677 // These methods all operate on arrays of uint32_t, each holding 32 bits. The
678 // fixed word size makes it easier to work with literal bit vector constants
679 // in portable code.
680 //
681 // The LSB in each word is the lowest numbered bit. The size of a portable
682 // bit mask is always a whole multiple of 32 bits. If no bit mask size is
683 // given, the bit mask is assumed to cover the entire BitVector.
684
685 /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
686 /// This computes "*this |= Mask".
687 void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
688 applyMask<true, false>(Mask, MaskWords);
689 }
690
691 /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
692 /// Don't resize. This computes "*this &= ~Mask".
693 void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
694 applyMask<false, false>(Mask, MaskWords);
695 }
696
697 /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
698 /// Don't resize. This computes "*this |= ~Mask".
699 void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
700 applyMask<true, true>(Mask, MaskWords);
701 }
702
703 /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
704 /// Don't resize. This computes "*this &= Mask".
705 void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
706 applyMask<false, true>(Mask, MaskWords);
707 }
708
709private:
710 /// Perform a logical left shift of \p Count words by moving everything
711 /// \p Count words to the right in memory.
712 ///
713 /// While confusing, words are stored from least significant at Bits[0] to
714 /// most significant at Bits[NumWords-1]. A logical shift left, however,
715 /// moves the current least significant bit to a higher logical index, and
716 /// fills the previous least significant bits with 0. Thus, we actually
717 /// need to move the bytes of the memory to the right, not to the left.
718 /// Example:
719 /// Words = [0xBBBBAAAA, 0xDDDDFFFF, 0x00000000, 0xDDDD0000]
720 /// represents a BitVector where 0xBBBBAAAA contain the least significant
721 /// bits. So if we want to shift the BitVector left by 2 words, we need
722 /// to turn this into 0x00000000 0x00000000 0xBBBBAAAA 0xDDDDFFFF by using a
723 /// memmove which moves right, not left.
724 void wordShl(uint32_t Count) {
725 if (Count == 0)
726 return;
727
728 uint32_t NumWords = Bits.size();
729
730 // Since we always move Word-sized chunks of data with src and dest both
731 // aligned to a word-boundary, we don't need to worry about endianness
732 // here.
733 std::copy(Bits.begin(), Bits.begin() + NumWords - Count,
734 Bits.begin() + Count);
735 std::fill(Bits.begin(), Bits.begin() + Count, 0);
736 clear_unused_bits();
737 }
738
739 /// Perform a logical right shift of \p Count words by moving those
740 /// words to the left in memory. See wordShl for more information.
741 ///
742 void wordShr(uint32_t Count) {
743 if (Count == 0)
744 return;
745
746 uint32_t NumWords = Bits.size();
747
748 std::copy(Bits.begin() + Count, Bits.begin() + NumWords, Bits.begin());
749 std::fill(Bits.begin() + NumWords - Count, Bits.begin() + NumWords, 0);
750 }
751
752 int next_unset_in_word(int WordIndex, BitWord Word) const {
753 unsigned Result = WordIndex * BITWORD_SIZE + countTrailingOnes(Word);
754 return Result < size() ? Result : -1;
755 }
756
757 unsigned NumBitWords(unsigned S) const {
758 return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
759 }
760
761 // Set the unused bits in the high words.
762 void set_unused_bits(bool t = true) {
763 // Then set any stray high bits of the last used word.
764 if (unsigned ExtraBits = Size % BITWORD_SIZE) {
765 BitWord ExtraBitMask = ~BitWord(0) << ExtraBits;
766 if (t)
767 Bits.back() |= ExtraBitMask;
768 else
769 Bits.back() &= ~ExtraBitMask;
770 }
771 }
772
773 // Clear the unused bits in the high words.
774 void clear_unused_bits() {
775 set_unused_bits(false);
776 }
777
778 void init_words(bool t) {
779 std::fill(Bits.begin(), Bits.end(), 0 - (BitWord)t);
780 }
781
782 template<bool AddBits, bool InvertMask>
783 void applyMask(const uint32_t *Mask, unsigned MaskWords) {
784 static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
785 MaskWords = std::min(MaskWords, (size() + 31) / 32);
786 const unsigned Scale = BITWORD_SIZE / 32;
787 unsigned i;
788 for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
789 BitWord BW = Bits[i];
790 // This inner loop should unroll completely when BITWORD_SIZE > 32.
791 for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
792 uint32_t M = *Mask++;
793 if (InvertMask) M = ~M;
794 if (AddBits) BW |= BitWord(M) << b;
795 else BW &= ~(BitWord(M) << b);
796 }
797 Bits[i] = BW;
798 }
799 for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
800 uint32_t M = *Mask++;
801 if (InvertMask) M = ~M;
802 if (AddBits) Bits[i] |= BitWord(M) << b;
803 else Bits[i] &= ~(BitWord(M) << b);
804 }
805 if (AddBits)
806 clear_unused_bits();
807 }
808
809public:
810 /// Return the size (in bytes) of the bit vector.
811 size_t getMemorySize() const { return Bits.size() * sizeof(BitWord); }
812 size_t getBitCapacity() const { return Bits.size() * BITWORD_SIZE; }
813};
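
A short usage sketch of the public BitVector API above; all indices and sizes are illustrative:

#include "llvm/ADT/BitVector.h"
using namespace llvm;

void bitVectorDemo() {
  BitVector BV(128);       // 128 bits, all initially false
  BV.set(3);               // single bit
  BV.set(10, 20);          // half-open range [10, 20)
  BV[40] = true;           // proxy 'reference' from operator[]
  for (unsigned Bit : BV.set_bits())
    (void)Bit;             // visits 3, 10..19, 40 in order
  BitVector Mask(128);
  Mask.set(15);
  bool Overlap = BV.anyCommon(Mask); // true: bit 15 is set in both
  BV |= Mask;                        // in-place union
  (void)Overlap;
}
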
814
815inline size_t capacity_in_bytes(const BitVector &X) {
816 return X.getMemorySize();
817}
818
819template <> struct DenseMapInfo<BitVector> {
820 static inline BitVector getEmptyKey() { return {}; }
821 static inline BitVector getTombstoneKey() {
822 BitVector V;
823 V.invalid();
824 return V;
825 }
826 static unsigned getHashValue(const BitVector &V) {
827 return DenseMapInfo<std::pair<unsigned, ArrayRef<uintptr_t>>>::getHashValue(
828 std::make_pair(V.size(), V.getData()));
829 }
830 static bool isEqual(const BitVector &LHS, const BitVector &RHS) {
831 if (LHS.isInvalid() || RHS.isInvalid())
832 return LHS.isInvalid() == RHS.isInvalid();
833 return LHS == RHS;
834 }
835};
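
Because of this specialization, a BitVector can serve directly as a DenseMap key; the default-constructed (empty) vector is reserved as the empty key and an "invalid" vector as the tombstone, so real keys should be non-empty. Illustrative sketch, with assumed names:

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
using namespace llvm;

unsigned bumpCount(DenseMap<BitVector, unsigned> &Histogram,
                   const BitVector &Key) {
  return ++Histogram[Key]; // hashed from (size, underlying words) per above
}
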
836} // end namespace llvm
837
838namespace std {
839 /// Implement std::swap in terms of BitVector swap.
840inline void swap(llvm::BitVector &LHS, llvm::BitVector &RHS) { LHS.swap(RHS); }
841} // end namespace std
842
843#endif // LLVM_ADT_BITVECTOR_H

/build/llvm-toolchain-snapshot-13~++20210620111110+09e8c0d5aaef/llvm/include/llvm/ADT/Optional.h

1//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides Optional, a template class modeled in the spirit of
10// OCaml's 'opt' variant. The idea is to strongly type whether or not
11// a value can be optional.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_ADT_OPTIONAL_H
16#define LLVM_ADT_OPTIONAL_H
17
18#include "llvm/ADT/Hashing.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLForwardCompat.h"
21#include "llvm/Support/Compiler.h"
22#include "llvm/Support/type_traits.h"
23#include <cassert>
24#include <memory>
25#include <new>
26#include <utility>
27
28namespace llvm {
29
30class raw_ostream;
31
32namespace optional_detail {
33
34/// Storage for any type.
35//
36// The specialization condition intentionally uses
37// llvm::is_trivially_copy_constructible instead of
38// std::is_trivially_copy_constructible. GCC versions prior to 7.4 may
39// instantiate the copy constructor of `T` when
40// std::is_trivially_copy_constructible is instantiated. This causes
41// compilation to fail if we query the trivially copy constructible property of
42// a class which is not copy constructible.
43//
44// The current implementation of OptionalStorage insists that in order to use
45// the trivial specialization, the value_type must be trivially copy
46// constructible and trivially copy assignable due to =default implementations
47 // of the copy/move constructor/assignment. It does not follow that this is
48 // necessarily the case when std::is_trivially_copyable is true (hence the
49 // expanded specialization condition).
50//
51// The move constructible / assignable conditions emulate the remaining behavior
52// of std::is_trivially_copyable.
53template <typename T, bool = (llvm::is_trivially_copy_constructible<T>::value &&
54 std::is_trivially_copy_assignable<T>::value &&
55 (std::is_trivially_move_constructible<T>::value ||
56 !std::is_move_constructible<T>::value) &&
57 (std::is_trivially_move_assignable<T>::value ||
58 !std::is_move_assignable<T>::value))>
59class OptionalStorage {
60 union {
61 char empty;
62 T value;
63 };
64 bool hasVal;
65
66public:
67 ~OptionalStorage() { reset(); }
68
69 constexpr OptionalStorage() noexcept : empty(), hasVal(false) {}
70
71 constexpr OptionalStorage(OptionalStorage const &other) : OptionalStorage() {
72 if (other.hasValue()) {
73 emplace(other.value);
74 }
75 }
76 constexpr OptionalStorage(OptionalStorage &&other) : OptionalStorage() {
77 if (other.hasValue()) {
78 emplace(std::move(other.value));
79 }
80 }
81
82 template <class... Args>
83 constexpr explicit OptionalStorage(in_place_t, Args &&... args)
84 : value(std::forward<Args>(args)...), hasVal(true) {}
85
86 void reset() noexcept {
87 if (hasVal) {
88 value.~T();
89 hasVal = false;
90 }
91 }
92
93 constexpr bool hasValue() const noexcept { return hasVal; }
94
95 T &getValue() LLVM_LVALUE_FUNCTION noexcept {
96 assert(hasVal);
97 return value;
98 }
99 constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
100 assert(hasVal);
101 return value;
102 }
103#if LLVM_HAS_RVALUE_REFERENCE_THIS
104 T &&getValue() && noexcept {
105 assert(hasVal);
106 return std::move(value);
107 }
108#endif
109
110 template <class... Args> void emplace(Args &&... args) {
111 reset();
112 ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
113 hasVal = true;
114 }
115
116 OptionalStorage &operator=(T const &y) {
117 if (hasValue()) {
118 value = y;
119 } else {
120 ::new ((void *)std::addressof(value)) T(y);
121 hasVal = true;
122 }
123 return *this;
124 }
125 OptionalStorage &operator=(T &&y) {
126 if (hasValue()) {
127 value = std::move(y);
128 } else {
129 ::new ((void *)std::addressof(value)) T(std::move(y));
130 hasVal = true;
131 }
132 return *this;
133 }
134
135 OptionalStorage &operator=(OptionalStorage const &other) {
136 if (other.hasValue()) {
137 if (hasValue()) {
138 value = other.value;
139 } else {
140 ::new ((void *)std::addressof(value)) T(other.value);
141 hasVal = true;
142 }
143 } else {
144 reset();
145 }
146 return *this;
147 }
148
149 OptionalStorage &operator=(OptionalStorage &&other) {
150 if (other.hasValue()) {
151 if (hasValue()) {
152 value = std::move(other.value);
153 } else {
154 ::new ((void *)std::addressof(value)) T(std::move(other.value));
155 hasVal = true;
156 }
157 } else {
158 reset();
159 }
160 return *this;
161 }
162};
163
164template <typename T> class OptionalStorage<T, true> {
165 union {
166 char empty;
167 T value;
168 };
169 bool hasVal = false;
170
171public:
172 ~OptionalStorage() = default;
173
174 constexpr OptionalStorage() noexcept : empty{} {}
175
176 constexpr OptionalStorage(OptionalStorage const &other) = default;
177 constexpr OptionalStorage(OptionalStorage &&other) = default;
178
179 OptionalStorage &operator=(OptionalStorage const &other) = default;
180 OptionalStorage &operator=(OptionalStorage &&other) = default;
181
182 template <class... Args>
183 constexpr explicit OptionalStorage(in_place_t, Args &&... args)
184 : value(std::forward<Args>(args)...), hasVal(true) {}
185
186 void reset() noexcept {
187 if (hasVal) {
188 value.~T();
189 hasVal = false;
190 }
191 }
192
193 constexpr bool hasValue() const noexcept { return hasVal; }
111
Returning the value 1, which participates in a condition later
194
195 T &getValue() LLVM_LVALUE_FUNCTION noexcept {
196 assert(hasVal);
197 return value;
198 }
199 constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
200 assert(hasVal);
201 return value;
202 }
203#if LLVM_HAS_RVALUE_REFERENCE_THIS
204 T &&getValue() && noexcept {
205 assert(hasVal);
206 return std::move(value);
207 }
208#endif
209
210 template <class... Args> void emplace(Args &&... args) {
211 reset();
212 ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
213 hasVal = true;
214 }
215
216 OptionalStorage &operator=(T const &y) {
217 if (hasValue()) {
218 value = y;
219 } else {
220 ::new ((void *)std::addressof(value)) T(y);
221 hasVal = true;
222 }
223 return *this;
224 }
225 OptionalStorage &operator=(T &&y) {
226 if (hasValue()) {
227 value = std::move(y);
228 } else {
229 ::new ((void *)std::addressof(value)) T(std::move(y));
230 hasVal = true;
231 }
232 return *this;
233 }
234};
235
236} // namespace optional_detail
237
238template <typename T> class Optional {
239 optional_detail::OptionalStorage<T> Storage;
240
241public:
242 using value_type = T;
243
244 constexpr Optional() {}
245 constexpr Optional(NoneType) {}
246
247 constexpr Optional(const T &y) : Storage(in_place, y) {}
248 constexpr Optional(const Optional &O) = default;
249
250 constexpr Optional(T &&y) : Storage(in_place, std::move(y)) {}
251 constexpr Optional(Optional &&O) = default;
252
253 template <typename... ArgTypes>
254 constexpr Optional(in_place_t, ArgTypes &&...Args)
255 : Storage(in_place, std::forward<ArgTypes>(Args)...) {}
256
257 Optional &operator=(T &&y) {
258 Storage = std::move(y);
259 return *this;
260 }
261 Optional &operator=(Optional &&O) = default;
262
263 /// Create a new object by constructing it in place with the given arguments.
264 template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
265 Storage.emplace(std::forward<ArgTypes>(Args)...);
266 }
267
268 static constexpr Optional create(const T *y) {
269 return y ? Optional(*y) : Optional();
270 }
271
272 Optional &operator=(const T &y) {
273 Storage = y;
274 return *this;
275 }
276 Optional &operator=(const Optional &O) = default;
277
278 void reset() { Storage.reset(); }
279
280 constexpr const T *getPointer() const { return &Storage.getValue(); }
281 T *getPointer() { return &Storage.getValue(); }
282 constexpr const T &getValue() const LLVM_LVALUE_FUNCTION {
283 return Storage.getValue();
284 }
285 T &getValue() LLVM_LVALUE_FUNCTION { return Storage.getValue(); }
286
287 constexpr explicit operator bool() const { return hasValue(); }
109
Calling 'Optional::hasValue'
114
Returning from 'Optional::hasValue'
115
Returning the value 1, which participates in a condition later
288 constexpr bool hasValue() const { return Storage.hasValue(); }
110
Calling 'OptionalStorage::hasValue'
112
Returning from 'OptionalStorage::hasValue'
113
Returning the value 1, which participates in a condition later
289 constexpr const T *operator->() const { return getPointer(); }
290 T *operator->() { return getPointer(); }
291  constexpr const T &operator*() const LLVM_LVALUE_FUNCTION {
292    return getValue();
293  }
294  T &operator*() LLVM_LVALUE_FUNCTION { return getValue(); }
295
296 template <typename U>
297  constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
298 return hasValue() ? getValue() : std::forward<U>(value);
299 }
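
getValueOr() returns the stored value when present and the supplied default otherwise; a small sketch with hypothetical values:

// Sketch of getValueOr with hypothetical values.
#include "llvm/ADT/Optional.h"
#include <cassert>

int main() {
  llvm::Optional<int> Some(3), Empty;
  assert(Some.getValueOr(-1) == 3);    // value present: default ignored
  assert(Empty.getValueOr(-1) == -1);  // empty: default returned
}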
300
301 /// Apply a function to the value if present; otherwise return None.
302 template <class Function>
303  auto map(const Function &F) const LLVM_LVALUE_FUNCTION
304 -> Optional<decltype(F(getValue()))> {
305 if (*this) return F(getValue());
306 return None;
307 }
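
map() applies the callable only when a value is present and collapses an empty Optional to None; a sketch with hypothetical values, using the lvalue overload above:

// Sketch of the lvalue map overload with hypothetical values.
#include "llvm/ADT/Optional.h"
#include <cassert>

int main() {
  llvm::Optional<int> Some(21), Empty;
  llvm::Optional<int> Doubled = Some.map([](int v) { return v * 2; });
  llvm::Optional<int> Still = Empty.map([](int v) { return v * 2; });
  assert(Doubled.hasValue() && *Doubled == 42);  // callable ran
  assert(!Still.hasValue());                     // empty stays None
}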
308
309#if LLVM_HAS_RVALUE_REFERENCE_THIS
310 T &&getValue() && { return std::move(Storage.getValue()); }
311 T &&operator*() && { return std::move(Storage.getValue()); }
312
313 template <typename U>
314 T getValueOr(U &&value) && {
315 return hasValue() ? std::move(getValue()) : std::forward<U>(value);
316 }
317
318 /// Apply a function to the value if present; otherwise return None.
319 template <class Function>
320 auto map(const Function &F) &&
321 -> Optional<decltype(F(std::move(*this).getValue()))> {
322 if (*this) return F(std::move(*this).getValue());
323 return None;
324 }
325#endif
326};
327
328template <class T> llvm::hash_code hash_value(const Optional<T> &O) {
329 return O ? hash_combine(true, *O) : hash_value(false);
330}
331
332template <typename T, typename U>
333constexpr bool operator==(const Optional<T> &X, const Optional<U> &Y) {
334 if (X && Y)
335 return *X == *Y;
336 return X.hasValue() == Y.hasValue();
337}
338
339template <typename T, typename U>
340constexpr bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
341 return !(X == Y);
342}
343
344template <typename T, typename U>
345constexpr bool operator<(const Optional<T> &X, const Optional<U> &Y) {
346 if (X && Y)
347 return *X < *Y;
348 return X.hasValue() < Y.hasValue();
349}
350
351template <typename T, typename U>
352constexpr bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
353 return !(Y < X);
354}
355
356template <typename T, typename U>
357constexpr bool operator>(const Optional<T> &X, const Optional<U> &Y) {
358 return Y < X;
359}
360
361template <typename T, typename U>
362constexpr bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
363 return !(X < Y);
364}
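
When exactly one operand is engaged, the comparisons above fall back to comparing hasValue() as 0 versus 1, so an empty Optional orders before any engaged one; a sketch with hypothetical values:

// Sketch of the Optional-vs-Optional comparisons.
#include "llvm/ADT/Optional.h"
#include <cassert>

int main() {
  llvm::Optional<int> Empty, One(1), Two(2);
  assert(Empty < One);                     // hasValue(): 0 < 1
  assert(One < Two);                       // both engaged: *X < *Y
  assert(Empty == llvm::Optional<int>());  // two empties compare equal
  assert(!(Empty < Empty));
}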
365
366template <typename T>
367constexpr bool operator==(const Optional<T> &X, NoneType) {
368 return !X;
369}
370
371template <typename T>
372constexpr bool operator==(NoneType, const Optional<T> &X) {
373 return X == None;
374}
375
376template <typename T>
377constexpr bool operator!=(const Optional<T> &X, NoneType) {
378 return !(X == None);
379}
380
381template <typename T>
382constexpr bool operator!=(NoneType, const Optional<T> &X) {
383 return X != None;
384}
385
386template <typename T> constexpr bool operator<(const Optional<T> &, NoneType) {
387 return false;
388}
389
390template <typename T> constexpr bool operator<(NoneType, const Optional<T> &X) {
391 return X.hasValue();
392}
393
394template <typename T>
395constexpr bool operator<=(const Optional<T> &X, NoneType) {
396 return !(None < X);
397}
398
399template <typename T>
400constexpr bool operator<=(NoneType, const Optional<T> &X) {
401 return !(X < None);
402}
403
404template <typename T> constexpr bool operator>(const Optional<T> &X, NoneType) {
405 return None < X;
406}
407
408template <typename T> constexpr bool operator>(NoneType, const Optional<T> &X) {
409 return X < None;
410}
411
412template <typename T>
413constexpr bool operator>=(const Optional<T> &X, NoneType) {
414 return None <= X;
415}
416
417template <typename T>
418constexpr bool operator>=(NoneType, const Optional<T> &X) {
419 return X <= None;
420}
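
None behaves like an empty Optional on either side of these operators; a sketch with hypothetical values:

// Sketch of the None comparisons.
#include "llvm/ADT/Optional.h"
#include <cassert>

int main() {
  llvm::Optional<int> X(5), E;
  assert(E == llvm::None && X != llvm::None);
  assert(llvm::None < X);      // None sorts before an engaged value
  assert(!(llvm::None < E));   // but not before another empty
}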
421
422template <typename T>
423constexpr bool operator==(const Optional<T> &X, const T &Y) {
424 return X && *X == Y;
425}
426
427template <typename T>
428constexpr bool operator==(const T &X, const Optional<T> &Y) {
429 return Y && X == *Y;
430}
431
432template <typename T>
433constexpr bool operator!=(const Optional<T> &X, const T &Y) {
434 return !(X == Y);
435}
436
437template <typename T>
438constexpr bool operator!=(const T &X, const Optional<T> &Y) {
439 return !(X == Y);
440}
441
442template <typename T>
443constexpr bool operator<(const Optional<T> &X, const T &Y) {
444 return !X || *X < Y;
445}
446
447template <typename T>
448constexpr bool operator<(const T &X, const Optional<T> &Y) {
449 return Y && X < *Y;
450}
451
452template <typename T>
453constexpr bool operator<=(const Optional<T> &X, const T &Y) {
454 return !(Y < X);
455}
456
457template <typename T>
458constexpr bool operator<=(const T &X, const Optional<T> &Y) {
459 return !(Y < X);
460}
461
462template <typename T>
463constexpr bool operator>(const Optional<T> &X, const T &Y) {
464 return Y < X;
465}
466
467template <typename T>
468constexpr bool operator>(const T &X, const Optional<T> &Y) {
469 return Y < X;
470}
471
472template <typename T>
473constexpr bool operator>=(const Optional<T> &X, const T &Y) {
474 return !(X < Y);
475}
476
477template <typename T>
478constexpr bool operator>=(const T &X, const Optional<T> &Y) {
479 return !(X < Y);
480}
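
In the mixed Optional-versus-T comparisons, an empty Optional compares unequal to, and less than, any plain value; a sketch with hypothetical values:

// Sketch of the mixed Optional-vs-T comparisons.
#include "llvm/ADT/Optional.h"
#include <cassert>

int main() {
  llvm::Optional<int> X(5), E;
  assert(X == 5 && 5 == X);
  assert(E != 5);             // empty never equals a plain value
  assert(E < 5);              // !X || *X < Y: empty is "smallest"
  assert(!(X < 4) && X > 4);
}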
481
482raw_ostream &operator<<(raw_ostream &OS, NoneType);
483
484template <typename T, typename = decltype(std::declval<raw_ostream &>()
485 << std::declval<const T &>())>
486raw_ostream &operator<<(raw_ostream &OS, const Optional<T> &O) {
487 if (O)
488 OS << *O;
489 else
490 OS << None;
491 return OS;
492}
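
Streaming an Optional prints either the wrapped value or None; the NoneType overload declared above is defined in LLVMSupport, so the sketch below (hypothetical values) assumes linking against it:

// Streaming sketch; assumes linking against LLVMSupport.
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::Optional<int> Some(7), Empty;
  llvm::outs() << Some << "\n";   // prints: 7
  llvm::outs() << Empty << "\n";  // prints: None
}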
493
494} // end namespace llvm
495
496#endif // LLVM_ADT_OPTIONAL_H