Bug Summary

File: llvm/lib/Transforms/Utils/SimplifyCFG.cpp
Warning: line 3106, column 11
3rd function call argument is an uninitialized value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SimplifyCFG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/build-llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem 
/usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/build-llvm/lib/Transforms/Utils -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2021-01-22-054259-40355-1 -x c++ /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp

/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp

1//===- SimplifyCFG.cpp - Code to perform CFG simplification ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Peephole optimize the CFG.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/ADT/APInt.h"
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/MapVector.h"
17#include "llvm/ADT/Optional.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/ScopeExit.h"
20#include "llvm/ADT/Sequence.h"
21#include "llvm/ADT/SetOperations.h"
22#include "llvm/ADT/SetVector.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/Statistic.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/Analysis/AssumptionCache.h"
28#include "llvm/Analysis/ConstantFolding.h"
29#include "llvm/Analysis/EHPersonalities.h"
30#include "llvm/Analysis/GuardUtils.h"
31#include "llvm/Analysis/InstructionSimplify.h"
32#include "llvm/Analysis/MemorySSA.h"
33#include "llvm/Analysis/MemorySSAUpdater.h"
34#include "llvm/Analysis/TargetTransformInfo.h"
35#include "llvm/Analysis/ValueTracking.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
40#include "llvm/IR/ConstantRange.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DerivedTypes.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/GlobalValue.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/IRBuilder.h"
48#include "llvm/IR/InstrTypes.h"
49#include "llvm/IR/Instruction.h"
50#include "llvm/IR/Instructions.h"
51#include "llvm/IR/IntrinsicInst.h"
52#include "llvm/IR/Intrinsics.h"
53#include "llvm/IR/LLVMContext.h"
54#include "llvm/IR/MDBuilder.h"
55#include "llvm/IR/Metadata.h"
56#include "llvm/IR/Module.h"
57#include "llvm/IR/NoFolder.h"
58#include "llvm/IR/Operator.h"
59#include "llvm/IR/PatternMatch.h"
60#include "llvm/IR/Type.h"
61#include "llvm/IR/Use.h"
62#include "llvm/IR/User.h"
63#include "llvm/IR/Value.h"
64#include "llvm/Support/Casting.h"
65#include "llvm/Support/CommandLine.h"
66#include "llvm/Support/Debug.h"
67#include "llvm/Support/ErrorHandling.h"
68#include "llvm/Support/KnownBits.h"
69#include "llvm/Support/MathExtras.h"
70#include "llvm/Support/raw_ostream.h"
71#include "llvm/Transforms/Utils/BasicBlockUtils.h"
72#include "llvm/Transforms/Utils/Local.h"
73#include "llvm/Transforms/Utils/SSAUpdater.h"
74#include "llvm/Transforms/Utils/ValueMapper.h"
75#include <algorithm>
76#include <cassert>
77#include <climits>
78#include <cstddef>
79#include <cstdint>
80#include <iterator>
81#include <map>
82#include <set>
83#include <tuple>
84#include <utility>
85#include <vector>
86
87using namespace llvm;
88using namespace PatternMatch;
89
90#define DEBUG_TYPE"simplifycfg" "simplifycfg"
91
92cl::opt<bool> llvm::RequireAndPreserveDomTree(
93 "simplifycfg-require-and-preserve-domtree", cl::Hidden, cl::ZeroOrMore,
94 cl::init(false),
95 cl::desc("Temorary development switch used to gradually uplift SimplifyCFG "
96 "into preserving DomTree,"));
97
98// Chosen as 2 so as to be cheap, but still to have enough power to fold
99// a select, so the "clamp" idiom (of a min followed by a max) will be caught.
100// To catch this, we need to fold a compare and a select, hence '2' being the
101// minimum reasonable default.
102static cl::opt<unsigned> PHINodeFoldingThreshold(
103 "phi-node-folding-threshold", cl::Hidden, cl::init(2),
104 cl::desc(
105 "Control the amount of phi node folding to perform (default = 2)"));
106
107static cl::opt<unsigned> TwoEntryPHINodeFoldingThreshold(
108 "two-entry-phi-node-folding-threshold", cl::Hidden, cl::init(4),
109 cl::desc("Control the maximal total instruction cost that we are willing "
110 "to speculatively execute to fold a 2-entry PHI node into a "
111 "select (default = 4)"));
112
113static cl::opt<bool> DupRet(
114 "simplifycfg-dup-ret", cl::Hidden, cl::init(false),
115 cl::desc("Duplicate return instructions into unconditional branches"));
116
117static cl::opt<bool>
118 HoistCommon("simplifycfg-hoist-common", cl::Hidden, cl::init(true),
119 cl::desc("Hoist common instructions up to the parent block"));
120
121static cl::opt<bool>
122 SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true),
123 cl::desc("Sink common instructions down to the end block"));
124
125static cl::opt<bool> HoistCondStores(
126 "simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
127 cl::desc("Hoist conditional stores if an unconditional store precedes"));
128
129static cl::opt<bool> MergeCondStores(
130 "simplifycfg-merge-cond-stores", cl::Hidden, cl::init(true),
131 cl::desc("Hoist conditional stores even if an unconditional store does not "
132 "precede - hoist multiple conditional stores into a single "
133 "predicated store"));
134
135static cl::opt<bool> MergeCondStoresAggressively(
136 "simplifycfg-merge-cond-stores-aggressively", cl::Hidden, cl::init(false),
137 cl::desc("When merging conditional stores, do so even if the resultant "
138 "basic blocks are unlikely to be if-converted as a result"));
139
140static cl::opt<bool> SpeculateOneExpensiveInst(
141 "speculate-one-expensive-inst", cl::Hidden, cl::init(true),
142 cl::desc("Allow exactly one expensive instruction to be speculatively "
143 "executed"));
144
145static cl::opt<unsigned> MaxSpeculationDepth(
146 "max-speculation-depth", cl::Hidden, cl::init(10),
147 cl::desc("Limit maximum recursion depth when calculating costs of "
148 "speculatively executed instructions"));
149
150static cl::opt<int>
151MaxSmallBlockSize("simplifycfg-max-small-block-size", cl::Hidden, cl::init(10),
152 cl::desc("Max size of a block which is still considered "
153 "small enough to thread through"));
154
155// Two is chosen to allow one negation and a logical combine.
156static cl::opt<unsigned>
157 BranchFoldThreshold("simplifycfg-branch-fold-threshold", cl::Hidden,
158 cl::init(2),
159 cl::desc("Maximum cost of combining conditions when "
160 "folding branches"));
161
162STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps")static llvm::Statistic NumBitMaps = {"simplifycfg", "NumBitMaps"
, "Number of switch instructions turned into bitmaps"}
;
163STATISTIC(NumLinearMaps,static llvm::Statistic NumLinearMaps = {"simplifycfg", "NumLinearMaps"
, "Number of switch instructions turned into linear mapping"}
164 "Number of switch instructions turned into linear mapping")static llvm::Statistic NumLinearMaps = {"simplifycfg", "NumLinearMaps"
, "Number of switch instructions turned into linear mapping"}
;
165STATISTIC(NumLookupTables,static llvm::Statistic NumLookupTables = {"simplifycfg", "NumLookupTables"
, "Number of switch instructions turned into lookup tables"}
166 "Number of switch instructions turned into lookup tables")static llvm::Statistic NumLookupTables = {"simplifycfg", "NumLookupTables"
, "Number of switch instructions turned into lookup tables"}
;
167STATISTIC(static llvm::Statistic NumLookupTablesHoles = {"simplifycfg",
"NumLookupTablesHoles", "Number of switch instructions turned into lookup tables (holes checked)"
}
168 NumLookupTablesHoles,static llvm::Statistic NumLookupTablesHoles = {"simplifycfg",
"NumLookupTablesHoles", "Number of switch instructions turned into lookup tables (holes checked)"
}
169 "Number of switch instructions turned into lookup tables (holes checked)")static llvm::Statistic NumLookupTablesHoles = {"simplifycfg",
"NumLookupTablesHoles", "Number of switch instructions turned into lookup tables (holes checked)"
}
;
170STATISTIC(NumTableCmpReuses, "Number of reused switch table lookup compares")static llvm::Statistic NumTableCmpReuses = {"simplifycfg", "NumTableCmpReuses"
, "Number of reused switch table lookup compares"}
;
171STATISTIC(NumFoldValueComparisonIntoPredecessors,static llvm::Statistic NumFoldValueComparisonIntoPredecessors
= {"simplifycfg", "NumFoldValueComparisonIntoPredecessors", "Number of value comparisons folded into predecessor basic blocks"
}
172 "Number of value comparisons folded into predecessor basic blocks")static llvm::Statistic NumFoldValueComparisonIntoPredecessors
= {"simplifycfg", "NumFoldValueComparisonIntoPredecessors", "Number of value comparisons folded into predecessor basic blocks"
}
;
173STATISTIC(NumFoldBranchToCommonDest,static llvm::Statistic NumFoldBranchToCommonDest = {"simplifycfg"
, "NumFoldBranchToCommonDest", "Number of branches folded into predecessor basic block"
}
174 "Number of branches folded into predecessor basic block")static llvm::Statistic NumFoldBranchToCommonDest = {"simplifycfg"
, "NumFoldBranchToCommonDest", "Number of branches folded into predecessor basic block"
}
;
175STATISTIC(static llvm::Statistic NumHoistCommonCode = {"simplifycfg", "NumHoistCommonCode"
, "Number of common instruction 'blocks' hoisted up to the begin block"
}
176 NumHoistCommonCode,static llvm::Statistic NumHoistCommonCode = {"simplifycfg", "NumHoistCommonCode"
, "Number of common instruction 'blocks' hoisted up to the begin block"
}
177 "Number of common instruction 'blocks' hoisted up to the begin block")static llvm::Statistic NumHoistCommonCode = {"simplifycfg", "NumHoistCommonCode"
, "Number of common instruction 'blocks' hoisted up to the begin block"
}
;
178STATISTIC(NumHoistCommonInstrs,static llvm::Statistic NumHoistCommonInstrs = {"simplifycfg",
"NumHoistCommonInstrs", "Number of common instructions hoisted up to the begin block"
}
179 "Number of common instructions hoisted up to the begin block")static llvm::Statistic NumHoistCommonInstrs = {"simplifycfg",
"NumHoistCommonInstrs", "Number of common instructions hoisted up to the begin block"
}
;
180STATISTIC(NumSinkCommonCode,static llvm::Statistic NumSinkCommonCode = {"simplifycfg", "NumSinkCommonCode"
, "Number of common instruction 'blocks' sunk down to the end block"
}
181 "Number of common instruction 'blocks' sunk down to the end block")static llvm::Statistic NumSinkCommonCode = {"simplifycfg", "NumSinkCommonCode"
, "Number of common instruction 'blocks' sunk down to the end block"
}
;
182STATISTIC(NumSinkCommonInstrs,static llvm::Statistic NumSinkCommonInstrs = {"simplifycfg", "NumSinkCommonInstrs"
, "Number of common instructions sunk down to the end block"}
183 "Number of common instructions sunk down to the end block")static llvm::Statistic NumSinkCommonInstrs = {"simplifycfg", "NumSinkCommonInstrs"
, "Number of common instructions sunk down to the end block"}
;
184STATISTIC(NumSpeculations, "Number of speculative executed instructions")static llvm::Statistic NumSpeculations = {"simplifycfg", "NumSpeculations"
, "Number of speculative executed instructions"}
;
185STATISTIC(NumInvokes,static llvm::Statistic NumInvokes = {"simplifycfg", "NumInvokes"
, "Number of invokes with empty resume blocks simplified into calls"
}
186 "Number of invokes with empty resume blocks simplified into calls")static llvm::Statistic NumInvokes = {"simplifycfg", "NumInvokes"
, "Number of invokes with empty resume blocks simplified into calls"
}
;
187
188namespace {
189
190// The first field contains the value that the switch produces when a certain
191// case group is selected, and the second field is a vector containing the
192// cases composing the case group.
193using SwitchCaseResultVectorTy =
194 SmallVector<std::pair<Constant *, SmallVector<ConstantInt *, 4>>, 2>;
195
196// The first field contains the phi node that generates a result of the switch
197// and the second field contains the value generated for a certain case in the
198// switch for that PHI.
199using SwitchCaseResultsTy = SmallVector<std::pair<PHINode *, Constant *>, 4>;
200
201/// ValueEqualityComparisonCase - Represents a case of a switch.
202struct ValueEqualityComparisonCase {
203 ConstantInt *Value;
204 BasicBlock *Dest;
205
206 ValueEqualityComparisonCase(ConstantInt *Value, BasicBlock *Dest)
207 : Value(Value), Dest(Dest) {}
208
209 bool operator<(ValueEqualityComparisonCase RHS) const {
210 // Comparing pointers is ok as we only rely on the order for uniquing.
211 return Value < RHS.Value;
212 }
213
214 bool operator==(BasicBlock *RHSDest) const { return Dest == RHSDest; }
215};
216
217class SimplifyCFGOpt {
218 const TargetTransformInfo &TTI;
219 DomTreeUpdater *DTU;
220 const DataLayout &DL;
221 SmallPtrSetImpl<BasicBlock *> *LoopHeaders;
222 const SimplifyCFGOptions &Options;
223 bool Resimplify;
224
225 Value *isValueEqualityComparison(Instruction *TI);
226 BasicBlock *GetValueEqualityComparisonCases(
227 Instruction *TI, std::vector<ValueEqualityComparisonCase> &Cases);
228 bool SimplifyEqualityComparisonWithOnlyPredecessor(Instruction *TI,
229 BasicBlock *Pred,
230 IRBuilder<> &Builder);
231 bool FoldValueComparisonIntoPredecessors(Instruction *TI,
232 IRBuilder<> &Builder);
233
234 bool simplifyReturn(ReturnInst *RI, IRBuilder<> &Builder);
235 bool simplifyResume(ResumeInst *RI, IRBuilder<> &Builder);
236 bool simplifySingleResume(ResumeInst *RI);
237 bool simplifyCommonResume(ResumeInst *RI);
238 bool simplifyCleanupReturn(CleanupReturnInst *RI);
239 bool simplifyUnreachable(UnreachableInst *UI);
240 bool simplifySwitch(SwitchInst *SI, IRBuilder<> &Builder);
241 bool simplifyIndirectBr(IndirectBrInst *IBI);
242 bool simplifyBranch(BranchInst *Branch, IRBuilder<> &Builder);
243 bool simplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder);
244 bool simplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder);
245 bool SimplifyCondBranchToTwoReturns(BranchInst *BI, IRBuilder<> &Builder);
246
247 bool tryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
248 IRBuilder<> &Builder);
249
250 bool HoistThenElseCodeToIf(BranchInst *BI, const TargetTransformInfo &TTI);
251 bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB,
252 const TargetTransformInfo &TTI);
253 bool SimplifyTerminatorOnSelect(Instruction *OldTerm, Value *Cond,
254 BasicBlock *TrueBB, BasicBlock *FalseBB,
255 uint32_t TrueWeight, uint32_t FalseWeight);
256 bool SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder,
257 const DataLayout &DL);
258 bool SimplifySwitchOnSelect(SwitchInst *SI, SelectInst *Select);
259 bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI);
260 bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder);
261
262public:
263 SimplifyCFGOpt(const TargetTransformInfo &TTI, DomTreeUpdater *DTU,
264 const DataLayout &DL,
265 SmallPtrSetImpl<BasicBlock *> *LoopHeaders,
266 const SimplifyCFGOptions &Opts)
267 : TTI(TTI), DTU(DTU), DL(DL), LoopHeaders(LoopHeaders), Options(Opts) {
268 assert((!DTU || !DTU->hasPostDomTree()) &&(((!DTU || !DTU->hasPostDomTree()) && "SimplifyCFG is not yet capable of maintaining validity of a "
"PostDomTree, so don't ask for it.") ? static_cast<void>
(0) : __assert_fail ("(!DTU || !DTU->hasPostDomTree()) && \"SimplifyCFG is not yet capable of maintaining validity of a \" \"PostDomTree, so don't ask for it.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 270, __PRETTY_FUNCTION__))
269 "SimplifyCFG is not yet capable of maintaining validity of a "(((!DTU || !DTU->hasPostDomTree()) && "SimplifyCFG is not yet capable of maintaining validity of a "
"PostDomTree, so don't ask for it.") ? static_cast<void>
(0) : __assert_fail ("(!DTU || !DTU->hasPostDomTree()) && \"SimplifyCFG is not yet capable of maintaining validity of a \" \"PostDomTree, so don't ask for it.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 270, __PRETTY_FUNCTION__))
270 "PostDomTree, so don't ask for it.")(((!DTU || !DTU->hasPostDomTree()) && "SimplifyCFG is not yet capable of maintaining validity of a "
"PostDomTree, so don't ask for it.") ? static_cast<void>
(0) : __assert_fail ("(!DTU || !DTU->hasPostDomTree()) && \"SimplifyCFG is not yet capable of maintaining validity of a \" \"PostDomTree, so don't ask for it.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 270, __PRETTY_FUNCTION__))
;
271 }
272
273 bool simplifyOnce(BasicBlock *BB);
274 bool simplifyOnceImpl(BasicBlock *BB);
275 bool run(BasicBlock *BB);
276
277 // Helper to set Resimplify and return change indication.
278 bool requestResimplify() {
279 Resimplify = true;
280 return true;
281 }
282};
283
284} // end anonymous namespace
285
286/// Return true if it is safe to merge these two
287/// terminator instructions together.
288static bool
289SafeToMergeTerminators(Instruction *SI1, Instruction *SI2,
290 SmallSetVector<BasicBlock *, 4> *FailBlocks = nullptr) {
291 if (SI1 == SI2)
50
Assuming 'SI1' is not equal to 'SI2'
51
Taking false branch
292 return false; // Can't merge with self!
293
294 // It is not safe to merge these two switch instructions if they have a common
295 // successor, and if that successor has a PHI node, and if *that* PHI node has
296 // conflicting incoming values from the two switch blocks.
297 BasicBlock *SI1BB = SI1->getParent();
298 BasicBlock *SI2BB = SI2->getParent();
299
300 SmallPtrSet<BasicBlock *, 16> SI1Succs(succ_begin(SI1BB), succ_end(SI1BB));
301 bool Fail = false;
302 for (BasicBlock *Succ : successors(SI2BB))
303 if (SI1Succs.count(Succ))
304 for (BasicBlock::iterator BBI = Succ->begin(); isa<PHINode>(BBI); ++BBI) {
305 PHINode *PN = cast<PHINode>(BBI);
306 if (PN->getIncomingValueForBlock(SI1BB) !=
307 PN->getIncomingValueForBlock(SI2BB)) {
308 if (FailBlocks)
309 FailBlocks->insert(Succ);
310 Fail = true;
311 }
312 }
313
314 return !Fail;
52
Returning the value 1, which participates in a condition later
315}
316
317/// Return true if it is safe and profitable to merge these two terminator
318/// instructions together, where SI1 is an unconditional branch. PhiNodes will
319/// store all PHI nodes in common successors.
320static bool
321isProfitableToFoldUnconditional(BranchInst *SI1, BranchInst *SI2,
322 Instruction *Cond,
323 SmallVectorImpl<PHINode *> &PhiNodes) {
324 if (SI1 == SI2)
325 return false; // Can't merge with self!
326 assert(SI1->isUnconditional() && SI2->isConditional())((SI1->isUnconditional() && SI2->isConditional(
)) ? static_cast<void> (0) : __assert_fail ("SI1->isUnconditional() && SI2->isConditional()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 326, __PRETTY_FUNCTION__))
;
327
328 // We fold the unconditional branch if we can easily update all PHI nodes in
329 // common successors:
330 // 1> We have a constant incoming value for the conditional branch;
331 // 2> We have "Cond" as the incoming value for the unconditional branch;
332 // 3> SI2->getCondition() and Cond have same operands.
333 CmpInst *Ci2 = dyn_cast<CmpInst>(SI2->getCondition());
334 if (!Ci2)
335 return false;
336 if (!(Cond->getOperand(0) == Ci2->getOperand(0) &&
337 Cond->getOperand(1) == Ci2->getOperand(1)) &&
338 !(Cond->getOperand(0) == Ci2->getOperand(1) &&
339 Cond->getOperand(1) == Ci2->getOperand(0)))
340 return false;
341
342 BasicBlock *SI1BB = SI1->getParent();
343 BasicBlock *SI2BB = SI2->getParent();
344 SmallPtrSet<BasicBlock *, 16> SI1Succs(succ_begin(SI1BB), succ_end(SI1BB));
345 for (BasicBlock *Succ : successors(SI2BB))
346 if (SI1Succs.count(Succ))
347 for (BasicBlock::iterator BBI = Succ->begin(); isa<PHINode>(BBI); ++BBI) {
348 PHINode *PN = cast<PHINode>(BBI);
349 if (PN->getIncomingValueForBlock(SI1BB) != Cond ||
350 !isa<ConstantInt>(PN->getIncomingValueForBlock(SI2BB)))
351 return false;
352 PhiNodes.push_back(PN);
353 }
354 return true;
355}
356
357/// Update PHI nodes in Succ to indicate that there will now be entries in it
358/// from the 'NewPred' block. The values that will be flowing into the PHI nodes
359/// will be the same as those coming in from ExistPred, an existing predecessor
360/// of Succ.
361static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
362 BasicBlock *ExistPred,
363 MemorySSAUpdater *MSSAU = nullptr) {
364 for (PHINode &PN : Succ->phis())
365 PN.addIncoming(PN.getIncomingValueForBlock(ExistPred), NewPred);
366 if (MSSAU)
367 if (auto *MPhi = MSSAU->getMemorySSA()->getMemoryAccess(Succ))
368 MPhi->addIncoming(MPhi->getIncomingValueForBlock(ExistPred), NewPred);
369}
370
371/// Compute an abstract "cost" of speculating the given instruction,
372/// which is assumed to be safe to speculate. TCC_Free means cheap,
373/// TCC_Basic means less cheap, and TCC_Expensive means prohibitively
374/// expensive.
375static unsigned ComputeSpeculationCost(const User *I,
376 const TargetTransformInfo &TTI) {
377 assert(isSafeToSpeculativelyExecute(I) &&((isSafeToSpeculativelyExecute(I) && "Instruction is not safe to speculatively execute!"
) ? static_cast<void> (0) : __assert_fail ("isSafeToSpeculativelyExecute(I) && \"Instruction is not safe to speculatively execute!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 378, __PRETTY_FUNCTION__))
378 "Instruction is not safe to speculatively execute!")((isSafeToSpeculativelyExecute(I) && "Instruction is not safe to speculatively execute!"
) ? static_cast<void> (0) : __assert_fail ("isSafeToSpeculativelyExecute(I) && \"Instruction is not safe to speculatively execute!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 378, __PRETTY_FUNCTION__))
;
379 return TTI.getUserCost(I, TargetTransformInfo::TCK_SizeAndLatency);
380}
381
382/// If we have a merge point of an "if condition" as accepted above,
383/// return true if the specified value dominates the block. We
384/// don't handle the true generality of domination here, just a special case
385/// which works well enough for us.
386///
387/// If AggressiveInsts is non-null, and if V does not dominate BB, we check to
388/// see if V (which must be an instruction) and its recursive operands
389/// that do not dominate BB have a combined cost lower than CostRemaining and
390/// are non-trapping. If both are true, the instruction is inserted into the
391/// set and true is returned.
392///
393/// The cost for most non-trapping instructions is defined as 1 except for
394/// Select whose cost is 2.
395///
396/// After this function returns, CostRemaining is decreased by the cost of
397/// V plus its non-dominating operands. If that cost is greater than
398/// CostRemaining, false is returned and CostRemaining is undefined.
399static bool DominatesMergePoint(Value *V, BasicBlock *BB,
400 SmallPtrSetImpl<Instruction *> &AggressiveInsts,
401 int &BudgetRemaining,
402 const TargetTransformInfo &TTI,
403 unsigned Depth = 0) {
404 // It is possible to hit a zero-cost cycle (phi/gep instructions for example),
405 // so limit the recursion depth.
406 // TODO: While this recursion limit does prevent pathological behavior, it
407 // would be better to track visited instructions to avoid cycles.
408 if (Depth == MaxSpeculationDepth)
409 return false;
410
411 Instruction *I = dyn_cast<Instruction>(V);
412 if (!I) {
413 // Non-instructions all dominate instructions, but not all constantexprs
414 // can be executed unconditionally.
415 if (ConstantExpr *C = dyn_cast<ConstantExpr>(V))
416 if (C->canTrap())
417 return false;
418 return true;
419 }
420 BasicBlock *PBB = I->getParent();
421
422 // We don't want to allow weird loops that might have the "if condition" in
423 // the bottom of this block.
424 if (PBB == BB)
425 return false;
426
427 // If this instruction is defined in a block that contains an unconditional
428 // branch to BB, then it must be in the 'conditional' part of the "if
429 // statement". If not, it definitely dominates the region.
430 BranchInst *BI = dyn_cast<BranchInst>(PBB->getTerminator());
431 if (!BI || BI->isConditional() || BI->getSuccessor(0) != BB)
432 return true;
433
434 // If we have seen this instruction before, don't count it again.
435 if (AggressiveInsts.count(I))
436 return true;
437
438 // Okay, it looks like the instruction IS in the "condition". Check to
439 // see if it's a cheap instruction to unconditionally compute, and if it
440 // only uses stuff defined outside of the condition. If so, hoist it out.
441 if (!isSafeToSpeculativelyExecute(I))
442 return false;
443
444 BudgetRemaining -= ComputeSpeculationCost(I, TTI);
445
446 // Allow exactly one instruction to be speculated regardless of its cost
447 // (as long as it is safe to do so).
448 // This is intended to flatten the CFG even if the instruction is a division
449 // or other expensive operation. The speculation of an expensive instruction
450 // is expected to be undone in CodeGenPrepare if the speculation has not
451 // enabled further IR optimizations.
452 if (BudgetRemaining < 0 &&
453 (!SpeculateOneExpensiveInst || !AggressiveInsts.empty() || Depth > 0))
454 return false;
455
456 // Okay, we can only really hoist these out if their operands do
457 // not take us over the cost threshold.
458 for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
459 if (!DominatesMergePoint(*i, BB, AggressiveInsts, BudgetRemaining, TTI,
460 Depth + 1))
461 return false;
462 // Okay, it's safe to do this! Remember this instruction.
463 AggressiveInsts.insert(I);
464 return true;
465}
466
467/// Extract ConstantInt from value, looking through IntToPtr
468/// and PointerNullValue. Return NULL if value is not a constant int.
469static ConstantInt *GetConstantInt(Value *V, const DataLayout &DL) {
470 // Normal constant int.
471 ConstantInt *CI = dyn_cast<ConstantInt>(V);
472 if (CI || !isa<Constant>(V) || !V->getType()->isPointerTy())
473 return CI;
474
475 // This is some kind of pointer constant. Turn it into a pointer-sized
476 // ConstantInt if possible.
477 IntegerType *PtrTy = cast<IntegerType>(DL.getIntPtrType(V->getType()));
478
479 // Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*).
480 if (isa<ConstantPointerNull>(V))
481 return ConstantInt::get(PtrTy, 0);
482
483 // IntToPtr const int.
484 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
485 if (CE->getOpcode() == Instruction::IntToPtr)
486 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(0))) {
487 // The constant is very likely to have the right type already.
488 if (CI->getType() == PtrTy)
489 return CI;
490 else
491 return cast<ConstantInt>(
492 ConstantExpr::getIntegerCast(CI, PtrTy, /*isSigned=*/false));
493 }
494 return nullptr;
495}
496
497namespace {
498
499/// Given a chain of or (||) or and (&&) comparison of a value against a
500/// constant, this will try to recover the information required for a switch
501/// structure.
502/// It will depth-first traverse the chain of comparison, seeking for patterns
503/// like %a == 12 or %a < 4 and combine them to produce a set of integer
504/// representing the different cases for the switch.
505/// Note that if the chain is composed of '||' it will build the set of elements
506/// that matches the comparisons (i.e. any of this value validate the chain)
507/// while for a chain of '&&' it will build the set elements that make the test
508/// fail.
509struct ConstantComparesGatherer {
510 const DataLayout &DL;
511
512 /// Value found for the switch comparison
513 Value *CompValue = nullptr;
514
515 /// Extra clause to be checked before the switch
516 Value *Extra = nullptr;
517
518 /// Set of integers to match in switch
519 SmallVector<ConstantInt *, 8> Vals;
520
521 /// Number of comparisons matched in the and/or chain
522 unsigned UsedICmps = 0;
523
524 /// Construct and compute the result for the comparison instruction Cond
525 ConstantComparesGatherer(Instruction *Cond, const DataLayout &DL) : DL(DL) {
526 gather(Cond);
527 }
528
529 ConstantComparesGatherer(const ConstantComparesGatherer &) = delete;
530 ConstantComparesGatherer &
531 operator=(const ConstantComparesGatherer &) = delete;
532
533private:
534 /// Try to set the current value used for the comparison, it succeeds only if
535 /// it wasn't set before or if the new value is the same as the old one
536 bool setValueOnce(Value *NewVal) {
537 if (CompValue && CompValue != NewVal)
538 return false;
539 CompValue = NewVal;
540 return (CompValue != nullptr);
541 }
542
543 /// Try to match Instruction "I" as a comparison against a constant and
544 /// populates the array Vals with the set of values that match (or do not
545 /// match depending on isEQ).
546 /// Return false on failure. On success, the Value the comparison matched
547 /// against is placed in CompValue.
548 /// If CompValue is already set, the function is expected to fail if a match
549 /// is found but the value compared to is different.
550 bool matchInstruction(Instruction *I, bool isEQ) {
551 // If this is an icmp against a constant, handle this as one of the cases.
552 ICmpInst *ICI;
553 ConstantInt *C;
554 if (!((ICI = dyn_cast<ICmpInst>(I)) &&
555 (C = GetConstantInt(I->getOperand(1), DL)))) {
556 return false;
557 }
558
559 Value *RHSVal;
560 const APInt *RHSC;
561
562 // Pattern match a special case
563 // (x & ~2^z) == y --> x == y || x == y|2^z
564 // This undoes a transformation done by instcombine to fuse 2 compares.
565 if (ICI->getPredicate() == (isEQ ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
566 // It's a little bit hard to see why the following transformations are
567 // correct. Here is a CVC3 program to verify them for 64-bit values:
568
569 /*
570 ONE : BITVECTOR(64) = BVZEROEXTEND(0bin1, 63);
571 x : BITVECTOR(64);
572 y : BITVECTOR(64);
573 z : BITVECTOR(64);
574 mask : BITVECTOR(64) = BVSHL(ONE, z);
575 QUERY( (y & ~mask = y) =>
576 ((x & ~mask = y) <=> (x = y OR x = (y | mask)))
577 );
578 QUERY( (y | mask = y) =>
579 ((x | mask = y) <=> (x = y OR x = (y & ~mask)))
580 );
581 */
582
583 // Please note that each pattern must be a dual implication (<--> or
584 // iff). One directional implication can create spurious matches. If the
585 // implication is only one-way, an unsatisfiable condition on the left
586 // side can imply a satisfiable condition on the right side. Dual
587 // implication ensures that satisfiable conditions are transformed to
588 // other satisfiable conditions and unsatisfiable conditions are
589 // transformed to other unsatisfiable conditions.
590
591 // Here is a concrete example of a unsatisfiable condition on the left
592 // implying a satisfiable condition on the right:
593 //
594 // mask = (1 << z)
595 // (x & ~mask) == y --> (x == y || x == (y | mask))
596 //
597 // Substituting y = 3, z = 0 yields:
598 // (x & -2) == 3 --> (x == 3 || x == 2)
599
600 // Pattern match a special case:
601 /*
602 QUERY( (y & ~mask = y) =>
603 ((x & ~mask = y) <=> (x = y OR x = (y | mask)))
604 );
605 */
606 if (match(ICI->getOperand(0),
607 m_And(m_Value(RHSVal), m_APInt(RHSC)))) {
608 APInt Mask = ~*RHSC;
609 if (Mask.isPowerOf2() && (C->getValue() & ~Mask) == C->getValue()) {
610 // If we already have a value for the switch, it has to match!
611 if (!setValueOnce(RHSVal))
612 return false;
613
614 Vals.push_back(C);
615 Vals.push_back(
616 ConstantInt::get(C->getContext(),
617 C->getValue() | Mask));
618 UsedICmps++;
619 return true;
620 }
621 }
622
623 // Pattern match a special case:
624 /*
625 QUERY( (y | mask = y) =>
626 ((x | mask = y) <=> (x = y OR x = (y & ~mask)))
627 );
628 */
629 if (match(ICI->getOperand(0),
630 m_Or(m_Value(RHSVal), m_APInt(RHSC)))) {
631 APInt Mask = *RHSC;
632 if (Mask.isPowerOf2() && (C->getValue() | Mask) == C->getValue()) {
633 // If we already have a value for the switch, it has to match!
634 if (!setValueOnce(RHSVal))
635 return false;
636
637 Vals.push_back(C);
638 Vals.push_back(ConstantInt::get(C->getContext(),
639 C->getValue() & ~Mask));
640 UsedICmps++;
641 return true;
642 }
643 }
644
645 // If we already have a value for the switch, it has to match!
646 if (!setValueOnce(ICI->getOperand(0)))
647 return false;
648
649 UsedICmps++;
650 Vals.push_back(C);
651 return ICI->getOperand(0);
652 }
653
654 // If we have "x ult 3", for example, then we can add 0,1,2 to the set.
655 ConstantRange Span = ConstantRange::makeAllowedICmpRegion(
656 ICI->getPredicate(), C->getValue());
657
658 // Shift the range if the compare is fed by an add. This is the range
659 // compare idiom as emitted by instcombine.
660 Value *CandidateVal = I->getOperand(0);
661 if (match(I->getOperand(0), m_Add(m_Value(RHSVal), m_APInt(RHSC)))) {
662 Span = Span.subtract(*RHSC);
663 CandidateVal = RHSVal;
664 }
665
666 // If this is an and/!= check, then we are looking to build the set of
667 // value that *don't* pass the and chain. I.e. to turn "x ugt 2" into
668 // x != 0 && x != 1.
669 if (!isEQ)
670 Span = Span.inverse();
671
672 // If there are a ton of values, we don't want to make a ginormous switch.
673 if (Span.isSizeLargerThan(8) || Span.isEmptySet()) {
674 return false;
675 }
676
677 // If we already have a value for the switch, it has to match!
678 if (!setValueOnce(CandidateVal))
679 return false;
680
681 // Add all values from the range to the set
682 for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp)
683 Vals.push_back(ConstantInt::get(I->getContext(), Tmp));
684
685 UsedICmps++;
686 return true;
687 }
688
689 /// Given a potentially 'or'd or 'and'd together collection of icmp
690 /// eq/ne/lt/gt instructions that compare a value against a constant, extract
691 /// the value being compared, and stick the list constants into the Vals
692 /// vector.
693 /// One "Extra" case is allowed to differ from the other.
694 void gather(Value *V) {
695 bool isEQ = match(V, m_LogicalOr(m_Value(), m_Value()));
696
697 // Keep a stack (SmallVector for efficiency) for depth-first traversal
698 SmallVector<Value *, 8> DFT;
699 SmallPtrSet<Value *, 8> Visited;
700
701 // Initialize
702 Visited.insert(V);
703 DFT.push_back(V);
704
705 while (!DFT.empty()) {
706 V = DFT.pop_back_val();
707
708 if (Instruction *I = dyn_cast<Instruction>(V)) {
709 // If it is a || (or && depending on isEQ), process the operands.
710 Value *Op0, *Op1;
711 if (isEQ ? match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1)))
712 : match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
713 if (Visited.insert(Op1).second)
714 DFT.push_back(Op1);
715 if (Visited.insert(Op0).second)
716 DFT.push_back(Op0);
717
718 continue;
719 }
720
721 // Try to match the current instruction
722 if (matchInstruction(I, isEQ))
723 // Match succeed, continue the loop
724 continue;
725 }
726
727 // One element of the sequence of || (or &&) could not be match as a
728 // comparison against the same value as the others.
729 // We allow only one "Extra" case to be checked before the switch
730 if (!Extra) {
731 Extra = V;
732 continue;
733 }
734 // Failed to parse a proper sequence, abort now
735 CompValue = nullptr;
736 break;
737 }
738 }
739};
740
741} // end anonymous namespace
742
743static void EraseTerminatorAndDCECond(Instruction *TI,
744 MemorySSAUpdater *MSSAU = nullptr) {
745 Instruction *Cond = nullptr;
746 if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
747 Cond = dyn_cast<Instruction>(SI->getCondition());
748 } else if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
749 if (BI->isConditional())
750 Cond = dyn_cast<Instruction>(BI->getCondition());
751 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(TI)) {
752 Cond = dyn_cast<Instruction>(IBI->getAddress());
753 }
754
755 TI->eraseFromParent();
756 if (Cond)
757 RecursivelyDeleteTriviallyDeadInstructions(Cond, nullptr, MSSAU);
758}
759
760/// Return true if the specified terminator checks
761/// to see if a value is equal to constant integer value.
762Value *SimplifyCFGOpt::isValueEqualityComparison(Instruction *TI) {
763 Value *CV = nullptr;
764 if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
765 // Do not permit merging of large switch instructions into their
766 // predecessors unless there is only one predecessor.
767 if (!SI->getParent()->hasNPredecessorsOrMore(128 / SI->getNumSuccessors()))
768 CV = SI->getCondition();
769 } else if (BranchInst *BI = dyn_cast<BranchInst>(TI))
770 if (BI->isConditional() && BI->getCondition()->hasOneUse())
771 if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition())) {
772 if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), DL))
773 CV = ICI->getOperand(0);
774 }
775
776 // Unwrap any lossless ptrtoint cast.
777 if (CV) {
778 if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV)) {
779 Value *Ptr = PTII->getPointerOperand();
780 if (PTII->getType() == DL.getIntPtrType(Ptr->getType()))
781 CV = Ptr;
782 }
783 }
784 return CV;
785}
786
787/// Given a value comparison instruction,
788/// decode all of the 'cases' that it represents and return the 'default' block.
789BasicBlock *SimplifyCFGOpt::GetValueEqualityComparisonCases(
790 Instruction *TI, std::vector<ValueEqualityComparisonCase> &Cases) {
791 if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
792 Cases.reserve(SI->getNumCases());
793 for (auto Case : SI->cases())
794 Cases.push_back(ValueEqualityComparisonCase(Case.getCaseValue(),
795 Case.getCaseSuccessor()));
796 return SI->getDefaultDest();
797 }
798
799 BranchInst *BI = cast<BranchInst>(TI);
800 ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());
801 BasicBlock *Succ = BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_NE);
802 Cases.push_back(ValueEqualityComparisonCase(
803 GetConstantInt(ICI->getOperand(1), DL), Succ));
804 return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ);
805}
806
807/// Given a vector of bb/value pairs, remove any entries
808/// in the list that match the specified block.
809static void
810EliminateBlockCases(BasicBlock *BB,
811 std::vector<ValueEqualityComparisonCase> &Cases) {
812 llvm::erase_value(Cases, BB);
813}
814
815/// Return true if there are any keys in C1 that exist in C2 as well.
816static bool ValuesOverlap(std::vector<ValueEqualityComparisonCase> &C1,
817 std::vector<ValueEqualityComparisonCase> &C2) {
818 std::vector<ValueEqualityComparisonCase> *V1 = &C1, *V2 = &C2;
819
820 // Make V1 be smaller than V2.
821 if (V1->size() > V2->size())
822 std::swap(V1, V2);
823
824 if (V1->empty())
825 return false;
826 if (V1->size() == 1) {
827 // Just scan V2.
828 ConstantInt *TheVal = (*V1)[0].Value;
829 for (unsigned i = 0, e = V2->size(); i != e; ++i)
830 if (TheVal == (*V2)[i].Value)
831 return true;
832 }
833
834 // Otherwise, just sort both lists and compare element by element.
835 array_pod_sort(V1->begin(), V1->end());
836 array_pod_sort(V2->begin(), V2->end());
837 unsigned i1 = 0, i2 = 0, e1 = V1->size(), e2 = V2->size();
838 while (i1 != e1 && i2 != e2) {
839 if ((*V1)[i1].Value == (*V2)[i2].Value)
840 return true;
841 if ((*V1)[i1].Value < (*V2)[i2].Value)
842 ++i1;
843 else
844 ++i2;
845 }
846 return false;
847}
848
849// Set branch weights on SwitchInst. This sets the metadata if there is at
850// least one non-zero weight.
851static void setBranchWeights(SwitchInst *SI, ArrayRef<uint32_t> Weights) {
852 // Check that there is at least one non-zero weight. Otherwise, pass
853 // nullptr to setMetadata which will erase the existing metadata.
854 MDNode *N = nullptr;
855 if (llvm::any_of(Weights, [](uint32_t W) { return W != 0; }))
856 N = MDBuilder(SI->getParent()->getContext()).createBranchWeights(Weights);
857 SI->setMetadata(LLVMContext::MD_prof, N);
858}
859
860// Similar to the above, but for branch and select instructions that take
861// exactly 2 weights.
862static void setBranchWeights(Instruction *I, uint32_t TrueWeight,
863 uint32_t FalseWeight) {
864 assert(isa<BranchInst>(I) || isa<SelectInst>(I))((isa<BranchInst>(I) || isa<SelectInst>(I)) ? static_cast
<void> (0) : __assert_fail ("isa<BranchInst>(I) || isa<SelectInst>(I)"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 864, __PRETTY_FUNCTION__))
;
865 // Check that there is at least one non-zero weight. Otherwise, pass
866 // nullptr to setMetadata which will erase the existing metadata.
867 MDNode *N = nullptr;
868 if (TrueWeight || FalseWeight)
869 N = MDBuilder(I->getParent()->getContext())
870 .createBranchWeights(TrueWeight, FalseWeight);
871 I->setMetadata(LLVMContext::MD_prof, N);
872}
873
/// If TI is known to be a terminator instruction and its block is known to
/// only have a single predecessor block, check to see if that predecessor is
/// also a value comparison with the same value, and if that comparison
/// determines the outcome of this comparison. If so, simplify TI. This does a
/// very limited form of jump threading.
bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor(
    Instruction *TI, BasicBlock *Pred, IRBuilder<> &Builder) {
  Value *PredVal = isValueEqualityComparison(Pred->getTerminator());
  if (!PredVal)
    return false; // Not a value comparison in predecessor.

  Value *ThisVal = isValueEqualityComparison(TI);
  assert(ThisVal && "This isn't a value comparison!!");
  if (ThisVal != PredVal)
    return false; // Different predicates.

  // TODO: Preserve branch weight metadata, similarly to how
  // FoldValueComparisonIntoPredecessors preserves it.

  // Find out information about when control will move from Pred to TI's block.
  std::vector<ValueEqualityComparisonCase> PredCases;
  BasicBlock *PredDef =
      GetValueEqualityComparisonCases(Pred->getTerminator(), PredCases);
  EliminateBlockCases(PredDef, PredCases); // Remove default from cases.

  // Find information about how control leaves this block.
  std::vector<ValueEqualityComparisonCase> ThisCases;
  BasicBlock *ThisDef = GetValueEqualityComparisonCases(TI, ThisCases);
  EliminateBlockCases(ThisDef, ThisCases); // Remove default from cases.

  // If TI's block is the default block from Pred's comparison, potentially
  // simplify TI based on this knowledge.
  if (PredDef == TI->getParent()) {
    // If we are here, we know that the value is none of those cases listed in
    // PredCases. If there are any cases in ThisCases that are in PredCases, we
    // can simplify TI.
    if (!ValuesOverlap(PredCases, ThisCases))
      return false;

    if (isa<BranchInst>(TI)) {
      // Okay, one of the successors of this condbr is dead. Convert it to a
      // uncond br.
      assert(ThisCases.size() == 1 && "Branch can only have one case!");
      // Insert the new branch.
      Instruction *NI = Builder.CreateBr(ThisDef);
      (void)NI;

      // Remove PHI node entries for the dead edge.
      ThisCases[0].Dest->removePredecessor(PredDef);

      LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
                        << "Through successor TI: " << *TI << "Leaving: " << *NI
                        << "\n");

      EraseTerminatorAndDCECond(TI);

      if (DTU)
        DTU->applyUpdates(
            {{DominatorTree::Delete, PredDef, ThisCases[0].Dest}});

      return true;
    }

    SwitchInstProfUpdateWrapper SI = *cast<SwitchInst>(TI);
    // Okay, TI has cases that are statically dead, prune them away.
    SmallPtrSet<Constant *, 16> DeadCases;
    for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
      DeadCases.insert(PredCases[i].Value);

    LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
                      << "Through successor TI: " << *TI);

    // Iterate the cases in reverse so that removeCase (which swaps the
    // removed case with the last one) does not disturb cases we have yet to
    // visit. NumPerSuccessorCases tracks how many live cases still target
    // each successor so we can tell which CFG edges disappear entirely.
    SmallMapVector<BasicBlock *, int, 8> NumPerSuccessorCases;
    for (SwitchInst::CaseIt i = SI->case_end(), e = SI->case_begin(); i != e;) {
      --i;
      auto *Successor = i->getCaseSuccessor();
      ++NumPerSuccessorCases[Successor];
      if (DeadCases.count(i->getCaseValue())) {
        Successor->removePredecessor(PredDef);
        SI.removeCase(i);
        --NumPerSuccessorCases[Successor];
      }
    }

    // An edge is deleted from the CFG only if no remaining case (and not the
    // default) targets that successor.
    std::vector<DominatorTree::UpdateType> Updates;
    for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
      if (I.second == 0)
        Updates.push_back({DominatorTree::Delete, PredDef, I.first});
    if (DTU)
      DTU->applyUpdates(Updates);

    LLVM_DEBUG(dbgs() << "Leaving: " << *TI << "\n");
    return true;
  }

  // Otherwise, TI's block must correspond to some matched value. Find out
  // which value (or set of values) this is.
  ConstantInt *TIV = nullptr;
  BasicBlock *TIBB = TI->getParent();
  for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
    if (PredCases[i].Dest == TIBB) {
      if (TIV)
        return false; // Cannot handle multiple values coming to this block.
      TIV = PredCases[i].Value;
    }
  assert(TIV && "No edge from pred to succ?");

  // Okay, we found the one constant that our value can be if we get into TI's
  // BB. Find out which successor will unconditionally be branched to.
  BasicBlock *TheRealDest = nullptr;
  for (unsigned i = 0, e = ThisCases.size(); i != e; ++i)
    if (ThisCases[i].Value == TIV) {
      TheRealDest = ThisCases[i].Dest;
      break;
    }

  // If not handled by any explicit cases, it is handled by the default case.
  if (!TheRealDest)
    TheRealDest = ThisDef;

  SmallSetVector<BasicBlock *, 2> RemovedSuccs;

  // Remove PHI node entries for dead edges. CheckEdge is nulled after the
  // first encounter of TheRealDest so that duplicate successor edges to the
  // surviving destination still get their extra PHI entries removed.
  BasicBlock *CheckEdge = TheRealDest;
  for (BasicBlock *Succ : successors(TIBB))
    if (Succ != CheckEdge) {
      if (Succ != TheRealDest)
        RemovedSuccs.insert(Succ);
      Succ->removePredecessor(TIBB);
    } else
      CheckEdge = nullptr;

  // Insert the new branch.
  Instruction *NI = Builder.CreateBr(TheRealDest);
  (void)NI;

  LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
                    << "Through successor TI: " << *TI << "Leaving: " << *NI
                    << "\n");

  EraseTerminatorAndDCECond(TI);
  if (DTU) {
    SmallVector<DominatorTree::UpdateType, 2> Updates;
    Updates.reserve(RemovedSuccs.size());
    for (auto *RemovedSucc : RemovedSuccs)
      Updates.push_back({DominatorTree::Delete, TIBB, RemovedSucc});
    DTU->applyUpdates(Updates);
  }
  return true;
}
1024
1025namespace {
1026
1027/// This class implements a stable ordering of constant
1028/// integers that does not depend on their address. This is important for
1029/// applications that sort ConstantInt's to ensure uniqueness.
1030struct ConstantIntOrdering {
1031 bool operator()(const ConstantInt *LHS, const ConstantInt *RHS) const {
1032 return LHS->getValue().ult(RHS->getValue());
1033 }
1034};
1035
1036} // end anonymous namespace
1037
1038static int ConstantIntSortPredicate(ConstantInt *const *P1,
1039 ConstantInt *const *P2) {
1040 const ConstantInt *LHS = *P1;
1041 const ConstantInt *RHS = *P2;
1042 if (LHS == RHS)
1043 return 0;
1044 return LHS->getValue().ult(RHS->getValue()) ? 1 : -1;
1045}
1046
1047static inline bool HasBranchWeights(const Instruction *I) {
1048 MDNode *ProfMD = I->getMetadata(LLVMContext::MD_prof);
1049 if (ProfMD && ProfMD->getOperand(0))
1050 if (MDString *MDS = dyn_cast<MDString>(ProfMD->getOperand(0)))
1051 return MDS->getString().equals("branch_weights");
1052
1053 return false;
1054}
1055
1056/// Get Weights of a given terminator, the default weight is at the front
1057/// of the vector. If TI is a conditional eq, we need to swap the branch-weight
1058/// metadata.
1059static void GetBranchWeights(Instruction *TI,
1060 SmallVectorImpl<uint64_t> &Weights) {
1061 MDNode *MD = TI->getMetadata(LLVMContext::MD_prof);
1062 assert(MD)((MD) ? static_cast<void> (0) : __assert_fail ("MD", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 1062, __PRETTY_FUNCTION__))
;
1063 for (unsigned i = 1, e = MD->getNumOperands(); i < e; ++i) {
1064 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(i));
1065 Weights.push_back(CI->getValue().getZExtValue());
1066 }
1067
1068 // If TI is a conditional eq, the default case is the false case,
1069 // and the corresponding branch-weight data is at index 2. We swap the
1070 // default weight to be the first entry.
1071 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
1072 assert(Weights.size() == 2)((Weights.size() == 2) ? static_cast<void> (0) : __assert_fail
("Weights.size() == 2", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 1072, __PRETTY_FUNCTION__))
;
1073 ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());
1074 if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
1075 std::swap(Weights.front(), Weights.back());
1076 }
1077}
1078
1079/// Keep halving the weights until all can fit in uint32_t.
1080static void FitWeights(MutableArrayRef<uint64_t> Weights) {
1081 uint64_t Max = *std::max_element(Weights.begin(), Weights.end());
1082 if (Max > UINT_MAX(2147483647 *2U +1U)) {
1083 unsigned Offset = 32 - countLeadingZeros(Max);
1084 for (uint64_t &I : Weights)
1085 I >>= Offset;
1086 }
1087}
1088
1089/// The specified terminator is a value equality comparison instruction
1090/// (either a switch or a branch on "X == c").
1091/// See if any of the predecessors of the terminator block are value comparisons
1092/// on the same value. If so, and if safe to do so, fold them together.
bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(Instruction *TI,
                                                         IRBuilder<> &Builder) {
  BasicBlock *BB = TI->getParent();
  Value *CV = isValueEqualityComparison(TI); // CondVal
  assert(CV && "Not a comparison?");

  bool Changed = false;

  // Bump the statistic exactly once if any predecessor was folded.
  auto _ = make_scope_exit([&]() {
    if (Changed)
      ++NumFoldValueComparisonIntoPredecessors;
  });

  SmallSetVector<BasicBlock *, 16> Preds(pred_begin(BB), pred_end(BB));
  while (!Preds.empty()) {
    BasicBlock *Pred = Preds.pop_back_val();

    // See if the predecessor is a comparison with the same value.
    Instruction *PTI = Pred->getTerminator();
    Value *PCV = isValueEqualityComparison(PTI); // PredCondVal

    if (PCV == CV && TI != PTI) {
      // Split any critical edges that would make merging the two
      // terminators unsafe; bail out if splitting fails.
      SmallSetVector<BasicBlock*, 4> FailBlocks;
      if (!SafeToMergeTerminators(TI, PTI, &FailBlocks)) {
        for (auto *Succ : FailBlocks) {
          if (!SplitBlockPredecessors(Succ, TI->getParent(), ".fold.split",
                                      DTU))
            return false;
        }
      }

      std::vector<DominatorTree::UpdateType> Updates;

      // Figure out which 'cases' to copy from SI to PSI.
      std::vector<ValueEqualityComparisonCase> BBCases;
      BasicBlock *BBDefault = GetValueEqualityComparisonCases(TI, BBCases);

      std::vector<ValueEqualityComparisonCase> PredCases;
      BasicBlock *PredDefault = GetValueEqualityComparisonCases(PTI, PredCases);

      // Based on whether the default edge from PTI goes to BB or not, fill in
      // PredCases and PredDefault with the new switch cases we would like to
      // build.
      SmallMapVector<BasicBlock *, int, 8> NewSuccessors;

      // Update the branch weight metadata along the way
      SmallVector<uint64_t, 8> Weights;
      bool PredHasWeights = HasBranchWeights(PTI);
      bool SuccHasWeights = HasBranchWeights(TI);

      if (PredHasWeights) {
        GetBranchWeights(PTI, Weights);
        // branch-weight metadata is inconsistent here.
        if (Weights.size() != 1 + PredCases.size())
          PredHasWeights = SuccHasWeights = false;
      } else if (SuccHasWeights)
        // If there are no predecessor weights but there are successor weights,
        // populate Weights with 1, which will later be scaled to the sum of
        // successor's weights
        Weights.assign(1 + PredCases.size(), 1);

      SmallVector<uint64_t, 8> SuccWeights;
      if (SuccHasWeights) {
        GetBranchWeights(TI, SuccWeights);
        // branch-weight metadata is inconsistent here.
        if (SuccWeights.size() != 1 + BBCases.size())
          PredHasWeights = SuccHasWeights = false;
      } else if (PredHasWeights)
        SuccWeights.assign(1 + BBCases.size(), 1);

      if (PredDefault == BB) {
        // If this is the default destination from PTI, only the edges in TI
        // that don't occur in PTI, or that branch to BB will be activated.
        std::set<ConstantInt *, ConstantIntOrdering> PTIHandled;
        for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
          if (PredCases[i].Dest != BB)
            PTIHandled.insert(PredCases[i].Value);
          else {
            // The default destination is BB, we don't need explicit targets.
            // (swap-with-back + pop_back deletes entry i in O(1); i/e are
            // adjusted so the swapped-in element is revisited.)
            std::swap(PredCases[i], PredCases.back());

            if (PredHasWeights || SuccHasWeights) {
              // Increase weight for the default case.
              Weights[0] += Weights[i + 1];
              std::swap(Weights[i + 1], Weights.back());
              Weights.pop_back();
            }

            PredCases.pop_back();
            --i;
            --e;
          }

        // Reconstruct the new switch statement we will be building.
        if (PredDefault != BBDefault) {
          PredDefault->removePredecessor(Pred);
          if (PredDefault != BB)
            Updates.push_back({DominatorTree::Delete, Pred, PredDefault});
          PredDefault = BBDefault;
          ++NewSuccessors[BBDefault];
        }

        unsigned CasesFromPred = Weights.size();
        uint64_t ValidTotalSuccWeight = 0;
        for (unsigned i = 0, e = BBCases.size(); i != e; ++i)
          if (!PTIHandled.count(BBCases[i].Value) &&
              BBCases[i].Dest != BBDefault) {
            PredCases.push_back(BBCases[i]);
            ++NewSuccessors[BBCases[i].Dest];
            if (SuccHasWeights || PredHasWeights) {
              // The default weight is at index 0, so weight for the ith case
              // should be at index i+1. Scale the cases from successor by
              // PredDefaultWeight (Weights[0]).
              Weights.push_back(Weights[0] * SuccWeights[i + 1]);
              ValidTotalSuccWeight += SuccWeights[i + 1];
            }
          }

        if (SuccHasWeights || PredHasWeights) {
          ValidTotalSuccWeight += SuccWeights[0];
          // Scale the cases from predecessor by ValidTotalSuccWeight.
          for (unsigned i = 1; i < CasesFromPred; ++i)
            Weights[i] *= ValidTotalSuccWeight;
          // Scale the default weight by SuccDefaultWeight (SuccWeights[0]).
          Weights[0] *= SuccWeights[0];
        }
      } else {
        // If this is not the default destination from PSI, only the edges
        // in SI that occur in PSI with a destination of BB will be
        // activated.
        std::set<ConstantInt *, ConstantIntOrdering> PTIHandled;
        std::map<ConstantInt *, uint64_t> WeightsForHandled;
        for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
          if (PredCases[i].Dest == BB) {
            PTIHandled.insert(PredCases[i].Value);

            if (PredHasWeights || SuccHasWeights) {
              WeightsForHandled[PredCases[i].Value] = Weights[i + 1];
              std::swap(Weights[i + 1], Weights.back());
              Weights.pop_back();
            }

            std::swap(PredCases[i], PredCases.back());
            PredCases.pop_back();
            --i;
            --e;
          }

        // Okay, now we know which constants were sent to BB from the
        // predecessor. Figure out where they will all go now.
        for (unsigned i = 0, e = BBCases.size(); i != e; ++i)
          if (PTIHandled.count(BBCases[i].Value)) {
            // If this is one we are capable of getting...
            if (PredHasWeights || SuccHasWeights)
              Weights.push_back(WeightsForHandled[BBCases[i].Value]);
            PredCases.push_back(BBCases[i]);
            ++NewSuccessors[BBCases[i].Dest];
            PTIHandled.erase(
                BBCases[i].Value); // This constant is taken care of
          }

        // If there are any constants vectored to BB that TI doesn't handle,
        // they must go to the default destination of TI.
        for (ConstantInt *I : PTIHandled) {
          if (PredHasWeights || SuccHasWeights)
            Weights.push_back(WeightsForHandled[I]);
          PredCases.push_back(ValueEqualityComparisonCase(I, BBDefault));
          ++NewSuccessors[BBDefault];
        }
      }

      // Okay, at this point, we know which new successor Pred will get. Make
      // sure we update the number of entries in the PHI nodes for these
      // successors.
      for (const std::pair<BasicBlock *, int /*Num*/> &NewSuccessor :
           NewSuccessors) {
        for (auto I : seq(0, NewSuccessor.second)) {
          (void)I;
          AddPredecessorToBlock(NewSuccessor.first, Pred, BB);
        }
        if (!is_contained(successors(Pred), NewSuccessor.first))
          Updates.push_back({DominatorTree::Insert, Pred, NewSuccessor.first});
      }

      Builder.SetInsertPoint(PTI);
      // Convert pointer to int before we switch.
      if (CV->getType()->isPointerTy()) {
        CV = Builder.CreatePtrToInt(CV, DL.getIntPtrType(CV->getType()),
                                    "magicptr");
      }

      // Now that the successors are updated, create the new Switch instruction.
      SwitchInst *NewSI =
          Builder.CreateSwitch(CV, PredDefault, PredCases.size());
      NewSI->setDebugLoc(PTI->getDebugLoc());
      for (ValueEqualityComparisonCase &V : PredCases)
        NewSI->addCase(V.Value, V.Dest);

      if (PredHasWeights || SuccHasWeights) {
        // Halve the weights if any of them cannot fit in an uint32_t
        FitWeights(Weights);

        SmallVector<uint32_t, 8> MDWeights(Weights.begin(), Weights.end());

        setBranchWeights(NewSI, MDWeights);
      }

      EraseTerminatorAndDCECond(PTI);

      // Okay, last check. If BB is still a successor of PSI, then we must
      // have an infinite loop case. If so, add an infinitely looping block
      // to handle the case to preserve the behavior of the code.
      BasicBlock *InfLoopBlock = nullptr;
      for (unsigned i = 0, e = NewSI->getNumSuccessors(); i != e; ++i)
        if (NewSI->getSuccessor(i) == BB) {
          if (!InfLoopBlock) {
            // Insert it at the end of the function, because it's either code,
            // or it won't matter if it's hot. :)
            InfLoopBlock = BasicBlock::Create(BB->getContext(), "infloop",
                                              BB->getParent());
            BranchInst::Create(InfLoopBlock, InfLoopBlock);
            Updates.push_back(
                {DominatorTree::Insert, InfLoopBlock, InfLoopBlock});
          }
          NewSI->setSuccessor(i, InfLoopBlock);
        }

      if (InfLoopBlock)
        Updates.push_back({DominatorTree::Insert, Pred, InfLoopBlock});

      Updates.push_back({DominatorTree::Delete, Pred, BB});

      if (DTU)
        DTU->applyUpdates(Updates);

      Changed = true;
    }
  }
  return Changed;
}
1333
1334// If we would need to insert a select that uses the value of this invoke
1335// (comments in HoistThenElseCodeToIf explain why we would need to do this), we
1336// can't hoist the invoke, as there is nowhere to put the select in this case.
1337static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2,
1338 Instruction *I1, Instruction *I2) {
1339 for (BasicBlock *Succ : successors(BB1)) {
1340 for (const PHINode &PN : Succ->phis()) {
1341 Value *BB1V = PN.getIncomingValueForBlock(BB1);
1342 Value *BB2V = PN.getIncomingValueForBlock(BB2);
1343 if (BB1V != BB2V && (BB1V == I1 || BB2V == I2)) {
1344 return false;
1345 }
1346 }
1347 }
1348 return true;
1349}
1350
1351static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I, bool PtrValueMayBeModified = false);
1352
/// Given a conditional branch that goes to BB1 and BB2, hoist any common code
/// in the two blocks up into the branch block. The caller of this function
/// guarantees that BI's block dominates BB1 and BB2.
bool SimplifyCFGOpt::HoistThenElseCodeToIf(BranchInst *BI,
                                           const TargetTransformInfo &TTI) {
  // This does very trivial matching, with limited scanning, to find identical
  // instructions in the two blocks. In particular, we don't want to get into
  // O(M*N) situations here where M and N are the sizes of BB1 and BB2. As
  // such, we currently just scan for obviously identical instructions in an
  // identical order.
  BasicBlock *BB1 = BI->getSuccessor(0); // The true destination.
  BasicBlock *BB2 = BI->getSuccessor(1); // The false destination

  BasicBlock::iterator BB1_Itr = BB1->begin();
  BasicBlock::iterator BB2_Itr = BB2->begin();

  Instruction *I1 = &*BB1_Itr++, *I2 = &*BB2_Itr++;
  // Skip debug info if it is not identical. A pair of identical debug
  // intrinsics can be hoisted as a pair below; non-identical ones are simply
  // stepped over here so the first "real" instructions are compared.
  DbgInfoIntrinsic *DBI1 = dyn_cast<DbgInfoIntrinsic>(I1);
  DbgInfoIntrinsic *DBI2 = dyn_cast<DbgInfoIntrinsic>(I2);
  if (!DBI1 || !DBI2 || !DBI1->isIdenticalToWhenDefined(DBI2)) {
    while (isa<DbgInfoIntrinsic>(I1))
      I1 = &*BB1_Itr++;
    while (isa<DbgInfoIntrinsic>(I2))
      I2 = &*BB2_Itr++;
  }
  // FIXME: Can we define a safety predicate for CallBr?
  if (isa<PHINode>(I1) || !I1->isIdenticalToWhenDefined(I2) ||
      (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)) ||
      isa<CallBrInst>(I1))
    return false;

  BasicBlock *BIParent = BI->getParent();

  bool Changed = false;

  // Bump the block-pair statistic on every exit path once anything has
  // actually been hoisted.
  auto _ = make_scope_exit([&]() {
    if (Changed)
      ++NumHoistCommonCode;
  });

  do {
    // If we are hoisting the terminator instruction, don't move one (making a
    // broken BB), instead clone it, and remove BI.
    if (I1->isTerminator())
      goto HoistTerminator;

    // If we're going to hoist a call, make sure that the two instructions we're
    // commoning/hoisting are both marked with musttail, or neither of them is
    // marked as such. Otherwise, we might end up in a situation where we hoist
    // from a block where the terminator is a `ret` to a block where the terminator
    // is a `br`, and `musttail` calls expect to be followed by a return.
    auto *C1 = dyn_cast<CallInst>(I1);
    auto *C2 = dyn_cast<CallInst>(I2);
    if (C1 && C2)
      if (C1->isMustTailCall() != C2->isMustTailCall())
        return Changed;

    if (!TTI.isProfitableToHoist(I1) || !TTI.isProfitableToHoist(I2))
      return Changed;

    // If any of the two call sites has nomerge attribute, stop hoisting.
    if (const auto *CB1 = dyn_cast<CallBase>(I1))
      if (CB1->cannotMerge())
        return Changed;
    if (const auto *CB2 = dyn_cast<CallBase>(I2))
      if (CB2->cannotMerge())
        return Changed;

    if (isa<DbgInfoIntrinsic>(I1) || isa<DbgInfoIntrinsic>(I2)) {
      assert(isa<DbgInfoIntrinsic>(I1) && isa<DbgInfoIntrinsic>(I2));
      // The debug location is an integral part of a debug info intrinsic
      // and can't be separated from it or replaced. Instead of attempting
      // to merge locations, simply hoist both copies of the intrinsic.
      BIParent->getInstList().splice(BI->getIterator(),
                                     BB1->getInstList(), I1);
      BIParent->getInstList().splice(BI->getIterator(),
                                     BB2->getInstList(), I2);
      Changed = true;
    } else {
      // For a normal instruction, we just move one to right before the branch,
      // then replace all uses of the other with the first. Finally, we remove
      // the now redundant second instruction.
      BIParent->getInstList().splice(BI->getIterator(),
                                     BB1->getInstList(), I1);
      if (!I2->use_empty())
        I2->replaceAllUsesWith(I1);
      I1->andIRFlags(I2);
      // Only metadata kinds that stay valid when the instruction runs on both
      // paths may be kept; combineMetadata intersects exactly these kinds.
      unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
                             LLVMContext::MD_range,
                             LLVMContext::MD_fpmath,
                             LLVMContext::MD_invariant_load,
                             LLVMContext::MD_nonnull,
                             LLVMContext::MD_invariant_group,
                             LLVMContext::MD_align,
                             LLVMContext::MD_dereferenceable,
                             LLVMContext::MD_dereferenceable_or_null,
                             LLVMContext::MD_mem_parallel_loop_access,
                             LLVMContext::MD_access_group,
                             LLVMContext::MD_preserve_access_index};
      combineMetadata(I1, I2, KnownIDs, true);

      // I1 and I2 are being combined into a single instruction. Its debug
      // location is the merged locations of the original instructions.
      I1->applyMergedLocation(I1->getDebugLoc(), I2->getDebugLoc());

      I2->eraseFromParent();
      Changed = true;
    }
    ++NumHoistCommonInstrs;

    // Advance to the next candidate pair in lockstep.
    I1 = &*BB1_Itr++;
    I2 = &*BB2_Itr++;
    // Skip debug info if it is not identical.
    DbgInfoIntrinsic *DBI1 = dyn_cast<DbgInfoIntrinsic>(I1);
    DbgInfoIntrinsic *DBI2 = dyn_cast<DbgInfoIntrinsic>(I2);
    if (!DBI1 || !DBI2 || !DBI1->isIdenticalToWhenDefined(DBI2)) {
      while (isa<DbgInfoIntrinsic>(I1))
        I1 = &*BB1_Itr++;
      while (isa<DbgInfoIntrinsic>(I2))
        I2 = &*BB2_Itr++;
    }
  } while (I1->isIdenticalToWhenDefined(I2));

  return true;

HoistTerminator:
  // It may not be possible to hoist an invoke.
  // FIXME: Can we define a safety predicate for CallBr?
  if (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2))
    return Changed;

  // TODO: callbr hoisting currently disabled pending further study.
  if (isa<CallBrInst>(I1))
    return Changed;

  // First pass: verify every PHI disagreement in the successors can be turned
  // into a select without introducing UB or unsafe speculation.
  for (BasicBlock *Succ : successors(BB1)) {
    for (PHINode &PN : Succ->phis()) {
      Value *BB1V = PN.getIncomingValueForBlock(BB1);
      Value *BB2V = PN.getIncomingValueForBlock(BB2);
      if (BB1V == BB2V)
        continue;

      // Check for passingValueIsAlwaysUndefined here because we would rather
      // eliminate undefined control flow then converting it to a select.
      if (passingValueIsAlwaysUndefined(BB1V, &PN) ||
          passingValueIsAlwaysUndefined(BB2V, &PN))
        return Changed;

      if (isa<ConstantExpr>(BB1V) && !isSafeToSpeculativelyExecute(BB1V))
        return Changed;
      if (isa<ConstantExpr>(BB2V) && !isSafeToSpeculativelyExecute(BB2V))
        return Changed;
    }
  }

  // Okay, it is safe to hoist the terminator.
  Instruction *NT = I1->clone();
  BIParent->getInstList().insert(BI->getIterator(), NT);
  if (!NT->getType()->isVoidTy()) {
    I1->replaceAllUsesWith(NT);
    I2->replaceAllUsesWith(NT);
    NT->takeName(I1);
  }
  Changed = true;
  ++NumHoistCommonInstrs;

  // Ensure terminator gets a debug location, even an unknown one, in case
  // it involves inlinable calls.
  NT->applyMergedLocation(I1->getDebugLoc(), I2->getDebugLoc());

  // PHIs created below will adopt NT's merged DebugLoc.
  IRBuilder<NoFolder> Builder(NT);

  // Hoisting one of the terminators from our successor is a great thing.
  // Unfortunately, the successors of the if/else blocks may have PHI nodes in
  // them. If they do, all PHI entries for BB1/BB2 must agree for all PHI
  // nodes, so we insert select instruction to compute the final result.
  // InsertedSelects dedupes selects for repeated (BB1V, BB2V) pairs.
  std::map<std::pair<Value *, Value *>, SelectInst *> InsertedSelects;
  for (BasicBlock *Succ : successors(BB1)) {
    for (PHINode &PN : Succ->phis()) {
      Value *BB1V = PN.getIncomingValueForBlock(BB1);
      Value *BB2V = PN.getIncomingValueForBlock(BB2);
      if (BB1V == BB2V)
        continue;

      // These values do not agree. Insert a select instruction before NT
      // that determines the right value.
      SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)];
      if (!SI) {
        // Propagate fast-math-flags from phi node to its replacement select.
        IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
        if (isa<FPMathOperator>(PN))
          Builder.setFastMathFlags(PN.getFastMathFlags());

        SI = cast<SelectInst>(
            Builder.CreateSelect(BI->getCondition(), BB1V, BB2V,
                                 BB1V->getName() + "." + BB2V->getName(), BI));
      }

      // Make the PHI node use the select for all incoming values for BB1/BB2
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        if (PN.getIncomingBlock(i) == BB1 || PN.getIncomingBlock(i) == BB2)
          PN.setIncomingValue(i, SI);
    }
  }

  SmallVector<DominatorTree::UpdateType, 4> Updates;

  // Update any PHI nodes in our new successors.
  for (BasicBlock *Succ : successors(BB1)) {
    AddPredecessorToBlock(Succ, BIParent, BB1);
    Updates.push_back({DominatorTree::Insert, BIParent, Succ});
  }
  for (BasicBlock *Succ : successors(BI))
    Updates.push_back({DominatorTree::Delete, BIParent, Succ});

  EraseTerminatorAndDCECond(BI);
  if (DTU)
    DTU->applyUpdates(Updates);
  return Changed;
}
1575
1576// Check lifetime markers.
1577static bool isLifeTimeMarker(const Instruction *I) {
1578 if (auto II = dyn_cast<IntrinsicInst>(I)) {
1579 switch (II->getIntrinsicID()) {
1580 default:
1581 break;
1582 case Intrinsic::lifetime_start:
1583 case Intrinsic::lifetime_end:
1584 return true;
1585 }
1586 }
1587 return false;
1588}
1589
1590// TODO: Refine this. This should avoid cases like turning constant memcpy sizes
1591// into variables.
1592static bool replacingOperandWithVariableIsCheap(const Instruction *I,
1593 int OpIdx) {
1594 return !isa<IntrinsicInst>(I);
1595}
1596
// All instructions in Insts belong to different blocks that all unconditionally
// branch to a common successor. Analyze each instruction and return true if it
// would be possible to sink them into their successor, creating one common
// instruction instead. For every value that would be required to be provided by
// PHI node (because an operand varies in each input block), add to PHIOperands.
static bool canSinkInstructions(
    ArrayRef<Instruction *> Insts,
    DenseMap<Instruction *, SmallVector<Value *, 4>> &PHIOperands) {
  // Prune out obviously bad instructions to move. Each instruction must have
  // exactly zero or one use, and we check later that use is by a single, common
  // PHI instruction in the successor.
  bool HasUse = !Insts.front()->user_empty();
  for (auto *I : Insts) {
    // These instructions may change or break semantics if moved.
    if (isa<PHINode>(I) || I->isEHPad() || isa<AllocaInst>(I) ||
        I->getType()->isTokenTy())
      return false;

    // Conservatively return false if I is an inline-asm instruction. Sinking
    // and merging inline-asm instructions can potentially create arguments
    // that cannot satisfy the inline-asm constraints.
    // If the instruction has nomerge attribute, return false.
    if (const auto *C = dyn_cast<CallBase>(I))
      if (C->isInlineAsm() || C->cannotMerge())
        return false;

    // Each instruction must have zero or one use; all must agree with the
    // first instruction's use count.
    if (HasUse && !I->hasOneUse())
      return false;
    if (!HasUse && !I->user_empty())
      return false;
  }

  // All candidates must be the same operation (opcode, types, attributes) as
  // the first one, otherwise no single common instruction can replace them.
  const Instruction *I0 = Insts.front();
  for (auto *I : Insts)
    if (!I->isSameOperationAs(I0))
      return false;

  // All instructions in Insts are known to be the same opcode. If they have a
  // use, check that the only user is a PHI or in the same block as the
  // instruction, because if a user is in the same block as an instruction we're
  // contemplating sinking, it must already be determined to be sinkable.
  if (HasUse) {
    auto *PNUse = dyn_cast<PHINode>(*I0->user_begin());
    auto *Succ = I0->getParent()->getTerminator()->getSuccessor(0);
    if (!all_of(Insts, [&PNUse, &Succ](const Instruction *I) -> bool {
          auto *U = cast<Instruction>(*I->user_begin());
          return (PNUse &&
                  PNUse->getParent() == Succ &&
                  PNUse->getIncomingValueForBlock(I->getParent()) == I) ||
                 U->getParent() == I->getParent();
        }))
      return false;
  }

  // Because SROA can't handle speculating stores of selects, try not to sink
  // loads, stores or lifetime markers of allocas when we'd have to create a
  // PHI for the address operand. Also, because it is likely that loads or
  // stores of allocas will disappear when Mem2Reg/SROA is run, don't sink
  // them.
  // This can cause code churn which can have unintended consequences down
  // the line - see https://llvm.org/bugs/show_bug.cgi?id=30244.
  // FIXME: This is a workaround for a deficiency in SROA - see
  // https://llvm.org/bugs/show_bug.cgi?id=30188
  if (isa<StoreInst>(I0) && any_of(Insts, [](const Instruction *I) {
        return isa<AllocaInst>(I->getOperand(1)->stripPointerCasts());
      }))
    return false;
  if (isa<LoadInst>(I0) && any_of(Insts, [](const Instruction *I) {
        return isa<AllocaInst>(I->getOperand(0)->stripPointerCasts());
      }))
    return false;
  if (isLifeTimeMarker(I0) && any_of(Insts, [](const Instruction *I) {
        return isa<AllocaInst>(I->getOperand(1)->stripPointerCasts());
      }))
    return false;

  // Examine each operand position. Operands that differ across the candidate
  // instructions must be replaceable by a PHI; record them in PHIOperands.
  for (unsigned OI = 0, OE = I0->getNumOperands(); OI != OE; ++OI) {
    Value *Op = I0->getOperand(OI);
    if (Op->getType()->isTokenTy())
      // Don't touch any operand of token type.
      return false;

    auto SameAsI0 = [&I0, OI](const Instruction *I) {
      assert(I->getNumOperands() == I0->getNumOperands());
      return I->getOperand(OI) == I0->getOperand(OI);
    };
    if (!all_of(Insts, SameAsI0)) {
      if ((isa<Constant>(Op) && !replacingOperandWithVariableIsCheap(I0, OI)) ||
          !canReplaceOperandWithVariable(I0, OI))
        // We can't create a PHI from this GEP.
        return false;
      // Don't create indirect calls! The called value is the final operand.
      if (isa<CallBase>(I0) && OI == OE - 1) {
        // FIXME: if the call was *already* indirect, we should do this.
        return false;
      }
      for (auto *I : Insts)
        PHIOperands[I].push_back(I->getOperand(OI));
    }
  }
  return true;
}
1700
// Assuming canSinkLastInstruction(Blocks) has returned true, sink the last
// instruction of every block in Blocks to their common successor, commoning
// into one instruction.
static bool sinkLastInstruction(ArrayRef<BasicBlock*> Blocks) {
  // All blocks branch unconditionally to the same successor; take it from the
  // first block's terminator.
  auto *BBEnd = Blocks[0]->getTerminator()->getSuccessor(0);

  // canSinkLastInstruction returning true guarantees that every block has at
  // least one non-terminator instruction.
  SmallVector<Instruction*,4> Insts;
  for (auto *BB : Blocks) {
    Instruction *I = BB->getTerminator();
    do {
      I = I->getPrevNode();
    } while (isa<DbgInfoIntrinsic>(I) && I != &BB->front());
    if (!isa<DbgInfoIntrinsic>(I))
      Insts.push_back(I);
  }

  // The only checking we need to do now is that all users of all instructions
  // are the same PHI node. canSinkLastInstruction should have checked this but
  // it is slightly over-aggressive - it gets confused by commutative instructions
  // so double-check it here.
  Instruction *I0 = Insts.front();
  if (!I0->user_empty()) {
    auto *PNUse = dyn_cast<PHINode>(*I0->user_begin());
    if (!all_of(Insts, [&PNUse](const Instruction *I) -> bool {
          auto *U = cast<Instruction>(*I->user_begin());
          return U == PNUse;
        }))
      return false;
  }

  // We don't need to do any more checking here; canSinkLastInstruction should
  // have done it all for us. Build the operand list for the single common
  // instruction, inserting PHIs in the successor where operands disagree.
  SmallVector<Value*, 4> NewOperands;
  for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O) {
    // This check is different to that in canSinkLastInstruction. There, we
    // cared about the global view once simplifycfg (and instcombine) have
    // completed - it takes into account PHIs that become trivially
    // simplifiable. However here we need a more local view; if an operand
    // differs we create a PHI and rely on instcombine to clean up the very
    // small mess we may make.
    bool NeedPHI = any_of(Insts, [&I0, O](const Instruction *I) {
      return I->getOperand(O) != I0->getOperand(O);
    });
    if (!NeedPHI) {
      NewOperands.push_back(I0->getOperand(O));
      continue;
    }

    // Create a new PHI in the successor block and populate it.
    auto *Op = I0->getOperand(O);
    assert(!Op->getType()->isTokenTy() && "Can't PHI tokens!");
    auto *PN = PHINode::Create(Op->getType(), Insts.size(),
                               Op->getName() + ".sink", &BBEnd->front());
    for (auto *I : Insts)
      PN->addIncoming(I->getOperand(O), I->getParent());
    NewOperands.push_back(PN);
  }

  // Arbitrarily use I0 as the new "common" instruction; remap its operands
  // and move it to the start of the successor block.
  for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O)
    I0->getOperandUse(O).set(NewOperands[O]);
  I0->moveBefore(&*BBEnd->getFirstInsertionPt());

  // Update metadata and IR flags, and merge debug locations.
  for (auto *I : Insts)
    if (I != I0) {
      // The debug location for the "common" instruction is the merged locations
      // of all the commoned instructions. We start with the original location
      // of the "common" instruction and iteratively merge each location in the
      // loop below.
      // This is an N-way merge, which will be inefficient if I0 is a CallInst.
      // However, as N-way merge for CallInst is rare, so we use simplified API
      // instead of using complex API for N-way merge.
      I0->applyMergedLocation(I0->getDebugLoc(), I->getDebugLoc());
      combineMetadataForCSE(I0, I, true);
      I0->andIRFlags(I);
    }

  if (!I0->user_empty()) {
    // canSinkLastInstruction checked that all instructions were used by
    // one and only one PHI node. Find that now, RAUW it to our common
    // instruction and nuke it.
    auto *PN = cast<PHINode>(*I0->user_begin());
    PN->replaceAllUsesWith(I0);
    PN->eraseFromParent();
  }

  // Finally nuke all instructions apart from the common instruction.
  for (auto *I : Insts)
    if (I != I0)
      I->eraseFromParent();

  return true;
}
1798
1799namespace {
1800
1801 // LockstepReverseIterator - Iterates through instructions
1802 // in a set of blocks in reverse order from the first non-terminator.
1803 // For example (assume all blocks have size n):
1804 // LockstepReverseIterator I([B1, B2, B3]);
1805 // *I-- = [B1[n], B2[n], B3[n]];
1806 // *I-- = [B1[n-1], B2[n-1], B3[n-1]];
1807 // *I-- = [B1[n-2], B2[n-2], B3[n-2]];
1808 // ...
1809 class LockstepReverseIterator {
1810 ArrayRef<BasicBlock*> Blocks;
1811 SmallVector<Instruction*,4> Insts;
1812 bool Fail;
1813
1814 public:
1815 LockstepReverseIterator(ArrayRef<BasicBlock*> Blocks) : Blocks(Blocks) {
1816 reset();
1817 }
1818
1819 void reset() {
1820 Fail = false;
1821 Insts.clear();
1822 for (auto *BB : Blocks) {
1823 Instruction *Inst = BB->getTerminator();
1824 for (Inst = Inst->getPrevNode(); Inst && isa<DbgInfoIntrinsic>(Inst);)
1825 Inst = Inst->getPrevNode();
1826 if (!Inst) {
1827 // Block wasn't big enough.
1828 Fail = true;
1829 return;
1830 }
1831 Insts.push_back(Inst);
1832 }
1833 }
1834
1835 bool isValid() const {
1836 return !Fail;
1837 }
1838
1839 void operator--() {
1840 if (Fail)
1841 return;
1842 for (auto *&Inst : Insts) {
1843 for (Inst = Inst->getPrevNode(); Inst && isa<DbgInfoIntrinsic>(Inst);)
1844 Inst = Inst->getPrevNode();
1845 // Already at beginning of block.
1846 if (!Inst) {
1847 Fail = true;
1848 return;
1849 }
1850 }
1851 }
1852
1853 ArrayRef<Instruction*> operator * () const {
1854 return Insts;
1855 }
1856 };
1857
1858} // end anonymous namespace
1859
/// Check whether BB's predecessors end with unconditional branches. If it is
/// true, sink any common code from the predecessors to BB.
/// We also allow one predecessor to end with conditional branch (but no more
/// than one).
static bool SinkCommonCodeFromPredecessors(BasicBlock *BB,
                                           DomTreeUpdater *DTU) {
  // We support two situations:
  //   (1) all incoming arcs are unconditional
  //   (2) one incoming arc is conditional
  //
  // (2) is very common in switch defaults and
  // else-if patterns;
  //
  //   if (a) f(1);
  //   else if (b) f(2);
  //
  // produces:
  //
  //       [if]
  //      /    \
  //    [f(1)] [if]
  //      |     | \
  //      |     |  |
  //      |  [f(2)]|
  //       \    | /
  //        [ end ]
  //
  // [end] has two unconditional predecessor arcs and one conditional. The
  // conditional refers to the implicit empty 'else' arc. This conditional
  // arc can also be caused by an empty default block in a switch.
  //
  // In this case, we attempt to sink code from all *unconditional* arcs.
  // If we can sink instructions from these arcs (determined during the scan
  // phase below) we insert a common successor for all unconditional arcs and
  // connect that to [end], to enable sinking:
  //
  //       [if]
  //      /    \
  //    [x(1)] [if]
  //      |     | \
  //      |     |  \
  //      |  [x(2)] |
  //       \   /    |
  //   [sink.split] |
  //         \     /
  //         [ end ]
  //
  SmallVector<BasicBlock*,4> UnconditionalPreds;
  Instruction *Cond = nullptr;
  for (auto *B : predecessors(BB)) {
    auto *T = B->getTerminator();
    if (isa<BranchInst>(T) && cast<BranchInst>(T)->isUnconditional())
      UnconditionalPreds.push_back(B);
    else if ((isa<BranchInst>(T) || isa<SwitchInst>(T)) && !Cond)
      Cond = T;
    else
      return false;
  }
  // Commoning only pays off with at least two unconditional arcs.
  if (UnconditionalPreds.size() < 2)
    return false;

  // We take a two-step approach to tail sinking. First we scan from the end of
  // each block upwards in lockstep. If the n'th instruction from the end of each
  // block can be sunk, those instructions are added to ValuesToSink and we
  // carry on. If we can sink an instruction but need to PHI-merge some operands
  // (because they're not identical in each instruction) we add these to
  // PHIOperands.
  unsigned ScanIdx = 0;
  SmallPtrSet<Value*,4> InstructionsToSink;
  DenseMap<Instruction*, SmallVector<Value*,4>> PHIOperands;
  LockstepReverseIterator LRI(UnconditionalPreds);
  while (LRI.isValid() &&
         canSinkInstructions(*LRI, PHIOperands)) {
    LLVM_DEBUG(dbgs() << "SINK: instruction can be sunk: " << *(*LRI)[0]
                      << "\n");
    InstructionsToSink.insert((*LRI).begin(), (*LRI).end());
    ++ScanIdx;
    --LRI;
  }

  // If no instructions can be sunk, early-return.
  if (ScanIdx == 0)
    return false;

  bool Changed = false;

  // Estimate how many PHI nodes sinking the instruction group currently under
  // LRI would require; sinking is "profitable" only when at most one PHI per
  // sunk instruction is needed. Values that will themselves be sunk later
  // (already in InstructionsToSink) are discounted.
  auto ProfitableToSinkInstruction = [&](LockstepReverseIterator &LRI) {
    unsigned NumPHIdValues = 0;
    for (auto *I : *LRI)
      for (auto *V : PHIOperands[I])
        if (InstructionsToSink.count(V) == 0)
          ++NumPHIdValues;
    LLVM_DEBUG(dbgs() << "SINK: #phid values: " << NumPHIdValues << "\n");
    unsigned NumPHIInsts = NumPHIdValues / UnconditionalPreds.size();
    if ((NumPHIdValues % UnconditionalPreds.size()) != 0)
      NumPHIInsts++;

    return NumPHIInsts <= 1;
  };

  if (Cond) {
    // Check if we would actually sink anything first! This mutates the CFG and
    // adds an extra block. The goal in doing this is to allow instructions that
    // couldn't be sunk before to be sunk - obviously, speculatable instructions
    // (such as trunc, add) can be sunk and predicated already. So we check that
    // we're going to sink at least one non-speculatable instruction.
    LRI.reset();
    unsigned Idx = 0;
    bool Profitable = false;
    while (ProfitableToSinkInstruction(LRI) && Idx < ScanIdx) {
      if (!isSafeToSpeculativelyExecute((*LRI)[0])) {
        Profitable = true;
        break;
      }
      --LRI;
      ++Idx;
    }
    if (!Profitable)
      return false;

    LLVM_DEBUG(dbgs() << "SINK: Splitting edge\n");
    // We have a conditional edge and we're going to sink some instructions.
    // Insert a new block postdominating all blocks we're going to sink from.
    if (!SplitBlockPredecessors(BB, UnconditionalPreds, ".sink.split", DTU))
      // Edges couldn't be split.
      return false;
    Changed = true;
  }

  // Now that we've analyzed all potential sinking candidates, perform the
  // actual sink. We iteratively sink the last non-terminator of the source
  // blocks into their common successor unless doing so would require too
  // many PHI instructions to be generated (currently only one PHI is allowed
  // per sunk instruction).
  //
  // We can use InstructionsToSink to discount values needing PHI-merging that will
  // actually be sunk in a later iteration. This allows us to be more
  // aggressive in what we sink. This does allow a false positive where we
  // sink presuming a later value will also be sunk, but stop half way through
  // and never actually sink it which means we produce more PHIs than intended.
  // This is unlikely in practice though.
  unsigned SinkIdx = 0;
  for (; SinkIdx != ScanIdx; ++SinkIdx) {
    LLVM_DEBUG(dbgs() << "SINK: Sink: "
                      << *UnconditionalPreds[0]->getTerminator()->getPrevNode()
                      << "\n");

    // Because we've sunk every instruction in turn, the current instruction to
    // sink is always at index 0.
    LRI.reset();
    if (!ProfitableToSinkInstruction(LRI)) {
      // Too many PHIs would be created.
      LLVM_DEBUG(
          dbgs() << "SINK: stopping here, too many PHIs would be created!\n");
      break;
    }

    if (!sinkLastInstruction(UnconditionalPreds)) {
      LLVM_DEBUG(
          dbgs()
          << "SINK: stopping here, failed to actually sink instruction!\n");
      break;
    }

    NumSinkCommonInstrs++;
    Changed = true;
  }
  if (SinkIdx != 0)
    ++NumSinkCommonCode;
  return Changed;
}
2031
2032/// Determine if we can hoist sink a sole store instruction out of a
2033/// conditional block.
2034///
2035/// We are looking for code like the following:
2036/// BrBB:
2037/// store i32 %add, i32* %arrayidx2
2038/// ... // No other stores or function calls (we could be calling a memory
2039/// ... // function).
2040/// %cmp = icmp ult %x, %y
2041/// br i1 %cmp, label %EndBB, label %ThenBB
2042/// ThenBB:
2043/// store i32 %add5, i32* %arrayidx2
2044/// br label EndBB
2045/// EndBB:
2046/// ...
2047/// We are going to transform this into:
2048/// BrBB:
2049/// store i32 %add, i32* %arrayidx2
2050/// ... //
2051/// %cmp = icmp ult %x, %y
2052/// %add.add5 = select i1 %cmp, i32 %add, %add5
2053/// store i32 %add.add5, i32* %arrayidx2
2054/// ...
2055///
2056/// \return The pointer to the value of the previous store if the store can be
2057/// hoisted into the predecessor block. 0 otherwise.
2058static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
2059 BasicBlock *StoreBB, BasicBlock *EndBB) {
2060 StoreInst *StoreToHoist = dyn_cast<StoreInst>(I);
2061 if (!StoreToHoist)
2062 return nullptr;
2063
2064 // Volatile or atomic.
2065 if (!StoreToHoist->isSimple())
2066 return nullptr;
2067
2068 Value *StorePtr = StoreToHoist->getPointerOperand();
2069
2070 // Look for a store to the same pointer in BrBB.
2071 unsigned MaxNumInstToLookAt = 9;
2072 // Skip pseudo probe intrinsic calls which are not really killing any memory
2073 // accesses.
2074 for (Instruction &CurI : reverse(BrBB->instructionsWithoutDebug(true))) {
2075 if (!MaxNumInstToLookAt)
2076 break;
2077 --MaxNumInstToLookAt;
2078
2079 // Could be calling an instruction that affects memory like free().
2080 if (CurI.mayHaveSideEffects() && !isa<StoreInst>(CurI))
2081 return nullptr;
2082
2083 if (auto *SI = dyn_cast<StoreInst>(&CurI)) {
2084 // Found the previous store make sure it stores to the same location.
2085 if (SI->getPointerOperand() == StorePtr)
2086 // Found the previous store, return its value operand.
2087 return SI->getValueOperand();
2088 return nullptr; // Unknown store.
2089 }
2090 }
2091
2092 return nullptr;
2093}
2094
2095/// Estimate the cost of the insertion(s) and check that the PHI nodes can be
2096/// converted to selects.
2097static bool validateAndCostRequiredSelects(BasicBlock *BB, BasicBlock *ThenBB,
2098 BasicBlock *EndBB,
2099 unsigned &SpeculatedInstructions,
2100 int &BudgetRemaining,
2101 const TargetTransformInfo &TTI) {
2102 TargetTransformInfo::TargetCostKind CostKind =
2103 BB->getParent()->hasMinSize()
2104 ? TargetTransformInfo::TCK_CodeSize
2105 : TargetTransformInfo::TCK_SizeAndLatency;
2106
2107 bool HaveRewritablePHIs = false;
2108 for (PHINode &PN : EndBB->phis()) {
2109 Value *OrigV = PN.getIncomingValueForBlock(BB);
2110 Value *ThenV = PN.getIncomingValueForBlock(ThenBB);
2111
2112 // FIXME: Try to remove some of the duplication with HoistThenElseCodeToIf.
2113 // Skip PHIs which are trivial.
2114 if (ThenV == OrigV)
2115 continue;
2116
2117 BudgetRemaining -=
2118 TTI.getCmpSelInstrCost(Instruction::Select, PN.getType(), nullptr,
2119 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2120
2121 // Don't convert to selects if we could remove undefined behavior instead.
2122 if (passingValueIsAlwaysUndefined(OrigV, &PN) ||
2123 passingValueIsAlwaysUndefined(ThenV, &PN))
2124 return false;
2125
2126 HaveRewritablePHIs = true;
2127 ConstantExpr *OrigCE = dyn_cast<ConstantExpr>(OrigV);
2128 ConstantExpr *ThenCE = dyn_cast<ConstantExpr>(ThenV);
2129 if (!OrigCE && !ThenCE)
2130 continue; // Known safe and cheap.
2131
2132 if ((ThenCE && !isSafeToSpeculativelyExecute(ThenCE)) ||
2133 (OrigCE && !isSafeToSpeculativelyExecute(OrigCE)))
2134 return false;
2135 unsigned OrigCost = OrigCE ? ComputeSpeculationCost(OrigCE, TTI) : 0;
2136 unsigned ThenCost = ThenCE ? ComputeSpeculationCost(ThenCE, TTI) : 0;
2137 unsigned MaxCost =
2138 2 * PHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic;
2139 if (OrigCost + ThenCost > MaxCost)
2140 return false;
2141
2142 // Account for the cost of an unfolded ConstantExpr which could end up
2143 // getting expanded into Instructions.
2144 // FIXME: This doesn't account for how many operations are combined in the
2145 // constant expression.
2146 ++SpeculatedInstructions;
2147 if (SpeculatedInstructions > 1)
2148 return false;
2149 }
2150
2151 return HaveRewritablePHIs;
2152}
2153
2154/// Speculate a conditional basic block flattening the CFG.
2155///
2156/// Note that this is a very risky transform currently. Speculating
2157/// instructions like this is most often not desirable. Instead, there is an MI
2158/// pass which can do it with full awareness of the resource constraints.
2159/// However, some cases are "obvious" and we should do directly. An example of
2160/// this is speculating a single, reasonably cheap instruction.
2161///
2162/// There is only one distinct advantage to flattening the CFG at the IR level:
2163/// it makes very common but simplistic optimizations such as are common in
2164/// instcombine and the DAG combiner more powerful by removing CFG edges and
2165/// modeling their effects with easier to reason about SSA value graphs.
2166///
2167///
2168/// An illustration of this transform is turning this IR:
2169/// \code
2170/// BB:
2171/// %cmp = icmp ult %x, %y
2172/// br i1 %cmp, label %EndBB, label %ThenBB
2173/// ThenBB:
2174/// %sub = sub %x, %y
2175/// br label BB2
2176/// EndBB:
2177/// %phi = phi [ %sub, %ThenBB ], [ 0, %EndBB ]
2178/// ...
2179/// \endcode
2180///
2181/// Into this IR:
2182/// \code
2183/// BB:
2184/// %cmp = icmp ult %x, %y
2185/// %sub = sub %x, %y
2186/// %cond = select i1 %cmp, 0, %sub
2187/// ...
2188/// \endcode
2189///
2190/// \returns true if the conditional block is removed.
2191bool SimplifyCFGOpt::SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB,
2192 const TargetTransformInfo &TTI) {
2193 // Be conservative for now. FP select instruction can often be expensive.
2194 Value *BrCond = BI->getCondition();
2195 if (isa<FCmpInst>(BrCond))
2196 return false;
2197
2198 BasicBlock *BB = BI->getParent();
2199 BasicBlock *EndBB = ThenBB->getTerminator()->getSuccessor(0);
2200 int BudgetRemaining =
2201 PHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic;
2202
2203 // If ThenBB is actually on the false edge of the conditional branch, remember
2204 // to swap the select operands later.
2205 bool Invert = false;
2206 if (ThenBB != BI->getSuccessor(0)) {
2207 assert(ThenBB == BI->getSuccessor(1) && "No edge from 'if' block?")((ThenBB == BI->getSuccessor(1) && "No edge from 'if' block?"
) ? static_cast<void> (0) : __assert_fail ("ThenBB == BI->getSuccessor(1) && \"No edge from 'if' block?\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 2207, __PRETTY_FUNCTION__))
;
2208 Invert = true;
2209 }
2210 assert(EndBB == BI->getSuccessor(!Invert) && "No edge from to end block")((EndBB == BI->getSuccessor(!Invert) && "No edge from to end block"
) ? static_cast<void> (0) : __assert_fail ("EndBB == BI->getSuccessor(!Invert) && \"No edge from to end block\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 2210, __PRETTY_FUNCTION__))
;
2211
2212 // Keep a count of how many times instructions are used within ThenBB when
2213 // they are candidates for sinking into ThenBB. Specifically:
2214 // - They are defined in BB, and
2215 // - They have no side effects, and
2216 // - All of their uses are in ThenBB.
2217 SmallDenseMap<Instruction *, unsigned, 4> SinkCandidateUseCounts;
2218
2219 SmallVector<Instruction *, 4> SpeculatedDbgIntrinsics;
2220
2221 unsigned SpeculatedInstructions = 0;
2222 Value *SpeculatedStoreValue = nullptr;
2223 StoreInst *SpeculatedStore = nullptr;
2224 for (BasicBlock::iterator BBI = ThenBB->begin(),
2225 BBE = std::prev(ThenBB->end());
2226 BBI != BBE; ++BBI) {
2227 Instruction *I = &*BBI;
2228 // Skip debug info.
2229 if (isa<DbgInfoIntrinsic>(I)) {
2230 SpeculatedDbgIntrinsics.push_back(I);
2231 continue;
2232 }
2233
2234 // Skip pseudo probes. The consequence is we lose track of the branch
2235 // probability for ThenBB, which is fine since the optimization here takes
2236 // place regardless of the branch probability.
2237 if (isa<PseudoProbeInst>(I)) {
2238 SpeculatedDbgIntrinsics.push_back(I);
2239 continue;
2240 }
2241
2242 // Only speculatively execute a single instruction (not counting the
2243 // terminator) for now.
2244 ++SpeculatedInstructions;
2245 if (SpeculatedInstructions > 1)
2246 return false;
2247
2248 // Don't hoist the instruction if it's unsafe or expensive.
2249 if (!isSafeToSpeculativelyExecute(I) &&
2250 !(HoistCondStores && (SpeculatedStoreValue = isSafeToSpeculateStore(
2251 I, BB, ThenBB, EndBB))))
2252 return false;
2253 if (!SpeculatedStoreValue &&
2254 ComputeSpeculationCost(I, TTI) >
2255 PHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic)
2256 return false;
2257
2258 // Store the store speculation candidate.
2259 if (SpeculatedStoreValue)
2260 SpeculatedStore = cast<StoreInst>(I);
2261
2262 // Do not hoist the instruction if any of its operands are defined but not
2263 // used in BB. The transformation will prevent the operand from
2264 // being sunk into the use block.
2265 for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
2266 Instruction *OpI = dyn_cast<Instruction>(*i);
2267 if (!OpI || OpI->getParent() != BB || OpI->mayHaveSideEffects())
2268 continue; // Not a candidate for sinking.
2269
2270 ++SinkCandidateUseCounts[OpI];
2271 }
2272 }
2273
2274 // Consider any sink candidates which are only used in ThenBB as costs for
2275 // speculation. Note, while we iterate over a DenseMap here, we are summing
2276 // and so iteration order isn't significant.
2277 for (SmallDenseMap<Instruction *, unsigned, 4>::iterator
2278 I = SinkCandidateUseCounts.begin(),
2279 E = SinkCandidateUseCounts.end();
2280 I != E; ++I)
2281 if (I->first->hasNUses(I->second)) {
2282 ++SpeculatedInstructions;
2283 if (SpeculatedInstructions > 1)
2284 return false;
2285 }
2286
2287 // Check that we can insert the selects and that it's not too expensive to do
2288 // so.
2289 bool Convert = SpeculatedStore != nullptr;
2290 Convert |= validateAndCostRequiredSelects(BB, ThenBB, EndBB,
2291 SpeculatedInstructions,
2292 BudgetRemaining, TTI);
2293 if (!Convert || BudgetRemaining < 0)
2294 return false;
2295
2296 // If we get here, we can hoist the instruction and if-convert.
2297 LLVM_DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *ThenBB << "\n";)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "SPECULATIVELY EXECUTING BB"
<< *ThenBB << "\n";; } } while (false)
;
2298
2299 // Insert a select of the value of the speculated store.
2300 if (SpeculatedStoreValue) {
2301 IRBuilder<NoFolder> Builder(BI);
2302 Value *TrueV = SpeculatedStore->getValueOperand();
2303 Value *FalseV = SpeculatedStoreValue;
2304 if (Invert)
2305 std::swap(TrueV, FalseV);
2306 Value *S = Builder.CreateSelect(
2307 BrCond, TrueV, FalseV, "spec.store.select", BI);
2308 SpeculatedStore->setOperand(0, S);
2309 SpeculatedStore->applyMergedLocation(BI->getDebugLoc(),
2310 SpeculatedStore->getDebugLoc());
2311 }
2312
2313 // Metadata can be dependent on the condition we are hoisting above.
2314 // Conservatively strip all metadata on the instruction. Drop the debug loc
2315 // to avoid making it appear as if the condition is a constant, which would
2316 // be misleading while debugging.
2317 for (auto &I : *ThenBB) {
2318 if (!SpeculatedStoreValue || &I != SpeculatedStore)
2319 I.setDebugLoc(DebugLoc());
2320 I.dropUnknownNonDebugMetadata();
2321 }
2322
2323 // Hoist the instructions.
2324 BB->getInstList().splice(BI->getIterator(), ThenBB->getInstList(),
2325 ThenBB->begin(), std::prev(ThenBB->end()));
2326
2327 // Insert selects and rewrite the PHI operands.
2328 IRBuilder<NoFolder> Builder(BI);
2329 for (PHINode &PN : EndBB->phis()) {
2330 unsigned OrigI = PN.getBasicBlockIndex(BB);
2331 unsigned ThenI = PN.getBasicBlockIndex(ThenBB);
2332 Value *OrigV = PN.getIncomingValue(OrigI);
2333 Value *ThenV = PN.getIncomingValue(ThenI);
2334
2335 // Skip PHIs which are trivial.
2336 if (OrigV == ThenV)
2337 continue;
2338
2339 // Create a select whose true value is the speculatively executed value and
2340 // false value is the pre-existing value. Swap them if the branch
2341 // destinations were inverted.
2342 Value *TrueV = ThenV, *FalseV = OrigV;
2343 if (Invert)
2344 std::swap(TrueV, FalseV);
2345 Value *V = Builder.CreateSelect(BrCond, TrueV, FalseV, "spec.select", BI);
2346 PN.setIncomingValue(OrigI, V);
2347 PN.setIncomingValue(ThenI, V);
2348 }
2349
2350 // Remove speculated dbg intrinsics.
2351 // FIXME: Is it possible to do this in a more elegant way? Moving/merging the
2352 // dbg value for the different flows and inserting it after the select.
2353 for (Instruction *I : SpeculatedDbgIntrinsics)
2354 I->eraseFromParent();
2355
2356 ++NumSpeculations;
2357 return true;
2358}
2359
2360/// Return true if we can thread a branch across this block.
2361static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
2362 int Size = 0;
2363
2364 for (Instruction &I : BB->instructionsWithoutDebug()) {
2365 if (Size > MaxSmallBlockSize)
2366 return false; // Don't clone large BB's.
2367
2368 // Can't fold blocks that contain noduplicate or convergent calls.
2369 if (CallInst *CI = dyn_cast<CallInst>(&I))
2370 if (CI->cannotDuplicate() || CI->isConvergent())
2371 return false;
2372
2373 // We will delete Phis while threading, so Phis should not be accounted in
2374 // block's size
2375 if (!isa<PHINode>(I))
2376 ++Size;
2377
2378 // We can only support instructions that do not define values that are
2379 // live outside of the current basic block.
2380 for (User *U : I.users()) {
2381 Instruction *UI = cast<Instruction>(U);
2382 if (UI->getParent() != BB || isa<PHINode>(UI))
2383 return false;
2384 }
2385
2386 // Looks ok, continue checking.
2387 }
2388
2389 return true;
2390}
2391
2392/// If we have a conditional branch on a PHI node value that is defined in the
2393/// same block as the branch and if any PHI entries are constants, thread edges
2394/// corresponding to that entry to be branches to their ultimate destination.
2395static bool FoldCondBranchOnPHI(BranchInst *BI, DomTreeUpdater *DTU,
2396 const DataLayout &DL, AssumptionCache *AC) {
2397 BasicBlock *BB = BI->getParent();
2398 PHINode *PN = dyn_cast<PHINode>(BI->getCondition());
2399 // NOTE: we currently cannot transform this case if the PHI node is used
2400 // outside of the block.
2401 if (!PN || PN->getParent() != BB || !PN->hasOneUse())
2402 return false;
2403
2404 // Degenerate case of a single entry PHI.
2405 if (PN->getNumIncomingValues() == 1) {
2406 FoldSingleEntryPHINodes(PN->getParent());
2407 return true;
2408 }
2409
2410 // Now we know that this block has multiple preds and two succs.
2411 if (!BlockIsSimpleEnoughToThreadThrough(BB))
2412 return false;
2413
2414 // Okay, this is a simple enough basic block. See if any phi values are
2415 // constants.
2416 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
2417 ConstantInt *CB = dyn_cast<ConstantInt>(PN->getIncomingValue(i));
2418 if (!CB || !CB->getType()->isIntegerTy(1))
2419 continue;
2420
2421 // Okay, we now know that all edges from PredBB should be revectored to
2422 // branch to RealDest.
2423 BasicBlock *PredBB = PN->getIncomingBlock(i);
2424 BasicBlock *RealDest = BI->getSuccessor(!CB->getZExtValue());
2425
2426 if (RealDest == BB)
2427 continue; // Skip self loops.
2428 // Skip if the predecessor's terminator is an indirect branch.
2429 if (isa<IndirectBrInst>(PredBB->getTerminator()))
2430 continue;
2431
2432 SmallVector<DominatorTree::UpdateType, 3> Updates;
2433
2434 // The dest block might have PHI nodes, other predecessors and other
2435 // difficult cases. Instead of being smart about this, just insert a new
2436 // block that jumps to the destination block, effectively splitting
2437 // the edge we are about to create.
2438 BasicBlock *EdgeBB =
2439 BasicBlock::Create(BB->getContext(), RealDest->getName() + ".critedge",
2440 RealDest->getParent(), RealDest);
2441 BranchInst *CritEdgeBranch = BranchInst::Create(RealDest, EdgeBB);
2442 Updates.push_back({DominatorTree::Insert, EdgeBB, RealDest});
2443 CritEdgeBranch->setDebugLoc(BI->getDebugLoc());
2444
2445 // Update PHI nodes.
2446 AddPredecessorToBlock(RealDest, EdgeBB, BB);
2447
2448 // BB may have instructions that are being threaded over. Clone these
2449 // instructions into EdgeBB. We know that there will be no uses of the
2450 // cloned instructions outside of EdgeBB.
2451 BasicBlock::iterator InsertPt = EdgeBB->begin();
2452 DenseMap<Value *, Value *> TranslateMap; // Track translated values.
2453 for (BasicBlock::iterator BBI = BB->begin(); &*BBI != BI; ++BBI) {
2454 if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
2455 TranslateMap[PN] = PN->getIncomingValueForBlock(PredBB);
2456 continue;
2457 }
2458 // Clone the instruction.
2459 Instruction *N = BBI->clone();
2460 if (BBI->hasName())
2461 N->setName(BBI->getName() + ".c");
2462
2463 // Update operands due to translation.
2464 for (User::op_iterator i = N->op_begin(), e = N->op_end(); i != e; ++i) {
2465 DenseMap<Value *, Value *>::iterator PI = TranslateMap.find(*i);
2466 if (PI != TranslateMap.end())
2467 *i = PI->second;
2468 }
2469
2470 // Check for trivial simplification.
2471 if (Value *V = SimplifyInstruction(N, {DL, nullptr, nullptr, AC})) {
2472 if (!BBI->use_empty())
2473 TranslateMap[&*BBI] = V;
2474 if (!N->mayHaveSideEffects()) {
2475 N->deleteValue(); // Instruction folded away, don't need actual inst
2476 N = nullptr;
2477 }
2478 } else {
2479 if (!BBI->use_empty())
2480 TranslateMap[&*BBI] = N;
2481 }
2482 if (N) {
2483 // Insert the new instruction into its new home.
2484 EdgeBB->getInstList().insert(InsertPt, N);
2485
2486 // Register the new instruction with the assumption cache if necessary.
2487 if (AC && match(N, m_Intrinsic<Intrinsic::assume>()))
2488 AC->registerAssumption(cast<IntrinsicInst>(N));
2489 }
2490 }
2491
2492 // Loop over all of the edges from PredBB to BB, changing them to branch
2493 // to EdgeBB instead.
2494 Instruction *PredBBTI = PredBB->getTerminator();
2495 for (unsigned i = 0, e = PredBBTI->getNumSuccessors(); i != e; ++i)
2496 if (PredBBTI->getSuccessor(i) == BB) {
2497 BB->removePredecessor(PredBB);
2498 PredBBTI->setSuccessor(i, EdgeBB);
2499 }
2500
2501 Updates.push_back({DominatorTree::Insert, PredBB, EdgeBB});
2502 Updates.push_back({DominatorTree::Delete, PredBB, BB});
2503
2504 if (DTU)
2505 DTU->applyUpdates(Updates);
2506
2507 // Recurse, simplifying any other constants.
2508 return FoldCondBranchOnPHI(BI, DTU, DL, AC) || true;
2509 }
2510
2511 return false;
2512}
2513
2514/// Given a BB that starts with the specified two-entry PHI node,
2515/// see if we can eliminate it.
2516static bool FoldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI,
2517 DomTreeUpdater *DTU, const DataLayout &DL) {
2518 // Ok, this is a two entry PHI node. Check to see if this is a simple "if
2519 // statement", which has a very simple dominance structure. Basically, we
2520 // are trying to find the condition that is being branched on, which
2521 // subsequently causes this merge to happen. We really want control
2522 // dependence information for this check, but simplifycfg can't keep it up
2523 // to date, and this catches most of the cases we care about anyway.
2524 BasicBlock *BB = PN->getParent();
2525
2526 BasicBlock *IfTrue, *IfFalse;
2527 Value *IfCond = GetIfCondition(BB, IfTrue, IfFalse);
2528 if (!IfCond ||
2529 // Don't bother if the branch will be constant folded trivially.
2530 isa<ConstantInt>(IfCond))
2531 return false;
2532
2533 // Okay, we found that we can merge this two-entry phi node into a select.
2534 // Doing so would require us to fold *all* two entry phi nodes in this block.
2535 // At some point this becomes non-profitable (particularly if the target
2536 // doesn't support cmov's). Only do this transformation if there are two or
2537 // fewer PHI nodes in this block.
2538 unsigned NumPhis = 0;
2539 for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++NumPhis, ++I)
2540 if (NumPhis > 2)
2541 return false;
2542
2543 // Loop over the PHI's seeing if we can promote them all to select
2544 // instructions. While we are at it, keep track of the instructions
2545 // that need to be moved to the dominating block.
2546 SmallPtrSet<Instruction *, 4> AggressiveInsts;
2547 int BudgetRemaining =
2548 TwoEntryPHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic;
2549
2550 bool Changed = false;
2551 for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) {
2552 PHINode *PN = cast<PHINode>(II++);
2553 if (Value *V = SimplifyInstruction(PN, {DL, PN})) {
2554 PN->replaceAllUsesWith(V);
2555 PN->eraseFromParent();
2556 Changed = true;
2557 continue;
2558 }
2559
2560 if (!DominatesMergePoint(PN->getIncomingValue(0), BB, AggressiveInsts,
2561 BudgetRemaining, TTI) ||
2562 !DominatesMergePoint(PN->getIncomingValue(1), BB, AggressiveInsts,
2563 BudgetRemaining, TTI))
2564 return Changed;
2565 }
2566
2567 // If we folded the first phi, PN dangles at this point. Refresh it. If
2568 // we ran out of PHIs then we simplified them all.
2569 PN = dyn_cast<PHINode>(BB->begin());
2570 if (!PN)
2571 return true;
2572
2573 // Return true if at least one of these is a 'not', and another is either
2574 // a 'not' too, or a constant.
2575 auto CanHoistNotFromBothValues = [](Value *V0, Value *V1) {
2576 if (!match(V0, m_Not(m_Value())))
2577 std::swap(V0, V1);
2578 auto Invertible = m_CombineOr(m_Not(m_Value()), m_AnyIntegralConstant());
2579 return match(V0, m_Not(m_Value())) && match(V1, Invertible);
2580 };
2581
2582 // Don't fold i1 branches on PHIs which contain binary operators, unless one
2583 // of the incoming values is an 'not' and another one is freely invertible.
2584 // These can often be turned into switches and other things.
2585 if (PN->getType()->isIntegerTy(1) &&
2586 (isa<BinaryOperator>(PN->getIncomingValue(0)) ||
2587 isa<BinaryOperator>(PN->getIncomingValue(1)) ||
2588 isa<BinaryOperator>(IfCond)) &&
2589 !CanHoistNotFromBothValues(PN->getIncomingValue(0),
2590 PN->getIncomingValue(1)))
2591 return Changed;
2592
2593 // If all PHI nodes are promotable, check to make sure that all instructions
2594 // in the predecessor blocks can be promoted as well. If not, we won't be able
2595 // to get rid of the control flow, so it's not worth promoting to select
2596 // instructions.
2597 BasicBlock *DomBlock = nullptr;
2598 BasicBlock *IfBlock1 = PN->getIncomingBlock(0);
2599 BasicBlock *IfBlock2 = PN->getIncomingBlock(1);
2600 if (cast<BranchInst>(IfBlock1->getTerminator())->isConditional()) {
2601 IfBlock1 = nullptr;
2602 } else {
2603 DomBlock = *pred_begin(IfBlock1);
2604 for (BasicBlock::iterator I = IfBlock1->begin(); !I->isTerminator(); ++I)
2605 if (!AggressiveInsts.count(&*I) && !isa<DbgInfoIntrinsic>(I) &&
2606 !isa<PseudoProbeInst>(I)) {
2607 // This is not an aggressive instruction that we can promote.
2608 // Because of this, we won't be able to get rid of the control flow, so
2609 // the xform is not worth it.
2610 return Changed;
2611 }
2612 }
2613
2614 if (cast<BranchInst>(IfBlock2->getTerminator())->isConditional()) {
2615 IfBlock2 = nullptr;
2616 } else {
2617 DomBlock = *pred_begin(IfBlock2);
2618 for (BasicBlock::iterator I = IfBlock2->begin(); !I->isTerminator(); ++I)
2619 if (!AggressiveInsts.count(&*I) && !isa<DbgInfoIntrinsic>(I) &&
2620 !isa<PseudoProbeInst>(I)) {
2621 // This is not an aggressive instruction that we can promote.
2622 // Because of this, we won't be able to get rid of the control flow, so
2623 // the xform is not worth it.
2624 return Changed;
2625 }
2626 }
2627 assert(DomBlock && "Failed to find root DomBlock")((DomBlock && "Failed to find root DomBlock") ? static_cast
<void> (0) : __assert_fail ("DomBlock && \"Failed to find root DomBlock\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 2627, __PRETTY_FUNCTION__))
;
2628
2629 LLVM_DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfConddo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "FOUND IF CONDITION! " <<
*IfCond << " T: " << IfTrue->getName() <<
" F: " << IfFalse->getName() << "\n"; } } while
(false)
2630 << " T: " << IfTrue->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "FOUND IF CONDITION! " <<
*IfCond << " T: " << IfTrue->getName() <<
" F: " << IfFalse->getName() << "\n"; } } while
(false)
2631 << " F: " << IfFalse->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "FOUND IF CONDITION! " <<
*IfCond << " T: " << IfTrue->getName() <<
" F: " << IfFalse->getName() << "\n"; } } while
(false)
;
2632
2633 // If we can still promote the PHI nodes after this gauntlet of tests,
2634 // do all of the PHI's now.
2635 Instruction *InsertPt = DomBlock->getTerminator();
2636 IRBuilder<NoFolder> Builder(InsertPt);
2637
2638 // Move all 'aggressive' instructions, which are defined in the
2639 // conditional parts of the if's up to the dominating block.
2640 if (IfBlock1)
2641 hoistAllInstructionsInto(DomBlock, InsertPt, IfBlock1);
2642 if (IfBlock2)
2643 hoistAllInstructionsInto(DomBlock, InsertPt, IfBlock2);
2644
2645 // Propagate fast-math-flags from phi nodes to replacement selects.
2646 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
2647 while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
2648 if (isa<FPMathOperator>(PN))
2649 Builder.setFastMathFlags(PN->getFastMathFlags());
2650
2651 // Change the PHI node into a select instruction.
2652 Value *TrueVal = PN->getIncomingValue(PN->getIncomingBlock(0) == IfFalse);
2653 Value *FalseVal = PN->getIncomingValue(PN->getIncomingBlock(0) == IfTrue);
2654
2655 Value *Sel = Builder.CreateSelect(IfCond, TrueVal, FalseVal, "", InsertPt);
2656 PN->replaceAllUsesWith(Sel);
2657 Sel->takeName(PN);
2658 PN->eraseFromParent();
2659 }
2660
2661 // At this point, IfBlock1 and IfBlock2 are both empty, so our if statement
2662 // has been flattened. Change DomBlock to jump directly to our new block to
2663 // avoid other simplifycfg's kicking in on the diamond.
2664 Instruction *OldTI = DomBlock->getTerminator();
2665 Builder.SetInsertPoint(OldTI);
2666 Builder.CreateBr(BB);
2667
2668 SmallVector<DominatorTree::UpdateType, 3> Updates;
2669 if (DTU) {
2670 Updates.push_back({DominatorTree::Insert, DomBlock, BB});
2671 for (auto *Successor : successors(DomBlock))
2672 Updates.push_back({DominatorTree::Delete, DomBlock, Successor});
2673 }
2674
2675 OldTI->eraseFromParent();
2676 if (DTU)
2677 DTU->applyUpdates(Updates);
2678
2679 return true;
2680}
2681
2682/// If we found a conditional branch that goes to two returning blocks,
2683/// try to merge them together into one return,
2684/// introducing a select if the return values disagree.
2685bool SimplifyCFGOpt::SimplifyCondBranchToTwoReturns(BranchInst *BI,
2686 IRBuilder<> &Builder) {
2687 auto *BB = BI->getParent();
2688 assert(BI->isConditional() && "Must be a conditional branch")((BI->isConditional() && "Must be a conditional branch"
) ? static_cast<void> (0) : __assert_fail ("BI->isConditional() && \"Must be a conditional branch\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 2688, __PRETTY_FUNCTION__))
;
2689 BasicBlock *TrueSucc = BI->getSuccessor(0);
2690 BasicBlock *FalseSucc = BI->getSuccessor(1);
2691 // NOTE: destinations may match, this could be degenerate uncond branch.
2692 ReturnInst *TrueRet = cast<ReturnInst>(TrueSucc->getTerminator());
2693 ReturnInst *FalseRet = cast<ReturnInst>(FalseSucc->getTerminator());
2694
2695 // Check to ensure both blocks are empty (just a return) or optionally empty
2696 // with PHI nodes. If there are other instructions, merging would cause extra
2697 // computation on one path or the other.
2698 if (!TrueSucc->getFirstNonPHIOrDbg()->isTerminator())
2699 return false;
2700 if (!FalseSucc->getFirstNonPHIOrDbg()->isTerminator())
2701 return false;
2702
2703 Builder.SetInsertPoint(BI);
2704 // Okay, we found a branch that is going to two return nodes. If
2705 // there is no return value for this function, just change the
2706 // branch into a return.
2707 if (FalseRet->getNumOperands() == 0) {
2708 TrueSucc->removePredecessor(BB);
2709 FalseSucc->removePredecessor(BB);
2710 Builder.CreateRetVoid();
2711 EraseTerminatorAndDCECond(BI);
2712 if (DTU) {
2713 SmallVector<DominatorTree::UpdateType, 2> Updates;
2714 Updates.push_back({DominatorTree::Delete, BB, TrueSucc});
2715 if (TrueSucc != FalseSucc)
2716 Updates.push_back({DominatorTree::Delete, BB, FalseSucc});
2717 DTU->applyUpdates(Updates);
2718 }
2719 return true;
2720 }
2721
2722 // Otherwise, figure out what the true and false return values are
2723 // so we can insert a new select instruction.
2724 Value *TrueValue = TrueRet->getReturnValue();
2725 Value *FalseValue = FalseRet->getReturnValue();
2726
2727 // Unwrap any PHI nodes in the return blocks.
2728 if (PHINode *TVPN = dyn_cast_or_null<PHINode>(TrueValue))
2729 if (TVPN->getParent() == TrueSucc)
2730 TrueValue = TVPN->getIncomingValueForBlock(BB);
2731 if (PHINode *FVPN = dyn_cast_or_null<PHINode>(FalseValue))
2732 if (FVPN->getParent() == FalseSucc)
2733 FalseValue = FVPN->getIncomingValueForBlock(BB);
2734
2735 // In order for this transformation to be safe, we must be able to
2736 // unconditionally execute both operands to the return. This is
2737 // normally the case, but we could have a potentially-trapping
2738 // constant expression that prevents this transformation from being
2739 // safe.
2740 if (ConstantExpr *TCV = dyn_cast_or_null<ConstantExpr>(TrueValue))
2741 if (TCV->canTrap())
2742 return false;
2743 if (ConstantExpr *FCV = dyn_cast_or_null<ConstantExpr>(FalseValue))
2744 if (FCV->canTrap())
2745 return false;
2746
2747 // Okay, we collected all the mapped values and checked them for sanity, and
2748 // defined to really do this transformation. First, update the CFG.
2749 TrueSucc->removePredecessor(BB);
2750 FalseSucc->removePredecessor(BB);
2751
2752 // Insert select instructions where needed.
2753 Value *BrCond = BI->getCondition();
2754 if (TrueValue) {
2755 // Insert a select if the results differ.
2756 if (TrueValue == FalseValue || isa<UndefValue>(FalseValue)) {
2757 } else if (isa<UndefValue>(TrueValue)) {
2758 TrueValue = FalseValue;
2759 } else {
2760 TrueValue =
2761 Builder.CreateSelect(BrCond, TrueValue, FalseValue, "retval", BI);
2762 }
2763 }
2764
2765 Value *RI =
2766 !TrueValue ? Builder.CreateRetVoid() : Builder.CreateRet(TrueValue);
2767
2768 (void)RI;
2769
2770 LLVM_DEBUG(dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:"
<< "\n " << *BI << "\nNewRet = " <<
*RI << "\nTRUEBLOCK: " << *TrueSucc << "\nFALSEBLOCK: "
<< *FalseSucc; } } while (false)
2771 << "\n " << *BI << "\nNewRet = " << *RI << "\nTRUEBLOCK: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:"
<< "\n " << *BI << "\nNewRet = " <<
*RI << "\nTRUEBLOCK: " << *TrueSucc << "\nFALSEBLOCK: "
<< *FalseSucc; } } while (false)
2772 << *TrueSucc << "\nFALSEBLOCK: " << *FalseSucc)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:"
<< "\n " << *BI << "\nNewRet = " <<
*RI << "\nTRUEBLOCK: " << *TrueSucc << "\nFALSEBLOCK: "
<< *FalseSucc; } } while (false)
;
2773
2774 EraseTerminatorAndDCECond(BI);
2775 if (DTU) {
2776 SmallVector<DominatorTree::UpdateType, 2> Updates;
2777 Updates.push_back({DominatorTree::Delete, BB, TrueSucc});
2778 if (TrueSucc != FalseSucc)
2779 Updates.push_back({DominatorTree::Delete, BB, FalseSucc});
2780 DTU->applyUpdates(Updates);
2781 }
2782
2783 return true;
2784}
2785
2786/// Return true if the given instruction is available
2787/// in its predecessor block. If yes, the instruction will be removed.
2788static bool tryCSEWithPredecessor(Instruction *Inst, BasicBlock *PB) {
2789 if (!isa<BinaryOperator>(Inst) && !isa<CmpInst>(Inst))
2790 return false;
2791 for (Instruction &I : *PB) {
2792 Instruction *PBI = &I;
2793 // Check whether Inst and PBI generate the same value.
2794 if (Inst->isIdenticalTo(PBI)) {
2795 Inst->replaceAllUsesWith(PBI);
2796 Inst->eraseFromParent();
2797 return true;
2798 }
2799 }
2800 return false;
2801}
2802
2803/// Return true if either PBI or BI has branch weight available, and store
2804/// the weights in {Pred|Succ}{True|False}Weight. If one of PBI and BI does
2805/// not have branch weight, use 1:1 as its weight.
2806static bool extractPredSuccWeights(BranchInst *PBI, BranchInst *BI,
2807 uint64_t &PredTrueWeight,
2808 uint64_t &PredFalseWeight,
2809 uint64_t &SuccTrueWeight,
2810 uint64_t &SuccFalseWeight) {
2811 bool PredHasWeights =
2812 PBI->extractProfMetadata(PredTrueWeight, PredFalseWeight);
2813 bool SuccHasWeights =
2814 BI->extractProfMetadata(SuccTrueWeight, SuccFalseWeight);
2815 if (PredHasWeights || SuccHasWeights) {
2816 if (!PredHasWeights)
2817 PredTrueWeight = PredFalseWeight = 1;
2818 if (!SuccHasWeights)
2819 SuccTrueWeight = SuccFalseWeight = 1;
2820 return true;
2821 } else {
2822 return false;
2823 }
2824}
2825
2826/// If this basic block is simple enough, and if a predecessor branches to us
2827/// and one of our successors, fold the block into the predecessor and use
2828/// logical operations to pick the right destination.
2829bool llvm::FoldBranchToCommonDest(BranchInst *BI, DomTreeUpdater *DTU,
2830 MemorySSAUpdater *MSSAU,
2831 const TargetTransformInfo *TTI,
2832 unsigned BonusInstThreshold) {
2833 BasicBlock *BB = BI->getParent();
2834
2835 const unsigned PredCount = pred_size(BB);
2836
2837 bool Changed = false;
2838
2839 auto _ = make_scope_exit([&]() {
2840 if (Changed)
2841 ++NumFoldBranchToCommonDest;
2842 });
2843
2844 TargetTransformInfo::TargetCostKind CostKind =
2845 BB->getParent()->hasMinSize() ? TargetTransformInfo::TCK_CodeSize
1
Assuming the condition is false
2
'?' condition is false
2846 : TargetTransformInfo::TCK_SizeAndLatency;
2847
2848 Instruction *Cond = nullptr;
2849 if (BI->isConditional())
3
Calling 'BranchInst::isConditional'
6
Returning from 'BranchInst::isConditional'
7
Taking true branch
2850 Cond = dyn_cast<Instruction>(BI->getCondition());
8
Assuming the object is a 'Instruction'
2851 else {
2852 // For unconditional branch, check for a simple CFG pattern, where
2853 // BB has a single predecessor and BB's successor is also its predecessor's
2854 // successor. If such pattern exists, check for CSE between BB and its
2855 // predecessor.
2856 if (BasicBlock *PB = BB->getSinglePredecessor())
2857 if (BranchInst *PBI = dyn_cast<BranchInst>(PB->getTerminator()))
2858 if (PBI->isConditional() &&
2859 (BI->getSuccessor(0) == PBI->getSuccessor(0) ||
2860 BI->getSuccessor(0) == PBI->getSuccessor(1))) {
2861 for (auto I = BB->instructionsWithoutDebug().begin(),
2862 E = BB->instructionsWithoutDebug().end();
2863 I != E;) {
2864 Instruction *Curr = &*I++;
2865 if (isa<CmpInst>(Curr)) {
2866 Cond = Curr;
2867 break;
2868 }
2869 // Quit if we can't remove this instruction.
2870 if (!tryCSEWithPredecessor(Curr, PB))
2871 return Changed;
2872 Changed = true;
2873 }
2874 }
2875
2876 if (!Cond)
2877 return Changed;
2878 }
2879
2880 if (!Cond
8.1
'Cond' is non-null
8.1
'Cond' is non-null
8.1
'Cond' is non-null
8.1
'Cond' is non-null
8.1
'Cond' is non-null
8.1
'Cond' is non-null
|| (!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
9
Assuming 'Cond' is not a 'CmpInst'
10
Assuming 'Cond' is a 'BinaryOperator'
20
Taking false branch
2881 Cond->getParent() != BB || !Cond->hasOneUse())
11
Assuming the condition is false
12
Calling 'Value::hasOneUse'
18
Returning from 'Value::hasOneUse'
19
Assuming the condition is false
2882 return Changed;
2883
2884 // Only allow this transformation if computing the condition doesn't involve
2885 // too many instructions and these involved instructions can be executed
2886 // unconditionally. We denote all involved instructions except the condition
2887 // as "bonus instructions", and only allow this transformation when the
2888 // number of the bonus instructions we'll need to create when cloning into
2889 // each predecessor does not exceed a certain threshold.
2890 unsigned NumBonusInsts = 0;
2891 for (Instruction &I : *BB) {
2892 // Don't check the branch condition comparison itself.
2893 if (&I == Cond)
2894 continue;
2895 // Ignore dbg intrinsics, and the terminator.
2896 if (isa<DbgInfoIntrinsic>(I) || isa<BranchInst>(I))
2897 continue;
2898 // I must be safe to execute unconditionally.
2899 if (!isSafeToSpeculativelyExecute(&I))
2900 return Changed;
2901
2902 // Account for the cost of duplicating this instruction into each
2903 // predecessor.
2904 NumBonusInsts += PredCount;
2905 // Early exits once we reach the limit.
2906 if (NumBonusInsts > BonusInstThreshold)
2907 return Changed;
2908 }
2909
2910 // Also, for now, all liveout uses of bonus instructions must be in PHI nodes
2911 // in successor blocks as incoming values from the bonus instructions's block,
2912 // otherwise we'll fail to update them.
2913 // FIXME: We could lift this restriction, but we need to form PHI nodes and
2914 // rewrite offending uses, but we can't do that without having a domtree.
2915 if (any_of(*BB, [BB](Instruction &I) {
21
Calling 'any_of<llvm::BasicBlock &, (lambda at /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp:2915:19)>'
32
Returning from 'any_of<llvm::BasicBlock &, (lambda at /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp:2915:19)>'
33
Taking false branch
2916 return any_of(I.uses(), [BB](Use &U) {
2917 auto *User = cast<Instruction>(U.getUser());
2918 if (User->getParent() == BB)
2919 return false; // Not an external use.
2920 auto *PN = dyn_cast<PHINode>(User);
2921 return !PN || PN->getIncomingBlock(U) != BB;
2922 });
2923 }))
2924 return Changed;
2925
2926 // Cond is known to be a compare or binary operator. Check to make sure that
2927 // neither operand is a potentially-trapping constant expression.
2928 if (ConstantExpr *CE
34.1
'CE' is null
34.1
'CE' is null
34.1
'CE' is null
34.1
'CE' is null
34.1
'CE' is null
34.1
'CE' is null
= dyn_cast<ConstantExpr>(Cond->getOperand(0)))
34
Assuming the object is not a 'ConstantExpr'
35
Taking false branch
2929 if (CE->canTrap())
2930 return Changed;
2931 if (ConstantExpr *CE
36.1
'CE' is null
36.1
'CE' is null
36.1
'CE' is null
36.1
'CE' is null
36.1
'CE' is null
36.1
'CE' is null
= dyn_cast<ConstantExpr>(Cond->getOperand(1)))
36
Assuming the object is not a 'ConstantExpr'
37
Taking false branch
2932 if (CE->canTrap())
2933 return Changed;
2934
2935 // Finally, don't infinitely unroll conditional loops.
2936 BasicBlock *TrueDest = BI->getSuccessor(0);
2937 BasicBlock *FalseDest = (BI->isConditional()) ? BI->getSuccessor(1) : nullptr;
38
'?' condition is true
2938 if (TrueDest
38.1
'TrueDest' is not equal to 'BB'
38.1
'TrueDest' is not equal to 'BB'
38.1
'TrueDest' is not equal to 'BB'
38.1
'TrueDest' is not equal to 'BB'
38.1
'TrueDest' is not equal to 'BB'
38.1
'TrueDest' is not equal to 'BB'
== BB || FalseDest
38.2
'FalseDest' is not equal to 'BB'
38.2
'FalseDest' is not equal to 'BB'
38.2
'FalseDest' is not equal to 'BB'
38.2
'FalseDest' is not equal to 'BB'
38.2
'FalseDest' is not equal to 'BB'
38.2
'FalseDest' is not equal to 'BB'
== BB)
39
Taking false branch
2939 return Changed;
2940
2941 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
40
Loop condition is true. Entering loop body
2942 BasicBlock *PredBlock = *PI;
2943 BranchInst *PBI = dyn_cast<BranchInst>(PredBlock->getTerminator());
41
Assuming the object is a 'BranchInst'
2944
2945 // Check that we have two conditional branches. If there is a PHI node in
2946 // the common successor, verify that the same value flows in from both
2947 // blocks.
2948 SmallVector<PHINode *, 4> PHIs;
2949 if (!PBI
41.1
'PBI' is non-null
41.1
'PBI' is non-null
41.1
'PBI' is non-null
41.1
'PBI' is non-null
41.1
'PBI' is non-null
41.1
'PBI' is non-null
|| PBI->isUnconditional() ||
42
Calling 'BranchInst::isUnconditional'
45
Returning from 'BranchInst::isUnconditional'
2950 (BI->isConditional() && !SafeToMergeTerminators(BI, PBI)) ||
46
Calling 'BranchInst::isConditional'
48
Returning from 'BranchInst::isConditional'
49
Calling 'SafeToMergeTerminators'
53
Returning from 'SafeToMergeTerminators'
2951 (!BI->isConditional() &&
54
Calling 'BranchInst::isConditional'
56
Returning from 'BranchInst::isConditional'
2952 !isProfitableToFoldUnconditional(BI, PBI, Cond, PHIs)))
2953 continue;
2954
2955 // Determine if the two branches share a common destination.
2956 Instruction::BinaryOps Opc = Instruction::BinaryOpsEnd;
2957 bool InvertPredCond = false;
2958
2959 if (BI->isConditional()) {
57
Calling 'BranchInst::isConditional'
59
Returning from 'BranchInst::isConditional'
60
Taking true branch
2960 if (PBI->getSuccessor(0) == TrueDest) {
61
Taking true branch
2961 Opc = Instruction::Or;
2962 } else if (PBI->getSuccessor(1) == FalseDest) {
2963 Opc = Instruction::And;
2964 } else if (PBI->getSuccessor(0) == FalseDest) {
2965 Opc = Instruction::And;
2966 InvertPredCond = true;
2967 } else if (PBI->getSuccessor(1) == TrueDest) {
2968 Opc = Instruction::Or;
2969 InvertPredCond = true;
2970 } else {
2971 continue;
2972 }
2973 } else {
2974 if (PBI->getSuccessor(0) != TrueDest && PBI->getSuccessor(1) != TrueDest)
2975 continue;
2976 }
2977
2978 // Check the cost of inserting the necessary logic before performing the
2979 // transformation.
2980 if (TTI && Opc != Instruction::BinaryOpsEnd) {
62
Assuming 'TTI' is null
2981 Type *Ty = BI->getCondition()->getType();
2982 unsigned Cost = TTI->getArithmeticInstrCost(Opc, Ty, CostKind);
2983 if (InvertPredCond && (!PBI->getCondition()->hasOneUse() ||
2984 !isa<CmpInst>(PBI->getCondition())))
2985 Cost += TTI->getArithmeticInstrCost(Instruction::Xor, Ty, CostKind);
2986
2987 if (Cost > BranchFoldThreshold)
2988 continue;
2989 }
2990
2991 LLVM_DEBUG(dbgs() << "FOLDING BRANCH TO COMMON DEST:\n" << *PBI << *BB)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "FOLDING BRANCH TO COMMON DEST:\n"
<< *PBI << *BB; } } while (false)
;
63
Assuming 'DebugFlag' is false
64
Loop condition is false. Exiting loop
2992 Changed = true;
2993
2994 SmallVector<DominatorTree::UpdateType, 3> Updates;
2995
2996 IRBuilder<> Builder(PBI);
2997 // The builder is used to create instructions to eliminate the branch in BB.
2998 // If BB's terminator has !annotation metadata, add it to the new
2999 // instructions.
3000 Builder.CollectMetadataToCopy(BB->getTerminator(),
3001 {LLVMContext::MD_annotation});
3002
3003 // If we need to invert the condition in the pred block to match, do so now.
3004 if (InvertPredCond
64.1
'InvertPredCond' is false
64.1
'InvertPredCond' is false
64.1
'InvertPredCond' is false
64.1
'InvertPredCond' is false
64.1
'InvertPredCond' is false
64.1
'InvertPredCond' is false
) {
65
Taking false branch
3005 Value *NewCond = PBI->getCondition();
3006 if (NewCond->hasOneUse() && isa<CmpInst>(NewCond)) {
3007 CmpInst *CI = cast<CmpInst>(NewCond);
3008 CI->setPredicate(CI->getInversePredicate());
3009 } else {
3010 NewCond =
3011 Builder.CreateNot(NewCond, PBI->getCondition()->getName() + ".not");
3012 }
3013
3014 PBI->setCondition(NewCond);
3015 PBI->swapSuccessors();
3016 }
3017
3018 BasicBlock *UniqueSucc =
3019 BI->isConditional()
66
'?' condition is true
3020 ? (PBI->getSuccessor(0) == BB ? TrueDest : FalseDest)
67
'?' condition is false
3021 : TrueDest;
3022
3023 // Before cloning instructions, notify the successor basic block that it
3024 // is about to have a new predecessor. This will update PHI nodes,
3025 // which will allow us to update live-out uses of bonus instructions.
3026 if (BI->isConditional())
68
Taking true branch
3027 AddPredecessorToBlock(UniqueSucc, PredBlock, BB, MSSAU);
3028
3029 // If we have bonus instructions, clone them into the predecessor block.
3030 // Note that there may be multiple predecessor blocks, so we cannot move
3031 // bonus instructions to a predecessor block.
3032 ValueToValueMapTy VMap; // maps original values to cloned values
3033 Instruction *CondInPred;
69
'CondInPred' declared without an initial value
3034 for (Instruction &BonusInst : *BB) {
3035 if (isa<DbgInfoIntrinsic>(BonusInst) || isa<BranchInst>(BonusInst))
3036 continue;
3037
3038 Instruction *NewBonusInst = BonusInst.clone();
3039
3040 if (&BonusInst == Cond)
3041 CondInPred = NewBonusInst;
3042
3043 if (PBI->getDebugLoc() != NewBonusInst->getDebugLoc()) {
3044 // Unless the instruction has the same !dbg location as the original
3045 // branch, drop it. When we fold the bonus instructions we want to make
3046 // sure we reset their debug locations in order to avoid stepping on
3047 // dead code caused by folding dead branches.
3048 NewBonusInst->setDebugLoc(DebugLoc());
3049 }
3050
3051 RemapInstruction(NewBonusInst, VMap,
3052 RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
3053 VMap[&BonusInst] = NewBonusInst;
3054
3055 // If we moved a load, we cannot any longer claim any knowledge about
3056 // its potential value. The previous information might have been valid
3057 // only given the branch precondition.
3058 // For an analogous reason, we must also drop all the metadata whose
3059 // semantics we don't understand. We *can* preserve !annotation, because
3060 // it is tied to the instruction itself, not the value or position.
3061 NewBonusInst->dropUnknownNonDebugMetadata(LLVMContext::MD_annotation);
3062
3063 PredBlock->getInstList().insert(PBI->getIterator(), NewBonusInst);
3064 NewBonusInst->takeName(&BonusInst);
3065 BonusInst.setName(BonusInst.getName() + ".old");
3066 BonusInst.replaceUsesWithIf(
3067 NewBonusInst, [BB, BI, UniqueSucc, PredBlock](Use &U) {
3068 auto *User = cast<Instruction>(U.getUser());
3069 // Ignore non-external uses of bonus instructions.
3070 if (User->getParent() == BB) {
3071 assert(!isa<PHINode>(User) &&((!isa<PHINode>(User) && "Non-external users are never PHI instructions."
) ? static_cast<void> (0) : __assert_fail ("!isa<PHINode>(User) && \"Non-external users are never PHI instructions.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3072, __PRETTY_FUNCTION__))
3072 "Non-external users are never PHI instructions.")((!isa<PHINode>(User) && "Non-external users are never PHI instructions."
) ? static_cast<void> (0) : __assert_fail ("!isa<PHINode>(User) && \"Non-external users are never PHI instructions.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3072, __PRETTY_FUNCTION__))
;
3073 return false;
3074 }
3075 if (User->getParent() == PredBlock) {
3076 // The "exteral" use is in the block into which we just cloned the
3077 // bonus instruction. This means two things: 1. we are in an
3078 // unreachable block 2. the instruction is self-referencing.
3079 // So let's just rewrite it...
3080 return true;
3081 }
3082 (void)BI;
3083 assert(isa<PHINode>(User) && "All external users must be PHI's.")((isa<PHINode>(User) && "All external users must be PHI's."
) ? static_cast<void> (0) : __assert_fail ("isa<PHINode>(User) && \"All external users must be PHI's.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3083, __PRETTY_FUNCTION__))
;
3084 auto *PN = cast<PHINode>(User);
3085 assert(is_contained(successors(BB), User->getParent()) &&((is_contained(successors(BB), User->getParent()) &&
"All external users must be in successors of BB.") ? static_cast
<void> (0) : __assert_fail ("is_contained(successors(BB), User->getParent()) && \"All external users must be in successors of BB.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3086, __PRETTY_FUNCTION__))
3086 "All external users must be in successors of BB.")((is_contained(successors(BB), User->getParent()) &&
"All external users must be in successors of BB.") ? static_cast
<void> (0) : __assert_fail ("is_contained(successors(BB), User->getParent()) && \"All external users must be in successors of BB.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3086, __PRETTY_FUNCTION__))
;
3087 assert((PN->getIncomingBlock(U) == BB ||(((PN->getIncomingBlock(U) == BB || PN->getIncomingBlock
(U) == PredBlock) && "The incoming block for that incoming value external use "
"must be either the original block with bonus instructions, "
"or the new predecessor block.") ? static_cast<void> (
0) : __assert_fail ("(PN->getIncomingBlock(U) == BB || PN->getIncomingBlock(U) == PredBlock) && \"The incoming block for that incoming value external use \" \"must be either the original block with bonus instructions, \" \"or the new predecessor block.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3091, __PRETTY_FUNCTION__))
3088 PN->getIncomingBlock(U) == PredBlock) &&(((PN->getIncomingBlock(U) == BB || PN->getIncomingBlock
(U) == PredBlock) && "The incoming block for that incoming value external use "
"must be either the original block with bonus instructions, "
"or the new predecessor block.") ? static_cast<void> (
0) : __assert_fail ("(PN->getIncomingBlock(U) == BB || PN->getIncomingBlock(U) == PredBlock) && \"The incoming block for that incoming value external use \" \"must be either the original block with bonus instructions, \" \"or the new predecessor block.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3091, __PRETTY_FUNCTION__))
3089 "The incoming block for that incoming value external use "(((PN->getIncomingBlock(U) == BB || PN->getIncomingBlock
(U) == PredBlock) && "The incoming block for that incoming value external use "
"must be either the original block with bonus instructions, "
"or the new predecessor block.") ? static_cast<void> (
0) : __assert_fail ("(PN->getIncomingBlock(U) == BB || PN->getIncomingBlock(U) == PredBlock) && \"The incoming block for that incoming value external use \" \"must be either the original block with bonus instructions, \" \"or the new predecessor block.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3091, __PRETTY_FUNCTION__))
3090 "must be either the original block with bonus instructions, "(((PN->getIncomingBlock(U) == BB || PN->getIncomingBlock
(U) == PredBlock) && "The incoming block for that incoming value external use "
"must be either the original block with bonus instructions, "
"or the new predecessor block.") ? static_cast<void> (
0) : __assert_fail ("(PN->getIncomingBlock(U) == BB || PN->getIncomingBlock(U) == PredBlock) && \"The incoming block for that incoming value external use \" \"must be either the original block with bonus instructions, \" \"or the new predecessor block.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3091, __PRETTY_FUNCTION__))
3091 "or the new predecessor block.")(((PN->getIncomingBlock(U) == BB || PN->getIncomingBlock
(U) == PredBlock) && "The incoming block for that incoming value external use "
"must be either the original block with bonus instructions, "
"or the new predecessor block.") ? static_cast<void> (
0) : __assert_fail ("(PN->getIncomingBlock(U) == BB || PN->getIncomingBlock(U) == PredBlock) && \"The incoming block for that incoming value external use \" \"must be either the original block with bonus instructions, \" \"or the new predecessor block.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3091, __PRETTY_FUNCTION__))
;
3092 // UniqueSucc is the block for which we change it's predecessors,
3093 // so it is the only block in which we'll need to update PHI nodes.
3094 if (User->getParent() != UniqueSucc)
3095 return false;
3096 // Update the incoming value for the new predecessor.
3097 return PN->getIncomingBlock(U) ==
3098 (BI->isConditional() ? PredBlock : BB);
3099 });
3100 }
3101
3102 // Now that the Cond was cloned into the predecessor basic block,
3103 // or/and the two conditions together.
3104 if (BI->isConditional()) {
70
Calling 'BranchInst::isConditional'
72
Returning from 'BranchInst::isConditional'
73
Taking true branch
3105 Instruction *NewCond = cast<Instruction>(
3106 Builder.CreateBinOp(Opc, PBI->getCondition(), CondInPred, "or.cond"));
74
3rd function call argument is an uninitialized value
3107 PBI->setCondition(NewCond);
3108
3109 uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight;
3110 bool HasWeights =
3111 extractPredSuccWeights(PBI, BI, PredTrueWeight, PredFalseWeight,
3112 SuccTrueWeight, SuccFalseWeight);
3113 SmallVector<uint64_t, 8> NewWeights;
3114
3115 if (PBI->getSuccessor(0) == BB) {
3116 if (HasWeights) {
3117 // PBI: br i1 %x, BB, FalseDest
3118 // BI: br i1 %y, UniqueSucc, FalseDest
3119 // TrueWeight is TrueWeight for PBI * TrueWeight for BI.
3120 NewWeights.push_back(PredTrueWeight * SuccTrueWeight);
3121 // FalseWeight is FalseWeight for PBI * TotalWeight for BI +
3122 // TrueWeight for PBI * FalseWeight for BI.
3123 // We assume that total weights of a BranchInst can fit into 32 bits.
3124 // Therefore, we will not have overflow using 64-bit arithmetic.
3125 NewWeights.push_back(PredFalseWeight *
3126 (SuccFalseWeight + SuccTrueWeight) +
3127 PredTrueWeight * SuccFalseWeight);
3128 }
3129 PBI->setSuccessor(0, UniqueSucc);
3130 }
3131 if (PBI->getSuccessor(1) == BB) {
3132 if (HasWeights) {
3133 // PBI: br i1 %x, TrueDest, BB
3134 // BI: br i1 %y, TrueDest, UniqueSucc
3135 // TrueWeight is TrueWeight for PBI * TotalWeight for BI +
3136 // FalseWeight for PBI * TrueWeight for BI.
3137 NewWeights.push_back(PredTrueWeight *
3138 (SuccFalseWeight + SuccTrueWeight) +
3139 PredFalseWeight * SuccTrueWeight);
3140 // FalseWeight is FalseWeight for PBI * FalseWeight for BI.
3141 NewWeights.push_back(PredFalseWeight * SuccFalseWeight);
3142 }
3143 PBI->setSuccessor(1, UniqueSucc);
3144 }
3145 if (NewWeights.size() == 2) {
3146 // Halve the weights if any of them cannot fit in an uint32_t
3147 FitWeights(NewWeights);
3148
3149 SmallVector<uint32_t, 8> MDWeights(NewWeights.begin(),
3150 NewWeights.end());
3151 setBranchWeights(PBI, MDWeights[0], MDWeights[1]);
3152 } else
3153 PBI->setMetadata(LLVMContext::MD_prof, nullptr);
3154
3155 Updates.push_back({DominatorTree::Insert, PredBlock, UniqueSucc});
3156 Updates.push_back({DominatorTree::Delete, PredBlock, BB});
3157 } else {
3158 // Update PHI nodes in the common successors.
3159 for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
3160 ConstantInt *PBI_C = cast<ConstantInt>(
3161 PHIs[i]->getIncomingValueForBlock(PBI->getParent()));
3162 assert(PBI_C->getType()->isIntegerTy(1))((PBI_C->getType()->isIntegerTy(1)) ? static_cast<void
> (0) : __assert_fail ("PBI_C->getType()->isIntegerTy(1)"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3162, __PRETTY_FUNCTION__))
;
3163 Instruction *MergedCond = nullptr;
3164 if (PBI->getSuccessor(0) == UniqueSucc) {
3165 Updates.push_back(
3166 {DominatorTree::Delete, PredBlock, PBI->getSuccessor(1)});
3167 // Create (PBI_Cond and PBI_C) or (!PBI_Cond and BI_Value)
3168 // PBI_C is true: PBI_Cond or (!PBI_Cond and BI_Value)
3169 // is false: !PBI_Cond and BI_Value
3170 Instruction *NotCond = cast<Instruction>(
3171 Builder.CreateNot(PBI->getCondition(), "not.cond"));
3172 MergedCond = cast<Instruction>(
3173 Builder.CreateBinOp(Instruction::And, NotCond, CondInPred,
3174 "and.cond"));
3175 if (PBI_C->isOne())
3176 MergedCond = cast<Instruction>(Builder.CreateBinOp(
3177 Instruction::Or, PBI->getCondition(), MergedCond, "or.cond"));
3178 } else {
3179 assert(PBI->getSuccessor(1) == UniqueSucc && "Unexpected branch")((PBI->getSuccessor(1) == UniqueSucc && "Unexpected branch"
) ? static_cast<void> (0) : __assert_fail ("PBI->getSuccessor(1) == UniqueSucc && \"Unexpected branch\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3179, __PRETTY_FUNCTION__))
;
3180 Updates.push_back(
3181 {DominatorTree::Delete, PredBlock, PBI->getSuccessor(0)});
3182 // Create (PBI_Cond and BI_Value) or (!PBI_Cond and PBI_C)
3183 // PBI_C is true: (PBI_Cond and BI_Value) or (!PBI_Cond)
3184 // is false: PBI_Cond and BI_Value
3185 MergedCond = cast<Instruction>(Builder.CreateBinOp(
3186 Instruction::And, PBI->getCondition(), CondInPred, "and.cond"));
3187 if (PBI_C->isOne()) {
3188 Instruction *NotCond = cast<Instruction>(
3189 Builder.CreateNot(PBI->getCondition(), "not.cond"));
3190 MergedCond = cast<Instruction>(Builder.CreateBinOp(
3191 Instruction::Or, NotCond, MergedCond, "or.cond"));
3192 }
3193 }
3194 // Update PHI Node.
3195 PHIs[i]->setIncomingValueForBlock(PBI->getParent(), MergedCond);
3196 }
3197
3198 // PBI is changed to branch to UniqueSucc below. Remove itself from
3199 // potential phis from all other successors.
3200 if (MSSAU)
3201 MSSAU->changeCondBranchToUnconditionalTo(PBI, UniqueSucc);
3202
3203 // Change PBI from Conditional to Unconditional.
3204 BranchInst *New_PBI = BranchInst::Create(UniqueSucc, PBI);
3205 EraseTerminatorAndDCECond(PBI, MSSAU);
3206 PBI = New_PBI;
3207 }
3208
3209 if (DTU)
3210 DTU->applyUpdates(Updates);
3211
3212 // If BI was a loop latch, it may have had associated loop metadata.
3213 // We need to copy it to the new latch, that is, PBI.
3214 if (MDNode *LoopMD = BI->getMetadata(LLVMContext::MD_loop))
3215 PBI->setMetadata(LLVMContext::MD_loop, LoopMD);
3216
3217 // TODO: If BB is reachable from all paths through PredBlock, then we
3218 // could replace PBI's branch probabilities with BI's.
3219
3220 // Copy any debug value intrinsics into the end of PredBlock.
3221 for (Instruction &I : *BB) {
3222 if (isa<DbgInfoIntrinsic>(I)) {
3223 Instruction *NewI = I.clone();
3224 RemapInstruction(NewI, VMap,
3225 RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
3226 NewI->insertBefore(PBI);
3227 }
3228 }
3229
3230 return Changed;
3231 }
3232 return Changed;
3233}
3234
3235// If there is only one store in BB1 and BB2, return it, otherwise return
3236// nullptr.
3237static StoreInst *findUniqueStoreInBlocks(BasicBlock *BB1, BasicBlock *BB2) {
3238 StoreInst *S = nullptr;
3239 for (auto *BB : {BB1, BB2}) {
3240 if (!BB)
3241 continue;
3242 for (auto &I : *BB)
3243 if (auto *SI = dyn_cast<StoreInst>(&I)) {
3244 if (S)
3245 // Multiple stores seen.
3246 return nullptr;
3247 else
3248 S = SI;
3249 }
3250 }
3251 return S;
3252}
3253
3254static Value *ensureValueAvailableInSuccessor(Value *V, BasicBlock *BB,
3255 Value *AlternativeV = nullptr) {
3256 // PHI is going to be a PHI node that allows the value V that is defined in
3257 // BB to be referenced in BB's only successor.
3258 //
3259 // If AlternativeV is nullptr, the only value we care about in PHI is V. It
3260 // doesn't matter to us what the other operand is (it'll never get used). We
3261 // could just create a new PHI with an undef incoming value, but that could
3262 // increase register pressure if EarlyCSE/InstCombine can't fold it with some
3263 // other PHI. So here we directly look for some PHI in BB's successor with V
3264 // as an incoming operand. If we find one, we use it, else we create a new
3265 // one.
3266 //
3267 // If AlternativeV is not nullptr, we care about both incoming values in PHI.
3268 // PHI must be exactly: phi <ty> [ %BB, %V ], [ %OtherBB, %AlternativeV]
3269 // where OtherBB is the single other predecessor of BB's only successor.
3270 PHINode *PHI = nullptr;
3271 BasicBlock *Succ = BB->getSingleSuccessor();
3272
3273 for (auto I = Succ->begin(); isa<PHINode>(I); ++I)
3274 if (cast<PHINode>(I)->getIncomingValueForBlock(BB) == V) {
3275 PHI = cast<PHINode>(I);
3276 if (!AlternativeV)
3277 break;
3278
3279 assert(Succ->hasNPredecessors(2))((Succ->hasNPredecessors(2)) ? static_cast<void> (0)
: __assert_fail ("Succ->hasNPredecessors(2)", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3279, __PRETTY_FUNCTION__))
;
3280 auto PredI = pred_begin(Succ);
3281 BasicBlock *OtherPredBB = *PredI == BB ? *++PredI : *PredI;
3282 if (PHI->getIncomingValueForBlock(OtherPredBB) == AlternativeV)
3283 break;
3284 PHI = nullptr;
3285 }
3286 if (PHI)
3287 return PHI;
3288
3289 // If V is not an instruction defined in BB, just return it.
3290 if (!AlternativeV &&
3291 (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB))
3292 return V;
3293
3294 PHI = PHINode::Create(V->getType(), 2, "simplifycfg.merge", &Succ->front());
3295 PHI->addIncoming(V, BB);
3296 for (BasicBlock *PredBB : predecessors(Succ))
3297 if (PredBB != BB)
3298 PHI->addIncoming(
3299 AlternativeV ? AlternativeV : UndefValue::get(V->getType()), PredBB);
3300 return PHI;
3301}
3302
3303static bool mergeConditionalStoreToAddress(
3304 BasicBlock *PTB, BasicBlock *PFB, BasicBlock *QTB, BasicBlock *QFB,
3305 BasicBlock *PostBB, Value *Address, bool InvertPCond, bool InvertQCond,
3306 DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI) {
3307 // For every pointer, there must be exactly two stores, one coming from
3308 // PTB or PFB, and the other from QTB or QFB. We don't support more than one
3309 // store (to any address) in PTB,PFB or QTB,QFB.
3310 // FIXME: We could relax this restriction with a bit more work and performance
3311 // testing.
3312 StoreInst *PStore = findUniqueStoreInBlocks(PTB, PFB);
3313 StoreInst *QStore = findUniqueStoreInBlocks(QTB, QFB);
3314 if (!PStore || !QStore)
3315 return false;
3316
3317 // Now check the stores are compatible.
3318 if (!QStore->isUnordered() || !PStore->isUnordered())
3319 return false;
3320
3321 // Check that sinking the store won't cause program behavior changes. Sinking
3322 // the store out of the Q blocks won't change any behavior as we're sinking
3323 // from a block to its unconditional successor. But we're moving a store from
3324 // the P blocks down through the middle block (QBI) and past both QFB and QTB.
3325 // So we need to check that there are no aliasing loads or stores in
3326 // QBI, QTB and QFB. We also need to check there are no conflicting memory
3327 // operations between PStore and the end of its parent block.
3328 //
3329 // The ideal way to do this is to query AliasAnalysis, but we don't
3330 // preserve AA currently so that is dangerous. Be super safe and just
3331 // check there are no other memory operations at all.
3332 for (auto &I : *QFB->getSinglePredecessor())
3333 if (I.mayReadOrWriteMemory())
3334 return false;
3335 for (auto &I : *QFB)
3336 if (&I != QStore && I.mayReadOrWriteMemory())
3337 return false;
3338 if (QTB)
3339 for (auto &I : *QTB)
3340 if (&I != QStore && I.mayReadOrWriteMemory())
3341 return false;
3342 for (auto I = BasicBlock::iterator(PStore), E = PStore->getParent()->end();
3343 I != E; ++I)
3344 if (&*I != PStore && I->mayReadOrWriteMemory())
3345 return false;
3346
3347 // If we're not in aggressive mode, we only optimize if we have some
3348 // confidence that by optimizing we'll allow P and/or Q to be if-converted.
3349 auto IsWorthwhile = [&](BasicBlock *BB, ArrayRef<StoreInst *> FreeStores) {
3350 if (!BB)
3351 return true;
3352 // Heuristic: if the block can be if-converted/phi-folded and the
3353 // instructions inside are all cheap (arithmetic/GEPs), it's worthwhile to
3354 // thread this store.
3355 int BudgetRemaining =
3356 PHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic;
3357 for (auto &I : BB->instructionsWithoutDebug()) {
3358 // Consider terminator instruction to be free.
3359 if (I.isTerminator())
3360 continue;
3361 // If this is one the stores that we want to speculate out of this BB,
3362 // then don't count it's cost, consider it to be free.
3363 if (auto *S = dyn_cast<StoreInst>(&I))
3364 if (llvm::find(FreeStores, S))
3365 continue;
3366 // Else, we have a white-list of instructions that we are ak speculating.
3367 if (!isa<BinaryOperator>(I) && !isa<GetElementPtrInst>(I))
3368 return false; // Not in white-list - not worthwhile folding.
3369 // And finally, if this is a non-free instruction that we are okay
3370 // speculating, ensure that we consider the speculation budget.
3371 BudgetRemaining -= TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
3372 if (BudgetRemaining < 0)
3373 return false; // Eagerly refuse to fold as soon as we're out of budget.
3374 }
3375 assert(BudgetRemaining >= 0 &&((BudgetRemaining >= 0 && "When we run out of budget we will eagerly return from within the "
"per-instruction loop.") ? static_cast<void> (0) : __assert_fail
("BudgetRemaining >= 0 && \"When we run out of budget we will eagerly return from within the \" \"per-instruction loop.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3377, __PRETTY_FUNCTION__))
3376 "When we run out of budget we will eagerly return from within the "((BudgetRemaining >= 0 && "When we run out of budget we will eagerly return from within the "
"per-instruction loop.") ? static_cast<void> (0) : __assert_fail
("BudgetRemaining >= 0 && \"When we run out of budget we will eagerly return from within the \" \"per-instruction loop.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3377, __PRETTY_FUNCTION__))
3377 "per-instruction loop.")((BudgetRemaining >= 0 && "When we run out of budget we will eagerly return from within the "
"per-instruction loop.") ? static_cast<void> (0) : __assert_fail
("BudgetRemaining >= 0 && \"When we run out of budget we will eagerly return from within the \" \"per-instruction loop.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3377, __PRETTY_FUNCTION__))
;
3378 return true;
3379 };
3380
3381 const std::array<StoreInst *, 2> FreeStores = {PStore, QStore};
3382 if (!MergeCondStoresAggressively &&
3383 (!IsWorthwhile(PTB, FreeStores) || !IsWorthwhile(PFB, FreeStores) ||
3384 !IsWorthwhile(QTB, FreeStores) || !IsWorthwhile(QFB, FreeStores)))
3385 return false;
3386
3387 // If PostBB has more than two predecessors, we need to split it so we can
3388 // sink the store.
3389 if (std::next(pred_begin(PostBB), 2) != pred_end(PostBB)) {
3390 // We know that QFB's only successor is PostBB. And QFB has a single
3391 // predecessor. If QTB exists, then its only successor is also PostBB.
3392 // If QTB does not exist, then QFB's only predecessor has a conditional
3393 // branch to QFB and PostBB.
3394 BasicBlock *TruePred = QTB ? QTB : QFB->getSinglePredecessor();
3395 BasicBlock *NewBB =
3396 SplitBlockPredecessors(PostBB, {QFB, TruePred}, "condstore.split", DTU);
3397 if (!NewBB)
3398 return false;
3399 PostBB = NewBB;
3400 }
3401
3402 // OK, we're going to sink the stores to PostBB. The store has to be
3403 // conditional though, so first create the predicate.
3404 Value *PCond = cast<BranchInst>(PFB->getSinglePredecessor()->getTerminator())
3405 ->getCondition();
3406 Value *QCond = cast<BranchInst>(QFB->getSinglePredecessor()->getTerminator())
3407 ->getCondition();
3408
3409 Value *PPHI = ensureValueAvailableInSuccessor(PStore->getValueOperand(),
3410 PStore->getParent());
3411 Value *QPHI = ensureValueAvailableInSuccessor(QStore->getValueOperand(),
3412 QStore->getParent(), PPHI);
3413
3414 IRBuilder<> QB(&*PostBB->getFirstInsertionPt());
3415
3416 Value *PPred = PStore->getParent() == PTB ? PCond : QB.CreateNot(PCond);
3417 Value *QPred = QStore->getParent() == QTB ? QCond : QB.CreateNot(QCond);
3418
3419 if (InvertPCond)
3420 PPred = QB.CreateNot(PPred);
3421 if (InvertQCond)
3422 QPred = QB.CreateNot(QPred);
3423 Value *CombinedPred = QB.CreateOr(PPred, QPred);
3424
3425 auto *T = SplitBlockAndInsertIfThen(CombinedPred, &*QB.GetInsertPoint(),
3426 /*Unreachable=*/false,
3427 /*BranchWeights=*/nullptr, DTU);
3428 QB.SetInsertPoint(T);
3429 StoreInst *SI = cast<StoreInst>(QB.CreateStore(QPHI, Address));
3430 AAMDNodes AAMD;
3431 PStore->getAAMetadata(AAMD, /*Merge=*/false);
3432 PStore->getAAMetadata(AAMD, /*Merge=*/true);
3433 SI->setAAMetadata(AAMD);
3434 // Choose the minimum alignment. If we could prove both stores execute, we
3435 // could use biggest one. In this case, though, we only know that one of the
3436 // stores executes. And we don't know it's safe to take the alignment from a
3437 // store that doesn't execute.
3438 SI->setAlignment(std::min(PStore->getAlign(), QStore->getAlign()));
3439
3440 QStore->eraseFromParent();
3441 PStore->eraseFromParent();
3442
3443 return true;
3444}
3445
/// Try to speculatively sink conditional stores found in two nested
/// diamonds/triangles (headed by PBI, then QBI) into a single store after the
/// inner diamond, predicated on the union of the two branch conditions.
/// \returns true if at least one common store address was merged.
3446static bool mergeConditionalStores(BranchInst *PBI, BranchInst *QBI,
3447 DomTreeUpdater *DTU, const DataLayout &DL,
3448 const TargetTransformInfo &TTI) {
3449 // The intention here is to find diamonds or triangles (see below) where each
3450 // conditional block contains a store to the same address. Both of these
3451 // stores are conditional, so they can't be unconditionally sunk. But it may
3452 // be profitable to speculatively sink the stores into one merged store at the
3453 // end, and predicate the merged store on the union of the two conditions of
3454 // PBI and QBI.
3455 //
3456 // This can reduce the number of stores executed if both of the conditions are
3457 // true, and can allow the blocks to become small enough to be if-converted.
3458 // This optimization will also chain, so that ladders of test-and-set
3459 // sequences can be if-converted away.
3460 //
3461 // We only deal with simple diamonds or triangles:
3462 //
3463 // PBI or PBI or a combination of the two
3464 // / \ | \
3465 // PTB PFB | PFB
3466 // \ / | /
3467 // QBI QBI
3468 // / \ | \
3469 // QTB QFB | QFB
3470 // \ / | /
3471 // PostBB PostBB
3472 //
3473 // We model triangles as a type of diamond with a nullptr "true" block.
3474 // Triangles are canonicalized so that the fallthrough edge is represented by
3475 // a true condition, as in the diagram above.
3476 BasicBlock *PTB = PBI->getSuccessor(0);
3477 BasicBlock *PFB = PBI->getSuccessor(1);
3478 BasicBlock *QTB = QBI->getSuccessor(0);
3479 BasicBlock *QFB = QBI->getSuccessor(1);
3480 BasicBlock *PostBB = QFB->getSingleSuccessor();
3481
3482 // Make sure we have a good guess for PostBB. If QTB's only successor is
3483 // QFB, then QFB is a better PostBB.
3484 if (QTB->getSingleSuccessor() == QFB)
3485 PostBB = QFB;
3486
3487 // If we couldn't find a good PostBB, stop.
3488 if (!PostBB)
3489 return false;
3490
3491 bool InvertPCond = false, InvertQCond = false;
3492 // Canonicalize fallthroughs to the true branches.
3493 if (PFB == QBI->getParent()) {
3494 std::swap(PFB, PTB);
3495 InvertPCond = true;
3496 }
3497 if (QFB == PostBB) {
3498 std::swap(QFB, QTB);
3499 InvertQCond = true;
3500 }
3501
3502 // From this point on we can assume PTB or QTB may be fallthroughs but PFB
3503 // and QFB may not. Model fallthroughs as a nullptr block.
3504 if (PTB == QBI->getParent())
3505 PTB = nullptr;
3506 if (QTB == PostBB)
3507 QTB = nullptr;
3508
3509 // Legality bailouts. We must have at least the non-fallthrough blocks and
3510 // the post-dominating block, and the non-fallthroughs must only have one
3511 // predecessor.
3512 auto HasOnePredAndOneSucc = [](BasicBlock *BB, BasicBlock *P, BasicBlock *S) {
3513 return BB->getSinglePredecessor() == P && BB->getSingleSuccessor() == S;
3514 };
3515 if (!HasOnePredAndOneSucc(PFB, PBI->getParent(), QBI->getParent()) ||
3516 !HasOnePredAndOneSucc(QFB, QBI->getParent(), PostBB))
3517 return false;
 // The inner branch's block must have exactly two uses (expected to be the
 // two incoming edges from the P-side blocks); anything more would mean
 // additional paths reach the Q stores. NOTE(review): hasNUses counts uses
 // of the block as a Value (branch targets / blockaddress) — confirm intent.
3518 if ((PTB && !HasOnePredAndOneSucc(PTB, PBI->getParent(), QBI->getParent())) ||
3519 (QTB && !HasOnePredAndOneSucc(QTB, QBI->getParent(), PostBB)))
3520 return false;
3521 if (!QBI->getParent()->hasNUses(2))
3522 return false;
3523
3524 // OK, this is a sequence of two diamonds or triangles.
3525 // Check if there are stores in PTB or PFB that are repeated in QTB or QFB.
3526 SmallPtrSet<Value *, 4> PStoreAddresses, QStoreAddresses;
3527 for (auto *BB : {PTB, PFB}) {
3528 if (!BB)
3529 continue;
3530 for (auto &I : *BB)
3531 if (StoreInst *SI = dyn_cast<StoreInst>(&I))
3532 PStoreAddresses.insert(SI->getPointerOperand());
3533 }
3534 for (auto *BB : {QTB, QFB}) {
3535 if (!BB)
3536 continue;
3537 for (auto &I : *BB)
3538 if (StoreInst *SI = dyn_cast<StoreInst>(&I))
3539 QStoreAddresses.insert(SI->getPointerOperand());
3540 }
3541
3542 set_intersect(PStoreAddresses, QStoreAddresses);
3543 // set_intersect mutates PStoreAddresses in place. Rename it here to make it
3544 // clear what it contains.
3545 auto &CommonAddresses = PStoreAddresses;
3546
 // Attempt each common address independently; failure to merge one address
 // does not prevent merging the others.
3547 bool Changed = false;
3548 for (auto *Address : CommonAddresses)
3549 Changed |=
3550 mergeConditionalStoreToAddress(PTB, PFB, QTB, QFB, PostBB, Address,
3551 InvertPCond, InvertQCond, DTU, DL, TTI);
3552 return Changed;
3553}
3554
3555/// If the previous block ended with a widenable branch, determine if reusing
3556/// the target block is profitable and legal. This will have the effect of
3557/// "widening" PBI, but doesn't require us to reason about hosting safety.
3558static bool tryWidenCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI,
3559 DomTreeUpdater *DTU) {
3560 // TODO: This can be generalized in two important ways:
3561 // 1) We can allow phi nodes in IfFalseBB and simply reuse all the input
3562 // values from the PBI edge.
3563 // 2) We can sink side effecting instructions into BI's fallthrough
3564 // successor provided they doesn't contribute to computation of
3565 // BI's condition.
3566 Value *CondWB, *WC;
3567 BasicBlock *IfTrueBB, *IfFalseBB;
3568 if (!parseWidenableBranch(PBI, CondWB, WC, IfTrueBB, IfFalseBB) ||
3569 IfTrueBB != BI->getParent() || !BI->getParent()->getSinglePredecessor())
3570 return false;
3571 if (!IfFalseBB->phis().empty())
3572 return false; // TODO
3573 // Use lambda to lazily compute expensive condition after cheap ones.
3574 auto NoSideEffects = [](BasicBlock &BB) {
3575 return !llvm::any_of(BB, [](const Instruction &I) {
3576 return I.mayWriteToMemory() || I.mayHaveSideEffects();
3577 });
3578 };
3579 if (BI->getSuccessor(1) != IfFalseBB && // no inf looping
3580 BI->getSuccessor(1)->getTerminatingDeoptimizeCall() && // profitability
3581 NoSideEffects(*BI->getParent())) {
3582 auto *OldSuccessor = BI->getSuccessor(1);
3583 OldSuccessor->removePredecessor(BI->getParent());
3584 BI->setSuccessor(1, IfFalseBB);
3585 if (DTU)
3586 DTU->applyUpdates(
3587 {{DominatorTree::Insert, BI->getParent(), IfFalseBB},
3588 {DominatorTree::Delete, BI->getParent(), OldSuccessor}});
3589 return true;
3590 }
3591 if (BI->getSuccessor(0) != IfFalseBB && // no inf looping
3592 BI->getSuccessor(0)->getTerminatingDeoptimizeCall() && // profitability
3593 NoSideEffects(*BI->getParent())) {
3594 auto *OldSuccessor = BI->getSuccessor(0);
3595 OldSuccessor->removePredecessor(BI->getParent());
3596 BI->setSuccessor(0, IfFalseBB);
3597 if (DTU)
3598 DTU->applyUpdates(
3599 {{DominatorTree::Insert, BI->getParent(), IfFalseBB},
3600 {DominatorTree::Delete, BI->getParent(), OldSuccessor}});
3601 return true;
3602 }
3603 return false;
3604}
3605
3606/// If we have a conditional branch as a predecessor of another block,
3607/// this function tries to simplify it. We know
3608/// that PBI and BI are both conditional branches, and BI is in one of the
3609/// successor blocks of PBI - PBI branches to BI.
/// \returns true if either branch (usually PBI) was rewritten.
3610static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI,
3611 DomTreeUpdater *DTU,
3612 const DataLayout &DL,
3613 const TargetTransformInfo &TTI) {
3614 assert(PBI->isConditional() && BI->isConditional())((PBI->isConditional() && BI->isConditional()) ?
static_cast<void> (0) : __assert_fail ("PBI->isConditional() && BI->isConditional()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 3614, __PRETTY_FUNCTION__))
;
3615 BasicBlock *BB = BI->getParent();
3616
3617 // If this block ends with a branch instruction, and if there is a
3618 // predecessor that ends on a branch of the same condition, make
3619 // this conditional branch redundant.
3620 if (PBI->getCondition() == BI->getCondition() &&
3621 PBI->getSuccessor(0) != PBI->getSuccessor(1)) {
3622 // Okay, the outcome of this conditional branch is statically
3623 // knowable. If this block had a single pred, handle specially.
3624 if (BB->getSinglePredecessor()) {
3625 // Turn this into a branch on constant.
3626 bool CondIsTrue = PBI->getSuccessor(0) == BB;
3627 BI->setCondition(
3628 ConstantInt::get(Type::getInt1Ty(BB->getContext()), CondIsTrue));
3629 return true; // Nuke the branch on constant.
3630 }
3631
3632 // Otherwise, if there are multiple predecessors, insert a PHI that merges
3633 // in the constant and simplify the block result. Subsequent passes of
3634 // simplifycfg will thread the block.
3635 if (BlockIsSimpleEnoughToThreadThrough(BB)) {
3636 pred_iterator PB = pred_begin(BB), PE = pred_end(BB);
3637 PHINode *NewPN = PHINode::Create(
3638 Type::getInt1Ty(BB->getContext()), std::distance(PB, PE),
3639 BI->getCondition()->getName() + ".pr", &BB->front());
3640 // Okay, we're going to insert the PHI node. Since PBI is not the only
3641 // predecessor, compute the PHI'd conditional value for all of the preds.
3642 // Any predecessor where the condition is not computable we keep symbolic.
 // NOTE: PBI is deliberately reassigned to each predecessor's terminator
 // here; the original PBI is not needed again on this path.
3643 for (pred_iterator PI = PB; PI != PE; ++PI) {
3644 BasicBlock *P = *PI;
3645 if ((PBI = dyn_cast<BranchInst>(P->getTerminator())) && PBI != BI &&
3646 PBI->isConditional() && PBI->getCondition() == BI->getCondition() &&
3647 PBI->getSuccessor(0) != PBI->getSuccessor(1)) {
3648 bool CondIsTrue = PBI->getSuccessor(0) == BB;
3649 NewPN->addIncoming(
3650 ConstantInt::get(Type::getInt1Ty(BB->getContext()), CondIsTrue),
3651 P);
3652 } else {
3653 NewPN->addIncoming(BI->getCondition(), P);
3654 }
3655 }
3656
3657 BI->setCondition(NewPN);
3658 return true;
3659 }
3660 }
3661
3662 // If the previous block ended with a widenable branch, determine if reusing
3663 // the target block is profitable and legal. This will have the effect of
3664 // "widening" PBI, but doesn't require us to reason about hosting safety.
3665 if (tryWidenCondBranchToCondBranch(PBI, BI, DTU))
3666 return true;
3667
 // A condition that may trap cannot be speculated into a select below.
3668 if (auto *CE = dyn_cast<ConstantExpr>(BI->getCondition()))
3669 if (CE->canTrap())
3670 return false;
3671
3672 // If both branches are conditional and both contain stores to the same
3673 // address, remove the stores from the conditionals and create a conditional
3674 // merged store at the end.
3675 if (MergeCondStores && mergeConditionalStores(PBI, BI, DTU, DL, TTI))
3676 return true;
3677
3678 // If this is a conditional branch in an empty block, and if any
3679 // predecessors are a conditional branch to one of our destinations,
3680 // fold the conditions into logical ops and one cond br.
3681
3682 // Ignore dbg intrinsics.
3683 if (&*BB->instructionsWithoutDebug().begin() != BI)
3684 return false;
3685
 // Find which successor of PBI and of BI is the shared destination.
 // PBIOp/BIOp record that successor's index on each branch; both are always
 // assigned here before use (the final else returns).
3686 int PBIOp, BIOp;
3687 if (PBI->getSuccessor(0) == BI->getSuccessor(0)) {
3688 PBIOp = 0;
3689 BIOp = 0;
3690 } else if (PBI->getSuccessor(0) == BI->getSuccessor(1)) {
3691 PBIOp = 0;
3692 BIOp = 1;
3693 } else if (PBI->getSuccessor(1) == BI->getSuccessor(0)) {
3694 PBIOp = 1;
3695 BIOp = 0;
3696 } else if (PBI->getSuccessor(1) == BI->getSuccessor(1)) {
3697 PBIOp = 1;
3698 BIOp = 1;
3699 } else {
3700 return false;
3701 }
3702
3703 // Check to make sure that the other destination of this branch
3704 // isn't BB itself. If so, this is an infinite loop that will
3705 // keep getting unwound.
3706 if (PBI->getSuccessor(PBIOp) == BB)
3707 return false;
3708
3709 // Do not perform this transformation if it would require
3710 // insertion of a large number of select instructions. For targets
3711 // without predication/cmovs, this is a big pessimization.
3712
3713 // Also do not perform this transformation if any phi node in the common
3714 // destination block can trap when reached by BB or PBB (PR17073). In that
3715 // case, it would be unsafe to hoist the operation into a select instruction.
3716
3717 BasicBlock *CommonDest = PBI->getSuccessor(PBIOp);
3718 BasicBlock *RemovedDest = PBI->getSuccessor(PBIOp ^ 1);
3719 unsigned NumPhis = 0;
3720 for (BasicBlock::iterator II = CommonDest->begin(); isa<PHINode>(II);
3721 ++II, ++NumPhis) {
3722 if (NumPhis > 2) // Disable this xform.
3723 return false;
3724
3725 PHINode *PN = cast<PHINode>(II);
3726 Value *BIV = PN->getIncomingValueForBlock(BB);
3727 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(BIV))
3728 if (CE->canTrap())
3729 return false;
3730
3731 unsigned PBBIdx = PN->getBasicBlockIndex(PBI->getParent());
3732 Value *PBIV = PN->getIncomingValue(PBBIdx);
3733 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(PBIV))
3734 if (CE->canTrap())
3735 return false;
3736 }
3737
3738 // Finally, if everything is ok, fold the branches to logical ops.
3739 BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1);
3740
3741 LLVM_DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "FOLDING BRs:" << *PBI
->getParent() << "AND: " << *BI->getParent(
); } } while (false)
3742 << "AND: " << *BI->getParent())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "FOLDING BRs:" << *PBI
->getParent() << "AND: " << *BI->getParent(
); } } while (false)
;
3743
3744 SmallVector<DominatorTree::UpdateType, 5> Updates;
3745
3746 // If OtherDest *is* BB, then BB is a basic block with a single conditional
3747 // branch in it, where one edge (OtherDest) goes back to itself but the other
3748 // exits. We don't *know* that the program avoids the infinite loop
3749 // (even though that seems likely). If we do this xform naively, we'll end up
3750 // recursively unpeeling the loop. Since we know that (after the xform is
3751 // done) that the block *is* infinite if reached, we just make it an obviously
3752 // infinite loop with no cond branch.
3753 if (OtherDest == BB) {
3754 // Insert it at the end of the function, because it's either code,
3755 // or it won't matter if it's hot. :)
3756 BasicBlock *InfLoopBlock =
3757 BasicBlock::Create(BB->getContext(), "infloop", BB->getParent());
3758 BranchInst::Create(InfLoopBlock, InfLoopBlock);
3759 Updates.push_back({DominatorTree::Insert, InfLoopBlock, InfLoopBlock});
3760 OtherDest = InfLoopBlock;
3761 }
3762
3763 LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << *PBI->getParent()->getParent
(); } } while (false)
;
3764
3765 // BI may have other predecessors. Because of this, we leave
3766 // it alone, but modify PBI.
3767
3768 // Make sure we get to CommonDest on True&True directions.
3769 Value *PBICond = PBI->getCondition();
3770 IRBuilder<NoFolder> Builder(PBI);
3771 if (PBIOp)
3772 PBICond = Builder.CreateNot(PBICond, PBICond->getName() + ".not");
3773
3774 Value *BICond = BI->getCondition();
3775 if (BIOp)
3776 BICond = Builder.CreateNot(BICond, BICond->getName() + ".not");
3777
3778 // Merge the conditions.
3779 Value *Cond = Builder.CreateOr(PBICond, BICond, "brmerge");
3780
3781 // Modify PBI to branch on the new condition to the new dests.
3782 PBI->setCondition(Cond);
3783 PBI->setSuccessor(0, CommonDest);
3784 PBI->setSuccessor(1, OtherDest);
3785
3786 Updates.push_back({DominatorTree::Insert, PBI->getParent(), OtherDest});
3787 Updates.push_back({DominatorTree::Delete, PBI->getParent(), RemovedDest});
3788
3789 if (DTU)
3790 DTU->applyUpdates(Updates);
3791
3792 // Update branch weight for PBI.
 // NOTE: PredCommon..SuccOther are only assigned when HasWeights is true;
 // every later read (here and in the PHI loop below) is likewise guarded by
 // HasWeights, so they are never read uninitialized.
3793 uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight;
3794 uint64_t PredCommon, PredOther, SuccCommon, SuccOther;
3795 bool HasWeights =
3796 extractPredSuccWeights(PBI, BI, PredTrueWeight, PredFalseWeight,
3797 SuccTrueWeight, SuccFalseWeight);
3798 if (HasWeights) {
3799 PredCommon = PBIOp ? PredFalseWeight : PredTrueWeight;
3800 PredOther = PBIOp ? PredTrueWeight : PredFalseWeight;
3801 SuccCommon = BIOp ? SuccFalseWeight : SuccTrueWeight;
3802 SuccOther = BIOp ? SuccTrueWeight : SuccFalseWeight;
3803 // The weight to CommonDest should be PredCommon * SuccTotal +
3804 // PredOther * SuccCommon.
3805 // The weight to OtherDest should be PredOther * SuccOther.
3806 uint64_t NewWeights[2] = {PredCommon * (SuccCommon + SuccOther) +
3807 PredOther * SuccCommon,
3808 PredOther * SuccOther};
3809 // Halve the weights if any of them cannot fit in an uint32_t
3810 FitWeights(NewWeights);
3811
3812 setBranchWeights(PBI, NewWeights[0], NewWeights[1]);
3813 }
3814
3815 // OtherDest may have phi nodes. If so, add an entry from PBI's
3816 // block that are identical to the entries for BI's block.
3817 AddPredecessorToBlock(OtherDest, PBI->getParent(), BB);
3818
3819 // We know that the CommonDest already had an edge from PBI to
3820 // it. If it has PHIs though, the PHIs may have different
3821 // entries for BB and PBI's BB. If so, insert a select to make
3822 // them agree.
3823 for (PHINode &PN : CommonDest->phis()) {
3824 Value *BIV = PN.getIncomingValueForBlock(BB);
3825 unsigned PBBIdx = PN.getBasicBlockIndex(PBI->getParent());
3826 Value *PBIV = PN.getIncomingValue(PBBIdx);
3827 if (BIV != PBIV) {
3828 // Insert a select in PBI to pick the right value.
3829 SelectInst *NV = cast<SelectInst>(
3830 Builder.CreateSelect(PBICond, PBIV, BIV, PBIV->getName() + ".mux"));
3831 PN.setIncomingValue(PBBIdx, NV);
3832 // Although the select has the same condition as PBI, the original branch
3833 // weights for PBI do not apply to the new select because the select's
3834 // 'logical' edges are incoming edges of the phi that is eliminated, not
3835 // the outgoing edges of PBI.
3836 if (HasWeights) {
3837 uint64_t PredCommon = PBIOp ? PredFalseWeight : PredTrueWeight;
3838 uint64_t PredOther = PBIOp ? PredTrueWeight : PredFalseWeight;
3839 uint64_t SuccCommon = BIOp ? SuccFalseWeight : SuccTrueWeight;
3840 uint64_t SuccOther = BIOp ? SuccTrueWeight : SuccFalseWeight;
3841 // The weight to PredCommonDest should be PredCommon * SuccTotal.
3842 // The weight to PredOtherDest should be PredOther * SuccCommon.
3843 uint64_t NewWeights[2] = {PredCommon * (SuccCommon + SuccOther),
3844 PredOther * SuccCommon};
3845
3846 FitWeights(NewWeights);
3847
3848 setBranchWeights(NV, NewWeights[0], NewWeights[1]);
3849 }
3850 }
3851 }
3852
3853 LLVM_DEBUG(dbgs() << "INTO: " << *PBI->getParent())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "INTO: " << *PBI->
getParent(); } } while (false)
;
3854 LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << *PBI->getParent()->getParent
(); } } while (false)
;
3855
3856 // This basic block is probably dead. We know it has at least
3857 // one fewer predecessor.
3858 return true;
3859}
3860
3861// Simplifies a terminator by replacing it with a branch to TrueBB if Cond is
3862// true or to FalseBB if Cond is false.
3863// Takes care of updating the successors and removing the old terminator.
3864// Also makes sure not to introduce new successors by assuming that edges to
3865// non-successor TrueBBs and FalseBBs aren't reachable.
// \returns true unconditionally (the terminator is always replaced).
3866bool SimplifyCFGOpt::SimplifyTerminatorOnSelect(Instruction *OldTerm,
3867 Value *Cond, BasicBlock *TrueBB,
3868 BasicBlock *FalseBB,
3869 uint32_t TrueWeight,
3870 uint32_t FalseWeight) {
3871 auto *BB = OldTerm->getParent();
3872 // Remove any superfluous successor edges from the CFG.
3873 // First, figure out which successors to preserve.
3874 // If TrueBB and FalseBB are equal, only try to preserve one copy of that
3875 // successor.
 // KeepEdge1/KeepEdge2 are nulled out below as soon as the corresponding
 // edge is seen; a non-null value after the loop therefore means that block
 // was NOT among OldTerm's successors.
3876 BasicBlock *KeepEdge1 = TrueBB;
3877 BasicBlock *KeepEdge2 = TrueBB != FalseBB ? FalseBB : nullptr;
3878
3879 SmallSetVector<BasicBlock *, 2> RemovedSuccessors;
3880
3881 // Then remove the rest.
3882 for (BasicBlock *Succ : successors(OldTerm)) {
3883 // Make sure only to keep exactly one copy of each edge.
3884 if (Succ == KeepEdge1)
3885 KeepEdge1 = nullptr;
3886 else if (Succ == KeepEdge2)
3887 KeepEdge2 = nullptr;
3888 else {
3889 Succ->removePredecessor(BB,
3890 /*KeepOneInputPHIs=*/true);
3891
 // Only record it for the DomTree update if the last edge to this
 // successor is really going away (duplicate edges keep one copy).
3892 if (Succ != TrueBB && Succ != FalseBB)
3893 RemovedSuccessors.insert(Succ);
3894 }
3895 }
3896
3897 IRBuilder<> Builder(OldTerm);
3898 Builder.SetCurrentDebugLocation(OldTerm->getDebugLoc());
3899
3900 // Insert an appropriate new terminator.
3901 if (!KeepEdge1 && !KeepEdge2) {
3902 if (TrueBB == FalseBB) {
3903 // We were only looking for one successor, and it was present.
3904 // Create an unconditional branch to it.
3905 Builder.CreateBr(TrueBB);
3906 } else {
3907 // We found both of the successors we were looking for.
3908 // Create a conditional branch sharing the condition of the select.
3909 BranchInst *NewBI = Builder.CreateCondBr(Cond, TrueBB, FalseBB);
 // Equal weights carry no information, so only attach metadata when
 // the weights actually differ.
3910 if (TrueWeight != FalseWeight)
3911 setBranchWeights(NewBI, TrueWeight, FalseWeight);
3912 }
3913 } else if (KeepEdge1 && (KeepEdge2 || TrueBB == FalseBB)) {
3914 // Neither of the selected blocks were successors, so this
3915 // terminator must be unreachable.
3916 new UnreachableInst(OldTerm->getContext(), OldTerm);
3917 } else {
3918 // One of the selected values was a successor, but the other wasn't.
3919 // Insert an unconditional branch to the one that was found;
3920 // the edge to the one that wasn't must be unreachable.
3921 if (!KeepEdge1) {
3922 // Only TrueBB was found.
3923 Builder.CreateBr(TrueBB);
3924 } else {
3925 // Only FalseBB was found.
3926 Builder.CreateBr(FalseBB);
3927 }
3928 }
3929
 // Drop the old terminator and any now-dead condition computation.
3930 EraseTerminatorAndDCECond(OldTerm);
3931
3932 if (DTU) {
3933 SmallVector<DominatorTree::UpdateType, 2> Updates;
3934 Updates.reserve(RemovedSuccessors.size());
3935 for (auto *RemovedSuccessor : RemovedSuccessors)
3936 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
3937 DTU->applyUpdates(Updates);
3938 }
3939
3940 return true;
3941}
3942
3943// Replaces
3944// (switch (select cond, X, Y)) on constant X, Y
3945// with a branch - conditional if X and Y lead to distinct BBs,
3946// unconditional otherwise.
// \returns true if the switch was replaced.
3947bool SimplifyCFGOpt::SimplifySwitchOnSelect(SwitchInst *SI,
3948 SelectInst *Select) {
3949 // Check for constant integer values in the select.
3950 ConstantInt *TrueVal = dyn_cast<ConstantInt>(Select->getTrueValue());
3951 ConstantInt *FalseVal = dyn_cast<ConstantInt>(Select->getFalseValue());
3952 if (!TrueVal || !FalseVal)
3953 return false;
3954
3955 // Find the relevant condition and destinations.
 // findCaseValue falls back to the default-case iterator when the constant
 // has no explicit case, so TrueBB/FalseBB are always valid blocks.
3956 Value *Condition = Select->getCondition();
3957 BasicBlock *TrueBB = SI->findCaseValue(TrueVal)->getCaseSuccessor();
3958 BasicBlock *FalseBB = SI->findCaseValue(FalseVal)->getCaseSuccessor();
3959
3960 // Get weight for TrueBB and FalseBB.
3961 uint32_t TrueWeight = 0, FalseWeight = 0;
3962 SmallVector<uint64_t, 8> Weights;
3963 bool HasWeights = HasBranchWeights(SI);
3964 if (HasWeights) {
3965 GetBranchWeights(SI, Weights);
 // Only trust the profile when it has exactly one weight per switch
 // successor (the default destination plus each case).
3966 if (Weights.size() == 1 + SI->getNumCases()) {
3967 TrueWeight =
3968 (uint32_t)Weights[SI->findCaseValue(TrueVal)->getSuccessorIndex()];
3969 FalseWeight =
3970 (uint32_t)Weights[SI->findCaseValue(FalseVal)->getSuccessorIndex()];
3971 }
3972 }
3973
3974 // Perform the actual simplification.
3975 return SimplifyTerminatorOnSelect(SI, Condition, TrueBB, FalseBB, TrueWeight,
3976 FalseWeight);
3977}
3978
3979// Replaces
3980// (indirectbr (select cond, blockaddress(@fn, BlockA),
3981// blockaddress(@fn, BlockB)))
3982// with
3983// (br cond, BlockA, BlockB).
3984bool SimplifyCFGOpt::SimplifyIndirectBrOnSelect(IndirectBrInst *IBI,
3985 SelectInst *SI) {
3986 // Check that both operands of the select are block addresses.
3987 BlockAddress *TBA = dyn_cast<BlockAddress>(SI->getTrueValue());
3988 BlockAddress *FBA = dyn_cast<BlockAddress>(SI->getFalseValue());
3989 if (!TBA || !FBA)
3990 return false;
3991
3992 // Extract the actual blocks.
3993 BasicBlock *TrueBB = TBA->getBasicBlock();
3994 BasicBlock *FalseBB = FBA->getBasicBlock();
3995
3996 // Perform the actual simplification.
3997 return SimplifyTerminatorOnSelect(IBI, SI->getCondition(), TrueBB, FalseBB, 0,
3998 0);
3999}
4000
4001/// This is called when we find an icmp instruction
4002/// (a seteq/setne with a constant) as the only instruction in a
4003/// block that ends with an uncond branch. We are looking for a very specific
4004/// pattern that occurs when "A == 1 || A == 2 || A == 3" gets simplified. In
4005/// this case, we merge the first two "or's of icmp" into a switch, but then the
4006/// default value goes to an uncond block with a seteq in it, we get something
4007/// like:
4008///
4009/// switch i8 %A, label %DEFAULT [ i8 1, label %end i8 2, label %end ]
4010/// DEFAULT:
4011/// %tmp = icmp eq i8 %A, 92
4012/// br label %end
4013/// end:
4014/// ... = phi i1 [ true, %entry ], [ %tmp, %DEFAULT ], [ true, %entry ]
4015///
4016/// We prefer to split the edge to 'end' so that there is a true/false entry to
4017/// the PHI, merging the third icmp into the switch.
/// \returns true if the CFG/instructions were changed.
4018bool SimplifyCFGOpt::tryToSimplifyUncondBranchWithICmpInIt(
4019 ICmpInst *ICI, IRBuilder<> &Builder) {
4020 BasicBlock *BB = ICI->getParent();
4021
4022 // If the block has any PHIs in it or the icmp has multiple uses, it is too
4023 // complex.
4024 if (isa<PHINode>(BB->begin()) || !ICI->hasOneUse())
4025 return false;
4026
4027 Value *V = ICI->getOperand(0);
4028 ConstantInt *Cst = cast<ConstantInt>(ICI->getOperand(1));
4029
4030 // The pattern we're looking for is where our only predecessor is a switch on
4031 // 'V' and this block is the default case for the switch. In this case we can
4032 // fold the compared value into the switch to simplify things.
4033 BasicBlock *Pred = BB->getSinglePredecessor();
4034 if (!Pred || !isa<SwitchInst>(Pred->getTerminator()))
4035 return false;
4036
4037 SwitchInst *SI = cast<SwitchInst>(Pred->getTerminator());
4038 if (SI->getCondition() != V)
4039 return false;
4040
4041 // If BB is reachable on a non-default case, then we simply know the value of
4042 // V in this block. Substitute it and constant fold the icmp instruction
4043 // away.
4044 if (SI->getDefaultDest() != BB) {
4045 ConstantInt *VVal = SI->findCaseDest(BB);
4046 assert(VVal && "Should have a unique destination value")((VVal && "Should have a unique destination value") ?
static_cast<void> (0) : __assert_fail ("VVal && \"Should have a unique destination value\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4046, __PRETTY_FUNCTION__))
;
4047 ICI->setOperand(0, VVal);
4048
4049 if (Value *V = SimplifyInstruction(ICI, {DL, ICI})) {
4050 ICI->replaceAllUsesWith(V);
4051 ICI->eraseFromParent();
4052 }
4053 // BB is now empty, so it is likely to simplify away.
4054 return requestResimplify();
4055 }
4056
4057 // Ok, the block is reachable from the default dest. If the constant we're
4058 // comparing exists in one of the other edges, then we can constant fold ICI
4059 // and zap it.
 // (On the default edge, V cannot equal any explicit case value, so the
 // comparison's outcome is statically known.)
4060 if (SI->findCaseValue(Cst) != SI->case_default()) {
4061 Value *V;
4062 if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
4063 V = ConstantInt::getFalse(BB->getContext());
4064 else
4065 V = ConstantInt::getTrue(BB->getContext());
4066
4067 ICI->replaceAllUsesWith(V);
4068 ICI->eraseFromParent();
4069 // BB is now empty, so it is likely to simplify away.
4070 return requestResimplify();
4071 }
4072
4073 // The use of the icmp has to be in the 'end' block, by the only PHI node in
4074 // the block.
4075 BasicBlock *SuccBlock = BB->getTerminator()->getSuccessor(0);
4076 PHINode *PHIUse = dyn_cast<PHINode>(ICI->user_back());
4077 if (PHIUse == nullptr || PHIUse != &SuccBlock->front() ||
4078 isa<PHINode>(++BasicBlock::iterator(PHIUse)))
4079 return false;
4080
4081 // If the icmp is a SETEQ, then the default dest gets false, the new edge gets
4082 // true in the PHI.
4083 Constant *DefaultCst = ConstantInt::getTrue(BB->getContext());
4084 Constant *NewCst = ConstantInt::getFalse(BB->getContext());
4085
4086 if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
4087 std::swap(DefaultCst, NewCst);
4088
4089 // Replace ICI (which is used by the PHI for the default value) with true or
4090 // false depending on if it is EQ or NE.
4091 ICI->replaceAllUsesWith(DefaultCst);
4092 ICI->eraseFromParent();
4093
4094 SmallVector<DominatorTree::UpdateType, 2> Updates;
4095
4096 // Okay, the switch goes to this block on a default value. Add an edge from
4097 // the switch to the merge point on the compared value.
4098 BasicBlock *NewBB =
4099 BasicBlock::Create(BB->getContext(), "switch.edge", BB->getParent(), BB);
4100 {
4101 SwitchInstProfUpdateWrapper SIW(*SI);
 // Split the default edge's profile weight: halve it (rounding up) and
 // give the new case edge that same half.
4102 auto W0 = SIW.getSuccessorWeight(0);
4103 SwitchInstProfUpdateWrapper::CaseWeightOpt NewW;
4104 if (W0) {
4105 NewW = ((uint64_t(*W0) + 1) >> 1);
4106 SIW.setSuccessorWeight(0, *NewW);
4107 }
4108 SIW.addCase(Cst, NewBB, NewW);
4109 Updates.push_back({DominatorTree::Insert, Pred, NewBB});
4110 }
4111
4112 // NewBB branches to the phi block, add the uncond branch and the phi entry.
4113 Builder.SetInsertPoint(NewBB);
4114 Builder.SetCurrentDebugLocation(SI->getDebugLoc());
4115 Builder.CreateBr(SuccBlock);
4116 Updates.push_back({DominatorTree::Insert, NewBB, SuccBlock});
4117 PHIUse->addIncoming(NewCst, NewBB);
4118 if (DTU)
4119 DTU->applyUpdates(Updates);
4120 return true;
4121}
4122
/// The specified branch is a conditional branch.
/// Check to see if it is branching on an or/and chain of icmp instructions, and
/// fold it into a switch instruction if so.
///
/// \param BI       Conditional branch whose condition may be an icmp chain.
/// \param Builder  IRBuilder used to materialize the replacement switch.
/// \param DL       DataLayout, needed to pick the pointer-sized integer type.
/// \returns true if the branch was replaced by a switch.
bool SimplifyCFGOpt::SimplifyBranchOnICmpChain(BranchInst *BI,
                                               IRBuilder<> &Builder,
                                               const DataLayout &DL) {
  Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
  if (!Cond)
    return false;

  // Change br (X == 0 | X == 1), T, F into a switch instruction.
  // If this is a bunch of seteq's or'd together, or if it's a bunch of
  // 'setne's and'ed together, collect them.

  // Try to gather values from a chain of and/or to be turned into a switch
  ConstantComparesGatherer ConstantCompare(Cond, DL);
  // Unpack the result
  SmallVectorImpl<ConstantInt *> &Values = ConstantCompare.Vals;
  Value *CompVal = ConstantCompare.CompValue;
  unsigned UsedICmps = ConstantCompare.UsedICmps;
  Value *ExtraCase = ConstantCompare.Extra;

  // If we didn't have a multiply compared value, fail.
  if (!CompVal)
    return false;

  // Avoid turning single icmps into a switch.
  if (UsedICmps <= 1)
    return false;

  // An or-of-eq chain takes the 'true' successor on a match; an and-of-ne
  // chain takes the 'false' successor on a match.
  bool TrueWhenEqual = match(Cond, m_LogicalOr(m_Value(), m_Value()));

  // There might be duplicate constants in the list, which the switch
  // instruction can't handle, remove them now.
  array_pod_sort(Values.begin(), Values.end(), ConstantIntSortPredicate);
  Values.erase(std::unique(Values.begin(), Values.end()), Values.end());

  // If Extra was used, we require at least two switch values to do the
  // transformation. A switch with one value is just a conditional branch.
  if (ExtraCase && Values.size() < 2)
    return false;

  // TODO: Preserve branch weight metadata, similarly to how
  // FoldValueComparisonIntoPredecessors preserves it.

  // Figure out which block is which destination.
  BasicBlock *DefaultBB = BI->getSuccessor(1);
  BasicBlock *EdgeBB = BI->getSuccessor(0);
  if (!TrueWhenEqual)
    std::swap(DefaultBB, EdgeBB);

  BasicBlock *BB = BI->getParent();

  // MSAN does not like undefs as branch condition which can be introduced
  // with "explicit branch".
  if (ExtraCase && BB->getParent()->hasFnAttribute(Attribute::SanitizeMemory))
    return false;

  LLVM_DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size()
                    << " cases into SWITCH. BB is:\n"
                    << *BB);

  SmallVector<DominatorTree::UpdateType, 2> Updates;

  // If there are any extra values that couldn't be folded into the switch
  // then we evaluate them with an explicit branch first. Split the block
  // right before the condbr to handle it.
  if (ExtraCase) {
    BasicBlock *NewBB = SplitBlock(BB, BI, DTU, /*LI=*/nullptr,
                                   /*MSSAU=*/nullptr, "switch.early.test");

    // Remove the uncond branch added to the old block.
    Instruction *OldTI = BB->getTerminator();
    Builder.SetInsertPoint(OldTI);

    // Evaluate the unhandled comparison first: on a match control goes
    // straight to EdgeBB, otherwise it falls through to the switch in NewBB.
    if (TrueWhenEqual)
      Builder.CreateCondBr(ExtraCase, EdgeBB, NewBB);
    else
      Builder.CreateCondBr(ExtraCase, NewBB, EdgeBB);

    OldTI->eraseFromParent();

    Updates.push_back({DominatorTree::Insert, BB, EdgeBB});

    // If there are PHI nodes in EdgeBB, then we need to add a new entry to them
    // for the edge we just added.
    AddPredecessorToBlock(EdgeBB, BB, NewBB);

    LLVM_DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase
                      << "\nEXTRABB = " << *BB);
    BB = NewBB;
  }

  Builder.SetInsertPoint(BI);
  // Convert pointer to int before we switch.
  if (CompVal->getType()->isPointerTy()) {
    CompVal = Builder.CreatePtrToInt(
        CompVal, DL.getIntPtrType(CompVal->getType()), "magicptr");
  }

  // Create the new switch instruction now.
  SwitchInst *New = Builder.CreateSwitch(CompVal, DefaultBB, Values.size());

  // Add all of the 'cases' to the switch instruction.
  for (unsigned i = 0, e = Values.size(); i != e; ++i)
    New->addCase(Values[i], EdgeBB);

  // We added edges from PI to the EdgeBB. As such, if there were any
  // PHI nodes in EdgeBB, they need entries to be added corresponding to
  // the number of edges added.
  // The PHI already has one entry for BB (from the branch being replaced),
  // so only Values.size() - 1 additional copies are needed.
  for (BasicBlock::iterator BBI = EdgeBB->begin(); isa<PHINode>(BBI); ++BBI) {
    PHINode *PN = cast<PHINode>(BBI);
    Value *InVal = PN->getIncomingValueForBlock(BB);
    for (unsigned i = 0, e = Values.size() - 1; i != e; ++i)
      PN->addIncoming(InVal, BB);
  }

  // Erase the old branch instruction.
  EraseTerminatorAndDCECond(BI);
  if (DTU)
    DTU->applyUpdates(Updates);

  LLVM_DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n');
  return true;
}
4248
4249bool SimplifyCFGOpt::simplifyResume(ResumeInst *RI, IRBuilder<> &Builder) {
4250 if (isa<PHINode>(RI->getValue()))
4251 return simplifyCommonResume(RI);
4252 else if (isa<LandingPadInst>(RI->getParent()->getFirstNonPHI()) &&
4253 RI->getValue() == RI->getParent()->getFirstNonPHI())
4254 // The resume must unwind the exception that caused control to branch here.
4255 return simplifySingleResume(RI);
4256
4257 return false;
4258}
4259
4260// Check if cleanup block is empty
4261static bool isCleanupBlockEmpty(iterator_range<BasicBlock::iterator> R) {
4262 for (Instruction &I : R) {
4263 auto *II = dyn_cast<IntrinsicInst>(&I);
4264 if (!II)
4265 return false;
4266
4267 Intrinsic::ID IntrinsicID = II->getIntrinsicID();
4268 switch (IntrinsicID) {
4269 case Intrinsic::dbg_declare:
4270 case Intrinsic::dbg_value:
4271 case Intrinsic::dbg_label:
4272 case Intrinsic::lifetime_end:
4273 break;
4274 default:
4275 return false;
4276 }
4277 }
4278 return true;
4279}
4280
4281// Simplify resume that is shared by several landing pads (phi of landing pad).
4282bool SimplifyCFGOpt::simplifyCommonResume(ResumeInst *RI) {
4283 BasicBlock *BB = RI->getParent();
4284
4285 // Check that there are no other instructions except for debug and lifetime
4286 // intrinsics between the phi's and resume instruction.
4287 if (!isCleanupBlockEmpty(
4288 make_range(RI->getParent()->getFirstNonPHI(), BB->getTerminator())))
4289 return false;
4290
4291 SmallSetVector<BasicBlock *, 4> TrivialUnwindBlocks;
4292 auto *PhiLPInst = cast<PHINode>(RI->getValue());
4293
4294 // Check incoming blocks to see if any of them are trivial.
4295 for (unsigned Idx = 0, End = PhiLPInst->getNumIncomingValues(); Idx != End;
4296 Idx++) {
4297 auto *IncomingBB = PhiLPInst->getIncomingBlock(Idx);
4298 auto *IncomingValue = PhiLPInst->getIncomingValue(Idx);
4299
4300 // If the block has other successors, we can not delete it because
4301 // it has other dependents.
4302 if (IncomingBB->getUniqueSuccessor() != BB)
4303 continue;
4304
4305 auto *LandingPad = dyn_cast<LandingPadInst>(IncomingBB->getFirstNonPHI());
4306 // Not the landing pad that caused the control to branch here.
4307 if (IncomingValue != LandingPad)
4308 continue;
4309
4310 if (isCleanupBlockEmpty(
4311 make_range(LandingPad->getNextNode(), IncomingBB->getTerminator())))
4312 TrivialUnwindBlocks.insert(IncomingBB);
4313 }
4314
4315 // If no trivial unwind blocks, don't do any simplifications.
4316 if (TrivialUnwindBlocks.empty())
4317 return false;
4318
4319 // Turn all invokes that unwind here into calls.
4320 for (auto *TrivialBB : TrivialUnwindBlocks) {
4321 // Blocks that will be simplified should be removed from the phi node.
4322 // Note there could be multiple edges to the resume block, and we need
4323 // to remove them all.
4324 while (PhiLPInst->getBasicBlockIndex(TrivialBB) != -1)
4325 BB->removePredecessor(TrivialBB, true);
4326
4327 for (pred_iterator PI = pred_begin(TrivialBB), PE = pred_end(TrivialBB);
4328 PI != PE;) {
4329 BasicBlock *Pred = *PI++;
4330 removeUnwindEdge(Pred, DTU);
4331 ++NumInvokes;
4332 }
4333
4334 // In each SimplifyCFG run, only the current processed block can be erased.
4335 // Otherwise, it will break the iteration of SimplifyCFG pass. So instead
4336 // of erasing TrivialBB, we only remove the branch to the common resume
4337 // block so that we can later erase the resume block since it has no
4338 // predecessors.
4339 TrivialBB->getTerminator()->eraseFromParent();
4340 new UnreachableInst(RI->getContext(), TrivialBB);
4341 if (DTU)
4342 DTU->applyUpdates({{DominatorTree::Delete, TrivialBB, BB}});
4343 }
4344
4345 // Delete the resume block if all its predecessors have been removed.
4346 if (pred_empty(BB)) {
4347 if (DTU)
4348 DTU->deleteBB(BB);
4349 else
4350 BB->eraseFromParent();
4351 }
4352
4353 return !TrivialUnwindBlocks.empty();
4354}
4355
4356// Simplify resume that is only used by a single (non-phi) landing pad.
4357bool SimplifyCFGOpt::simplifySingleResume(ResumeInst *RI) {
4358 BasicBlock *BB = RI->getParent();
4359 auto *LPInst = cast<LandingPadInst>(BB->getFirstNonPHI());
4360 assert(RI->getValue() == LPInst &&((RI->getValue() == LPInst && "Resume must unwind the exception that caused control to here"
) ? static_cast<void> (0) : __assert_fail ("RI->getValue() == LPInst && \"Resume must unwind the exception that caused control to here\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4361, __PRETTY_FUNCTION__))
4361 "Resume must unwind the exception that caused control to here")((RI->getValue() == LPInst && "Resume must unwind the exception that caused control to here"
) ? static_cast<void> (0) : __assert_fail ("RI->getValue() == LPInst && \"Resume must unwind the exception that caused control to here\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4361, __PRETTY_FUNCTION__))
;
4362
4363 // Check that there are no other instructions except for debug intrinsics.
4364 if (!isCleanupBlockEmpty(
4365 make_range<Instruction *>(LPInst->getNextNode(), RI)))
4366 return false;
4367
4368 // Turn all invokes that unwind here into calls and delete the basic block.
4369 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE;) {
4370 BasicBlock *Pred = *PI++;
4371 removeUnwindEdge(Pred, DTU);
4372 ++NumInvokes;
4373 }
4374
4375 // The landingpad is now unreachable. Zap it.
4376 if (LoopHeaders)
4377 LoopHeaders->erase(BB);
4378 if (DTU)
4379 DTU->deleteBB(BB);
4380 else
4381 BB->eraseFromParent();
4382 return true;
4383}
4384
/// Remove a cleanup pad whose block does nothing, rewiring its predecessors
/// to the pad's unwind destination (or converting invokes to calls when the
/// pad unwinds to the caller). Returns true if the pad was eliminated.
static bool removeEmptyCleanup(CleanupReturnInst *RI, DomTreeUpdater *DTU) {
  // If this is a trivial cleanup pad that executes no instructions, it can be
  // eliminated. If the cleanup pad continues to the caller, any predecessor
  // that is an EH pad will be updated to continue to the caller and any
  // predecessor that terminates with an invoke instruction will have its invoke
  // instruction converted to a call instruction. If the cleanup pad being
  // simplified does not continue to the caller, each predecessor will be
  // updated to continue to the unwind destination of the cleanup pad being
  // simplified.
  BasicBlock *BB = RI->getParent();
  CleanupPadInst *CPInst = RI->getCleanupPad();
  if (CPInst->getParent() != BB)
    // This isn't an empty cleanup.
    return false;

  // We cannot kill the pad if it has multiple uses. This typically arises
  // from unreachable basic blocks.
  if (!CPInst->hasOneUse())
    return false;

  // Check that there are no other instructions except for benign intrinsics.
  if (!isCleanupBlockEmpty(
          make_range<Instruction *>(CPInst->getNextNode(), RI)))
    return false;

  // If the cleanup return we are simplifying unwinds to the caller, this will
  // set UnwindDest to nullptr.
  BasicBlock *UnwindDest = RI->getUnwindDest();
  // DestEHPad is non-null exactly when UnwindDest is non-null; every
  // dereference of it below is inside the UnwindDest-guarded region.
  Instruction *DestEHPad = UnwindDest ? UnwindDest->getFirstNonPHI() : nullptr;

  // We're about to remove BB from the control flow. Before we do, sink any
  // PHINodes into the unwind destination. Doing this before changing the
  // control flow avoids some potentially slow checks, since we can currently
  // be certain that UnwindDest and BB have no common predecessors (since they
  // are both EH pads).
  if (UnwindDest) {
    // First, go through the PHI nodes in UnwindDest and update any nodes that
    // reference the block we are removing
    for (BasicBlock::iterator I = UnwindDest->begin(),
                              IE = DestEHPad->getIterator();
         I != IE; ++I) {
      PHINode *DestPN = cast<PHINode>(I);

      int Idx = DestPN->getBasicBlockIndex(BB);
      // Since BB unwinds to UnwindDest, it has to be in the PHI node.
      assert(Idx != -1);
      // This PHI node has an incoming value that corresponds to a control
      // path through the cleanup pad we are removing. If the incoming
      // value is in the cleanup pad, it must be a PHINode (because we
      // verified above that the block is otherwise empty). Otherwise, the
      // value is either a constant or a value that dominates the cleanup
      // pad being removed.
      //
      // Because BB and UnwindDest are both EH pads, all of their
      // predecessors must unwind to these blocks, and since no instruction
      // can have multiple unwind destinations, there will be no overlap in
      // incoming blocks between SrcPN and DestPN.
      Value *SrcVal = DestPN->getIncomingValue(Idx);
      PHINode *SrcPN = dyn_cast<PHINode>(SrcVal);

      // Remove the entry for the block we are deleting.
      DestPN->removeIncomingValue(Idx, false);

      if (SrcPN && SrcPN->getParent() == BB) {
        // If the incoming value was a PHI node in the cleanup pad we are
        // removing, we need to merge that PHI node's incoming values into
        // DestPN.
        for (unsigned SrcIdx = 0, SrcE = SrcPN->getNumIncomingValues();
             SrcIdx != SrcE; ++SrcIdx) {
          DestPN->addIncoming(SrcPN->getIncomingValue(SrcIdx),
                              SrcPN->getIncomingBlock(SrcIdx));
        }
      } else {
        // Otherwise, the incoming value came from above BB and
        // so we can just reuse it. We must associate all of BB's
        // predecessors with this value.
        for (auto *pred : predecessors(BB)) {
          DestPN->addIncoming(SrcVal, pred);
        }
      }
    }

    // Sink any remaining PHI nodes directly into UnwindDest.
    Instruction *InsertPt = DestEHPad;
    for (BasicBlock::iterator I = BB->begin(),
                              IE = BB->getFirstNonPHI()->getIterator();
         I != IE;) {
      // The iterator must be incremented here because the instructions are
      // being moved to another block.
      PHINode *PN = cast<PHINode>(I++);
      if (PN->use_empty() || !PN->isUsedOutsideOfBlock(BB))
        // If the PHI node has no uses or all of its uses are in this basic
        // block (meaning they are debug or lifetime intrinsics), just leave
        // it. It will be erased when we erase BB below.
        continue;

      // Otherwise, sink this PHI node into UnwindDest.
      // Any predecessors to UnwindDest which are not already represented
      // must be back edges which inherit the value from the path through
      // BB. In this case, the PHI value must reference itself.
      for (auto *pred : predecessors(UnwindDest))
        if (pred != BB)
          PN->addIncoming(PN, pred);
      PN->moveBefore(InsertPt);
    }
  }

  std::vector<DominatorTree::UpdateType> Updates;

  for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE;) {
    // The iterator must be updated here because we are removing this pred.
    BasicBlock *PredBB = *PI++;
    if (UnwindDest == nullptr) {
      // Flush pending DT updates before removeUnwindEdge mutates the CFG so
      // the updater never sees a stale edge list.
      if (DTU)
        DTU->applyUpdates(Updates);
      Updates.clear();
      removeUnwindEdge(PredBB, DTU);
      ++NumInvokes;
    } else {
      Instruction *TI = PredBB->getTerminator();
      TI->replaceUsesOfWith(BB, UnwindDest);
      Updates.push_back({DominatorTree::Insert, PredBB, UnwindDest});
      Updates.push_back({DominatorTree::Delete, PredBB, BB});
    }
  }

  if (DTU) {
    DTU->applyUpdates(Updates);
    DTU->deleteBB(BB);
  } else
    // The cleanup pad is now unreachable. Zap it.
    BB->eraseFromParent();

  return true;
}
4520
4521// Try to merge two cleanuppads together.
4522static bool mergeCleanupPad(CleanupReturnInst *RI) {
4523 // Skip any cleanuprets which unwind to caller, there is nothing to merge
4524 // with.
4525 BasicBlock *UnwindDest = RI->getUnwindDest();
4526 if (!UnwindDest)
4527 return false;
4528
4529 // This cleanupret isn't the only predecessor of this cleanuppad, it wouldn't
4530 // be safe to merge without code duplication.
4531 if (UnwindDest->getSinglePredecessor() != RI->getParent())
4532 return false;
4533
4534 // Verify that our cleanuppad's unwind destination is another cleanuppad.
4535 auto *SuccessorCleanupPad = dyn_cast<CleanupPadInst>(&UnwindDest->front());
4536 if (!SuccessorCleanupPad)
4537 return false;
4538
4539 CleanupPadInst *PredecessorCleanupPad = RI->getCleanupPad();
4540 // Replace any uses of the successor cleanupad with the predecessor pad
4541 // The only cleanuppad uses should be this cleanupret, it's cleanupret and
4542 // funclet bundle operands.
4543 SuccessorCleanupPad->replaceAllUsesWith(PredecessorCleanupPad);
4544 // Remove the old cleanuppad.
4545 SuccessorCleanupPad->eraseFromParent();
4546 // Now, we simply replace the cleanupret with a branch to the unwind
4547 // destination.
4548 BranchInst::Create(UnwindDest, RI->getParent());
4549 RI->eraseFromParent();
4550
4551 return true;
4552}
4553
4554bool SimplifyCFGOpt::simplifyCleanupReturn(CleanupReturnInst *RI) {
4555 // It is possible to transiantly have an undef cleanuppad operand because we
4556 // have deleted some, but not all, dead blocks.
4557 // Eventually, this block will be deleted.
4558 if (isa<UndefValue>(RI->getOperand(0)))
4559 return false;
4560
4561 if (mergeCleanupPad(RI))
4562 return true;
4563
4564 if (removeEmptyCleanup(RI, DTU))
4565 return true;
4566
4567 return false;
4568}
4569
4570bool SimplifyCFGOpt::simplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) {
4571 BasicBlock *BB = RI->getParent();
4572 if (!BB->getFirstNonPHIOrDbg()->isTerminator())
4573 return false;
4574
4575 // Find predecessors that end with branches.
4576 SmallVector<BasicBlock *, 8> UncondBranchPreds;
4577 SmallVector<BranchInst *, 8> CondBranchPreds;
4578 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
4579 BasicBlock *P = *PI;
4580 Instruction *PTI = P->getTerminator();
4581 if (BranchInst *BI = dyn_cast<BranchInst>(PTI)) {
4582 if (BI->isUnconditional())
4583 UncondBranchPreds.push_back(P);
4584 else
4585 CondBranchPreds.push_back(BI);
4586 }
4587 }
4588
4589 // If we found some, do the transformation!
4590 if (!UncondBranchPreds.empty() && DupRet) {
4591 while (!UncondBranchPreds.empty()) {
4592 BasicBlock *Pred = UncondBranchPreds.pop_back_val();
4593 LLVM_DEBUG(dbgs() << "FOLDING: " << *BBdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "FOLDING: " << *BB <<
"INTO UNCOND BRANCH PRED: " << *Pred; } } while (false
)
4594 << "INTO UNCOND BRANCH PRED: " << *Pred)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "FOLDING: " << *BB <<
"INTO UNCOND BRANCH PRED: " << *Pred; } } while (false
)
;
4595 (void)FoldReturnIntoUncondBranch(RI, BB, Pred, DTU);
4596 }
4597
4598 // If we eliminated all predecessors of the block, delete the block now.
4599 if (pred_empty(BB)) {
4600 // We know there are no successors, so just nuke the block.
4601 if (LoopHeaders)
4602 LoopHeaders->erase(BB);
4603 if (DTU)
4604 DTU->deleteBB(BB);
4605 else
4606 BB->eraseFromParent();
4607 }
4608
4609 return true;
4610 }
4611
4612 // Check out all of the conditional branches going to this return
4613 // instruction. If any of them just select between returns, change the
4614 // branch itself into a select/return pair.
4615 while (!CondBranchPreds.empty()) {
4616 BranchInst *BI = CondBranchPreds.pop_back_val();
4617
4618 // Check to see if the non-BB successor is also a return block.
4619 if (isa<ReturnInst>(BI->getSuccessor(0)->getTerminator()) &&
4620 isa<ReturnInst>(BI->getSuccessor(1)->getTerminator()) &&
4621 SimplifyCondBranchToTwoReturns(BI, Builder))
4622 return true;
4623 }
4624 return false;
4625}
4626
/// Simplify a block that ends in 'unreachable': strip side-effect-free
/// instructions preceding it, and if the unreachable is then the whole block,
/// rewrite every predecessor so it no longer transfers control here.
bool SimplifyCFGOpt::simplifyUnreachable(UnreachableInst *UI) {
  BasicBlock *BB = UI->getParent();

  bool Changed = false;

  // If there are any instructions immediately before the unreachable that can
  // be removed, do so.
  while (UI->getIterator() != BB->begin()) {
    BasicBlock::iterator BBI = UI->getIterator();
    --BBI;
    // Do not delete instructions that can have side effects which might cause
    // the unreachable to not be reachable; specifically, calls and volatile
    // operations may have this effect.
    if (isa<CallInst>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      break;

    if (BBI->mayHaveSideEffects()) {
      // Volatile memory operations must be preserved in all cases.
      if (auto *SI = dyn_cast<StoreInst>(BBI)) {
        if (SI->isVolatile())
          break;
      } else if (auto *LI = dyn_cast<LoadInst>(BBI)) {
        if (LI->isVolatile())
          break;
      } else if (auto *RMWI = dyn_cast<AtomicRMWInst>(BBI)) {
        if (RMWI->isVolatile())
          break;
      } else if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(BBI)) {
        if (CXI->isVolatile())
          break;
      } else if (isa<CatchPadInst>(BBI)) {
        // A catchpad may invoke exception object constructors and such, which
        // in some languages can be arbitrary code, so be conservative by
        // default.
        // For CoreCLR, it just involves a type test, so can be removed.
        if (classifyEHPersonality(BB->getParent()->getPersonalityFn()) !=
            EHPersonality::CoreCLR)
          break;
      } else if (!isa<FenceInst>(BBI) && !isa<VAArgInst>(BBI) &&
                 !isa<LandingPadInst>(BBI)) {
        break;
      }
      // Note that deleting LandingPad's here is in fact okay, although it
      // involves a bit of subtle reasoning. If this inst is a LandingPad,
      // all the predecessors of this block will be the unwind edges of Invokes,
      // and we can therefore guarantee this block will be erased.
    }

    // Delete this instruction (any uses are guaranteed to be dead)
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BBI->eraseFromParent();
    Changed = true;
  }

  // If the unreachable instruction is the first in the block, take a gander
  // at all of the predecessors of this instruction, and simplify them.
  if (&BB->front() != UI)
    return Changed;

  std::vector<DominatorTree::UpdateType> Updates;

  // Snapshot the predecessors first, since the loop body mutates the CFG.
  SmallSetVector<BasicBlock *, 8> Preds(pred_begin(BB), pred_end(BB));
  for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
    auto *Predecessor = Preds[i];
    Instruction *TI = Predecessor->getTerminator();
    IRBuilder<> Builder(TI);
    if (auto *BI = dyn_cast<BranchInst>(TI)) {
      // We could either have a proper unconditional branch,
      // or a degenerate conditional branch with matching destinations.
      if (all_of(BI->successors(),
                 [BB](auto *Successor) { return Successor == BB; })) {
        new UnreachableInst(TI->getContext(), TI);
        TI->eraseFromParent();
        Changed = true;
      } else {
        assert(BI->isConditional() && "Can't get here with an uncond branch.");
        Value* Cond = BI->getCondition();
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "The destinations are guaranteed to be different here.");
        // One arm leads to the unreachable; record that the condition must
        // select the other arm, and branch there unconditionally.
        if (BI->getSuccessor(0) == BB) {
          Builder.CreateAssumption(Builder.CreateNot(Cond));
          Builder.CreateBr(BI->getSuccessor(1));
        } else {
          assert(BI->getSuccessor(1) == BB && "Incorrect CFG");
          Builder.CreateAssumption(Cond);
          Builder.CreateBr(BI->getSuccessor(0));
        }
        EraseTerminatorAndDCECond(BI);
        Changed = true;
      }
      Updates.push_back({DominatorTree::Delete, Predecessor, BB});
    } else if (auto *SI = dyn_cast<SwitchInst>(TI)) {
      // Drop every switch case that targets the unreachable block, keeping
      // profile metadata consistent via the wrapper.
      SwitchInstProfUpdateWrapper SU(*SI);
      for (auto i = SU->case_begin(), e = SU->case_end(); i != e;) {
        if (i->getCaseSuccessor() != BB) {
          ++i;
          continue;
        }
        BB->removePredecessor(SU->getParent());
        i = SU.removeCase(i);
        e = SU->case_end();
        Changed = true;
      }
      // Note that the default destination can't be removed!
      if (SI->getDefaultDest() != BB)
        Updates.push_back({DominatorTree::Delete, Predecessor, BB});
    } else if (auto *II = dyn_cast<InvokeInst>(TI)) {
      // An invoke unwinding into the unreachable block becomes a plain call.
      if (II->getUnwindDest() == BB) {
        if (DTU)
          DTU->applyUpdates(Updates);
        Updates.clear();
        removeUnwindEdge(TI->getParent(), DTU);
        Changed = true;
      }
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      if (CSI->getUnwindDest() == BB) {
        if (DTU)
          DTU->applyUpdates(Updates);
        Updates.clear();
        removeUnwindEdge(TI->getParent(), DTU);
        Changed = true;
        continue;
      }

      // Otherwise BB must be (one of) the catchswitch's handlers; drop it.
      for (CatchSwitchInst::handler_iterator I = CSI->handler_begin(),
                                             E = CSI->handler_end();
           I != E; ++I) {
        if (*I == BB) {
          CSI->removeHandler(I);
          --I;
          --E;
          Changed = true;
        }
      }
      Updates.push_back({DominatorTree::Delete, Predecessor, BB});
      if (CSI->getNumHandlers() == 0) {
        if (CSI->hasUnwindDest()) {
          // Redirect all predecessors of the block containing CatchSwitchInst
          // to instead branch to the CatchSwitchInst's unwind destination.
          for (auto *PredecessorOfPredecessor : predecessors(Predecessor)) {
            Updates.push_back({DominatorTree::Insert, PredecessorOfPredecessor,
                               CSI->getUnwindDest()});
            Updates.push_back(
                {DominatorTree::Delete, PredecessorOfPredecessor, Predecessor});
          }
          Predecessor->replaceAllUsesWith(CSI->getUnwindDest());
        } else {
          // Rewrite all preds to unwind to caller (or from invoke to call).
          if (DTU)
            DTU->applyUpdates(Updates);
          Updates.clear();
          SmallVector<BasicBlock *, 8> EHPreds(predecessors(Predecessor));
          for (BasicBlock *EHPred : EHPreds)
            removeUnwindEdge(EHPred, DTU);
        }
        // The catchswitch is no longer reachable.
        new UnreachableInst(CSI->getContext(), CSI);
        CSI->eraseFromParent();
        Changed = true;
      }
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      (void)CRI;
      assert(CRI->hasUnwindDest() && CRI->getUnwindDest() == BB &&
             "Expected to always have an unwind to BB.");
      Updates.push_back({DominatorTree::Delete, Predecessor, BB});
      new UnreachableInst(TI->getContext(), TI);
      TI->eraseFromParent();
      Changed = true;
    }
  }

  if (DTU)
    DTU->applyUpdates(Updates);

  // If this block is now dead, remove it.
  if (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) {
    // We know there are no successors, so just nuke the block.
    if (LoopHeaders)
      LoopHeaders->erase(BB);
    if (DTU)
      DTU->deleteBB(BB);
    else
      BB->eraseFromParent();
    return true;
  }

  return Changed;
}
4815
4816static bool CasesAreContiguous(SmallVectorImpl<ConstantInt *> &Cases) {
4817 assert(Cases.size() >= 1)((Cases.size() >= 1) ? static_cast<void> (0) : __assert_fail
("Cases.size() >= 1", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4817, __PRETTY_FUNCTION__))
;
4818
4819 array_pod_sort(Cases.begin(), Cases.end(), ConstantIntSortPredicate);
4820 for (size_t I = 1, E = Cases.size(); I != E; ++I) {
4821 if (Cases[I - 1]->getValue() != Cases[I]->getValue() + 1)
4822 return false;
4823 }
4824 return true;
4825}
4826
4827static void createUnreachableSwitchDefault(SwitchInst *Switch,
4828 DomTreeUpdater *DTU) {
4829 LLVM_DEBUG(dbgs() << "SimplifyCFG: switch default is dead.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "SimplifyCFG: switch default is dead.\n"
; } } while (false)
;
4830 auto *BB = Switch->getParent();
4831 BasicBlock *NewDefaultBlock = SplitBlockPredecessors(
4832 Switch->getDefaultDest(), Switch->getParent(), "", DTU);
4833 auto *OrigDefaultBlock = Switch->getDefaultDest();
4834 Switch->setDefaultDest(&*NewDefaultBlock);
4835 if (DTU)
4836 DTU->applyUpdates({{DominatorTree::Insert, BB, &*NewDefaultBlock},
4837 {DominatorTree::Delete, BB, OrigDefaultBlock}});
4838 SplitBlock(&*NewDefaultBlock, &NewDefaultBlock->front(), DTU);
4839 SmallVector<DominatorTree::UpdateType, 2> Updates;
4840 for (auto *Successor : successors(NewDefaultBlock))
4841 Updates.push_back({DominatorTree::Delete, NewDefaultBlock, Successor});
4842 auto *NewTerminator = NewDefaultBlock->getTerminator();
4843 new UnreachableInst(Switch->getContext(), NewTerminator);
4844 EraseTerminatorAndDCECond(NewTerminator);
4845 if (DTU)
4846 DTU->applyUpdates(Updates);
4847}
4848
4849/// Turn a switch with two reachable destinations into an integer range
4850/// comparison and branch.
4851bool SimplifyCFGOpt::TurnSwitchRangeIntoICmp(SwitchInst *SI,
4852 IRBuilder<> &Builder) {
4853 assert(SI->getNumCases() > 1 && "Degenerate switch?")((SI->getNumCases() > 1 && "Degenerate switch?"
) ? static_cast<void> (0) : __assert_fail ("SI->getNumCases() > 1 && \"Degenerate switch?\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4853, __PRETTY_FUNCTION__))
;
4854
4855 bool HasDefault =
4856 !isa<UnreachableInst>(SI->getDefaultDest()->getFirstNonPHIOrDbg());
4857
4858 auto *BB = SI->getParent();
4859
4860 // Partition the cases into two sets with different destinations.
4861 BasicBlock *DestA = HasDefault ? SI->getDefaultDest() : nullptr;
4862 BasicBlock *DestB = nullptr;
4863 SmallVector<ConstantInt *, 16> CasesA;
4864 SmallVector<ConstantInt *, 16> CasesB;
4865
4866 for (auto Case : SI->cases()) {
4867 BasicBlock *Dest = Case.getCaseSuccessor();
4868 if (!DestA)
4869 DestA = Dest;
4870 if (Dest == DestA) {
4871 CasesA.push_back(Case.getCaseValue());
4872 continue;
4873 }
4874 if (!DestB)
4875 DestB = Dest;
4876 if (Dest == DestB) {
4877 CasesB.push_back(Case.getCaseValue());
4878 continue;
4879 }
4880 return false; // More than two destinations.
4881 }
4882
4883 assert(DestA && DestB &&((DestA && DestB && "Single-destination switch should have been folded."
) ? static_cast<void> (0) : __assert_fail ("DestA && DestB && \"Single-destination switch should have been folded.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4884, __PRETTY_FUNCTION__))
4884 "Single-destination switch should have been folded.")((DestA && DestB && "Single-destination switch should have been folded."
) ? static_cast<void> (0) : __assert_fail ("DestA && DestB && \"Single-destination switch should have been folded.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4884, __PRETTY_FUNCTION__))
;
4885 assert(DestA != DestB)((DestA != DestB) ? static_cast<void> (0) : __assert_fail
("DestA != DestB", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4885, __PRETTY_FUNCTION__))
;
4886 assert(DestB != SI->getDefaultDest())((DestB != SI->getDefaultDest()) ? static_cast<void>
(0) : __assert_fail ("DestB != SI->getDefaultDest()", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4886, __PRETTY_FUNCTION__))
;
4887 assert(!CasesB.empty() && "There must be non-default cases.")((!CasesB.empty() && "There must be non-default cases."
) ? static_cast<void> (0) : __assert_fail ("!CasesB.empty() && \"There must be non-default cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4887, __PRETTY_FUNCTION__))
;
4888 assert(!CasesA.empty() || HasDefault)((!CasesA.empty() || HasDefault) ? static_cast<void> (0
) : __assert_fail ("!CasesA.empty() || HasDefault", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 4888, __PRETTY_FUNCTION__))
;
4889
4890 // Figure out if one of the sets of cases form a contiguous range.
4891 SmallVectorImpl<ConstantInt *> *ContiguousCases = nullptr;
4892 BasicBlock *ContiguousDest = nullptr;
4893 BasicBlock *OtherDest = nullptr;
4894 if (!CasesA.empty() && CasesAreContiguous(CasesA)) {
4895 ContiguousCases = &CasesA;
4896 ContiguousDest = DestA;
4897 OtherDest = DestB;
4898 } else if (CasesAreContiguous(CasesB)) {
4899 ContiguousCases = &CasesB;
4900 ContiguousDest = DestB;
4901 OtherDest = DestA;
4902 } else
4903 return false;
4904
4905 // Start building the compare and branch.
4906
4907 Constant *Offset = ConstantExpr::getNeg(ContiguousCases->back());
4908 Constant *NumCases =
4909 ConstantInt::get(Offset->getType(), ContiguousCases->size());
4910
4911 Value *Sub = SI->getCondition();
4912 if (!Offset->isNullValue())
4913 Sub = Builder.CreateAdd(Sub, Offset, Sub->getName() + ".off");
4914
4915 Value *Cmp;
4916 // If NumCases overflowed, then all possible values jump to the successor.
4917 if (NumCases->isNullValue() && !ContiguousCases->empty())
4918 Cmp = ConstantInt::getTrue(SI->getContext());
4919 else
4920 Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch");
4921 BranchInst *NewBI = Builder.CreateCondBr(Cmp, ContiguousDest, OtherDest);
4922
4923 // Update weight for the newly-created conditional branch.
4924 if (HasBranchWeights(SI)) {
4925 SmallVector<uint64_t, 8> Weights;
4926 GetBranchWeights(SI, Weights);
4927 if (Weights.size() == 1 + SI->getNumCases()) {
4928 uint64_t TrueWeight = 0;
4929 uint64_t FalseWeight = 0;
4930 for (size_t I = 0, E = Weights.size(); I != E; ++I) {
4931 if (SI->getSuccessor(I) == ContiguousDest)
4932 TrueWeight += Weights[I];
4933 else
4934 FalseWeight += Weights[I];
4935 }
4936 while (TrueWeight > UINT32_MAX(4294967295U) || FalseWeight > UINT32_MAX(4294967295U)) {
4937 TrueWeight /= 2;
4938 FalseWeight /= 2;
4939 }
4940 setBranchWeights(NewBI, TrueWeight, FalseWeight);
4941 }
4942 }
4943
4944 // Prune obsolete incoming values off the successors' PHI nodes.
4945 for (auto BBI = ContiguousDest->begin(); isa<PHINode>(BBI); ++BBI) {
4946 unsigned PreviousEdges = ContiguousCases->size();
4947 if (ContiguousDest == SI->getDefaultDest())
4948 ++PreviousEdges;
4949 for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I)
4950 cast<PHINode>(BBI)->removeIncomingValue(SI->getParent());
4951 }
4952 for (auto BBI = OtherDest->begin(); isa<PHINode>(BBI); ++BBI) {
4953 unsigned PreviousEdges = SI->getNumCases() - ContiguousCases->size();
4954 if (OtherDest == SI->getDefaultDest())
4955 ++PreviousEdges;
4956 for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I)
4957 cast<PHINode>(BBI)->removeIncomingValue(SI->getParent());
4958 }
4959
4960 // Clean up the default block - it may have phis or other instructions before
4961 // the unreachable terminator.
4962 if (!HasDefault)
4963 createUnreachableSwitchDefault(SI, DTU);
4964
4965 auto *UnreachableDefault = SI->getDefaultDest();
4966
4967 // Drop the switch.
4968 SI->eraseFromParent();
4969
4970 if (!HasDefault && DTU)
4971 DTU->applyUpdates({{DominatorTree::Delete, BB, UnreachableDefault}});
4972
4973 return true;
4974}
4975
4976/// Compute masked bits for the condition of a switch
4977/// and use it to remove dead cases.
4978static bool eliminateDeadSwitchCases(SwitchInst *SI, DomTreeUpdater *DTU,
4979 AssumptionCache *AC,
4980 const DataLayout &DL) {
4981 Value *Cond = SI->getCondition();
4982 unsigned Bits = Cond->getType()->getIntegerBitWidth();
4983 KnownBits Known = computeKnownBits(Cond, DL, 0, AC, SI);
4984
4985 // We can also eliminate cases by determining that their values are outside of
4986 // the limited range of the condition based on how many significant (non-sign)
4987 // bits are in the condition value.
4988 unsigned ExtraSignBits = ComputeNumSignBits(Cond, DL, 0, AC, SI) - 1;
4989 unsigned MaxSignificantBitsInCond = Bits - ExtraSignBits;
4990
4991 // Gather dead cases.
4992 SmallVector<ConstantInt *, 8> DeadCases;
4993 SmallMapVector<BasicBlock *, int, 8> NumPerSuccessorCases;
4994 for (auto &Case : SI->cases()) {
4995 auto *Successor = Case.getCaseSuccessor();
4996 ++NumPerSuccessorCases[Successor];
4997 const APInt &CaseVal = Case.getCaseValue()->getValue();
4998 if (Known.Zero.intersects(CaseVal) || !Known.One.isSubsetOf(CaseVal) ||
4999 (CaseVal.getMinSignedBits() > MaxSignificantBitsInCond)) {
5000 DeadCases.push_back(Case.getCaseValue());
5001 --NumPerSuccessorCases[Successor];
5002 LLVM_DEBUG(dbgs() << "SimplifyCFG: switch case " << CaseValdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "SimplifyCFG: switch case "
<< CaseVal << " is dead.\n"; } } while (false)
5003 << " is dead.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("simplifycfg")) { dbgs() << "SimplifyCFG: switch case "
<< CaseVal << " is dead.\n"; } } while (false)
;
5004 }
5005 }
5006
5007 // If we can prove that the cases must cover all possible values, the
5008 // default destination becomes dead and we can remove it. If we know some
5009 // of the bits in the value, we can use that to more precisely compute the
5010 // number of possible unique case values.
5011 bool HasDefault =
5012 !isa<UnreachableInst>(SI->getDefaultDest()->getFirstNonPHIOrDbg());
5013 const unsigned NumUnknownBits =
5014 Bits - (Known.Zero | Known.One).countPopulation();
5015 assert(NumUnknownBits <= Bits)((NumUnknownBits <= Bits) ? static_cast<void> (0) : __assert_fail
("NumUnknownBits <= Bits", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5015, __PRETTY_FUNCTION__))
;
5016 if (HasDefault && DeadCases.empty() &&
5017 NumUnknownBits < 64 /* avoid overflow */ &&
5018 SI->getNumCases() == (1ULL << NumUnknownBits)) {
5019 createUnreachableSwitchDefault(SI, DTU);
5020 return true;
5021 }
5022
5023 if (DeadCases.empty())
5024 return false;
5025
5026 SwitchInstProfUpdateWrapper SIW(*SI);
5027 for (ConstantInt *DeadCase : DeadCases) {
5028 SwitchInst::CaseIt CaseI = SI->findCaseValue(DeadCase);
5029 assert(CaseI != SI->case_default() &&((CaseI != SI->case_default() && "Case was not found. Probably mistake in DeadCases forming."
) ? static_cast<void> (0) : __assert_fail ("CaseI != SI->case_default() && \"Case was not found. Probably mistake in DeadCases forming.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5030, __PRETTY_FUNCTION__))
5030 "Case was not found. Probably mistake in DeadCases forming.")((CaseI != SI->case_default() && "Case was not found. Probably mistake in DeadCases forming."
) ? static_cast<void> (0) : __assert_fail ("CaseI != SI->case_default() && \"Case was not found. Probably mistake in DeadCases forming.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5030, __PRETTY_FUNCTION__))
;
5031 // Prune unused values from PHI nodes.
5032 CaseI->getCaseSuccessor()->removePredecessor(SI->getParent());
5033 SIW.removeCase(CaseI);
5034 }
5035
5036 std::vector<DominatorTree::UpdateType> Updates;
5037 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
5038 if (I.second == 0)
5039 Updates.push_back({DominatorTree::Delete, SI->getParent(), I.first});
5040 if (DTU)
5041 DTU->applyUpdates(Updates);
5042
5043 return true;
5044}
5045
5046/// If BB would be eligible for simplification by
5047/// TryToSimplifyUncondBranchFromEmptyBlock (i.e. it is empty and terminated
5048/// by an unconditional branch), look at the phi node for BB in the successor
5049/// block and see if the incoming value is equal to CaseValue. If so, return
5050/// the phi node, and set PhiIndex to BB's index in the phi node.
5051static PHINode *FindPHIForConditionForwarding(ConstantInt *CaseValue,
5052 BasicBlock *BB, int *PhiIndex) {
5053 if (BB->getFirstNonPHIOrDbg() != BB->getTerminator())
5054 return nullptr; // BB must be empty to be a candidate for simplification.
5055 if (!BB->getSinglePredecessor())
5056 return nullptr; // BB must be dominated by the switch.
5057
5058 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
5059 if (!Branch || !Branch->isUnconditional())
5060 return nullptr; // Terminator must be unconditional branch.
5061
5062 BasicBlock *Succ = Branch->getSuccessor(0);
5063
5064 for (PHINode &PHI : Succ->phis()) {
5065 int Idx = PHI.getBasicBlockIndex(BB);
5066 assert(Idx >= 0 && "PHI has no entry for predecessor?")((Idx >= 0 && "PHI has no entry for predecessor?")
? static_cast<void> (0) : __assert_fail ("Idx >= 0 && \"PHI has no entry for predecessor?\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5066, __PRETTY_FUNCTION__))
;
5067
5068 Value *InValue = PHI.getIncomingValue(Idx);
5069 if (InValue != CaseValue)
5070 continue;
5071
5072 *PhiIndex = Idx;
5073 return &PHI;
5074 }
5075
5076 return nullptr;
5077}
5078
5079/// Try to forward the condition of a switch instruction to a phi node
5080/// dominated by the switch, if that would mean that some of the destination
5081/// blocks of the switch can be folded away. Return true if a change is made.
5082static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
5083 using ForwardingNodesMap = DenseMap<PHINode *, SmallVector<int, 4>>;
5084
5085 ForwardingNodesMap ForwardingNodes;
5086 BasicBlock *SwitchBlock = SI->getParent();
5087 bool Changed = false;
5088 for (auto &Case : SI->cases()) {
5089 ConstantInt *CaseValue = Case.getCaseValue();
5090 BasicBlock *CaseDest = Case.getCaseSuccessor();
5091
5092 // Replace phi operands in successor blocks that are using the constant case
5093 // value rather than the switch condition variable:
5094 // switchbb:
5095 // switch i32 %x, label %default [
5096 // i32 17, label %succ
5097 // ...
5098 // succ:
5099 // %r = phi i32 ... [ 17, %switchbb ] ...
5100 // -->
5101 // %r = phi i32 ... [ %x, %switchbb ] ...
5102
5103 for (PHINode &Phi : CaseDest->phis()) {
5104 // This only works if there is exactly 1 incoming edge from the switch to
5105 // a phi. If there is >1, that means multiple cases of the switch map to 1
5106 // value in the phi, and that phi value is not the switch condition. Thus,
5107 // this transform would not make sense (the phi would be invalid because
5108 // a phi can't have different incoming values from the same block).
5109 int SwitchBBIdx = Phi.getBasicBlockIndex(SwitchBlock);
5110 if (Phi.getIncomingValue(SwitchBBIdx) == CaseValue &&
5111 count(Phi.blocks(), SwitchBlock) == 1) {
5112 Phi.setIncomingValue(SwitchBBIdx, SI->getCondition());
5113 Changed = true;
5114 }
5115 }
5116
5117 // Collect phi nodes that are indirectly using this switch's case constants.
5118 int PhiIdx;
5119 if (auto *Phi = FindPHIForConditionForwarding(CaseValue, CaseDest, &PhiIdx))
5120 ForwardingNodes[Phi].push_back(PhiIdx);
5121 }
5122
5123 for (auto &ForwardingNode : ForwardingNodes) {
5124 PHINode *Phi = ForwardingNode.first;
5125 SmallVectorImpl<int> &Indexes = ForwardingNode.second;
5126 if (Indexes.size() < 2)
5127 continue;
5128
5129 for (int Index : Indexes)
5130 Phi->setIncomingValue(Index, SI->getCondition());
5131 Changed = true;
5132 }
5133
5134 return Changed;
5135}
5136
5137/// Return true if the backend will be able to handle
5138/// initializing an array of constants like C.
5139static bool ValidLookupTableConstant(Constant *C, const TargetTransformInfo &TTI) {
5140 if (C->isThreadDependent())
5141 return false;
5142 if (C->isDLLImportDependent())
5143 return false;
5144
5145 if (!isa<ConstantFP>(C) && !isa<ConstantInt>(C) &&
5146 !isa<ConstantPointerNull>(C) && !isa<GlobalValue>(C) &&
5147 !isa<UndefValue>(C) && !isa<ConstantExpr>(C))
5148 return false;
5149
5150 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
5151 if (!CE->isGEPWithNoNotionalOverIndexing())
5152 return false;
5153 if (!ValidLookupTableConstant(CE->getOperand(0), TTI))
5154 return false;
5155 }
5156
5157 if (!TTI.shouldBuildLookupTablesForConstant(C))
5158 return false;
5159
5160 return true;
5161}
5162
5163/// If V is a Constant, return it. Otherwise, try to look up
5164/// its constant value in ConstantPool, returning 0 if it's not there.
5165static Constant *
5166LookupConstant(Value *V,
5167 const SmallDenseMap<Value *, Constant *> &ConstantPool) {
5168 if (Constant *C = dyn_cast<Constant>(V))
5169 return C;
5170 return ConstantPool.lookup(V);
5171}
5172
5173/// Try to fold instruction I into a constant. This works for
5174/// simple instructions such as binary operations where both operands are
5175/// constant or can be replaced by constants from the ConstantPool. Returns the
5176/// resulting constant on success, 0 otherwise.
5177static Constant *
5178ConstantFold(Instruction *I, const DataLayout &DL,
5179 const SmallDenseMap<Value *, Constant *> &ConstantPool) {
5180 if (SelectInst *Select = dyn_cast<SelectInst>(I)) {
5181 Constant *A = LookupConstant(Select->getCondition(), ConstantPool);
5182 if (!A)
5183 return nullptr;
5184 if (A->isAllOnesValue())
5185 return LookupConstant(Select->getTrueValue(), ConstantPool);
5186 if (A->isNullValue())
5187 return LookupConstant(Select->getFalseValue(), ConstantPool);
5188 return nullptr;
5189 }
5190
5191 SmallVector<Constant *, 4> COps;
5192 for (unsigned N = 0, E = I->getNumOperands(); N != E; ++N) {
5193 if (Constant *A = LookupConstant(I->getOperand(N), ConstantPool))
5194 COps.push_back(A);
5195 else
5196 return nullptr;
5197 }
5198
5199 if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
5200 return ConstantFoldCompareInstOperands(Cmp->getPredicate(), COps[0],
5201 COps[1], DL);
5202 }
5203
5204 return ConstantFoldInstOperands(I, COps, DL);
5205}
5206
5207/// Try to determine the resulting constant values in phi nodes
5208/// at the common destination basic block, *CommonDest, for one of the case
5209/// destionations CaseDest corresponding to value CaseVal (0 for the default
5210/// case), of a switch instruction SI.
5211static bool
5212GetCaseResults(SwitchInst *SI, ConstantInt *CaseVal, BasicBlock *CaseDest,
5213 BasicBlock **CommonDest,
5214 SmallVectorImpl<std::pair<PHINode *, Constant *>> &Res,
5215 const DataLayout &DL, const TargetTransformInfo &TTI) {
5216 // The block from which we enter the common destination.
5217 BasicBlock *Pred = SI->getParent();
5218
5219 // If CaseDest is empty except for some side-effect free instructions through
5220 // which we can constant-propagate the CaseVal, continue to its successor.
5221 SmallDenseMap<Value *, Constant *> ConstantPool;
5222 ConstantPool.insert(std::make_pair(SI->getCondition(), CaseVal));
5223 for (Instruction &I :CaseDest->instructionsWithoutDebug()) {
5224 if (I.isTerminator()) {
5225 // If the terminator is a simple branch, continue to the next block.
5226 if (I.getNumSuccessors() != 1 || I.isExceptionalTerminator())
5227 return false;
5228 Pred = CaseDest;
5229 CaseDest = I.getSuccessor(0);
5230 } else if (Constant *C = ConstantFold(&I, DL, ConstantPool)) {
5231 // Instruction is side-effect free and constant.
5232
5233 // If the instruction has uses outside this block or a phi node slot for
5234 // the block, it is not safe to bypass the instruction since it would then
5235 // no longer dominate all its uses.
5236 for (auto &Use : I.uses()) {
5237 User *User = Use.getUser();
5238 if (Instruction *I = dyn_cast<Instruction>(User))
5239 if (I->getParent() == CaseDest)
5240 continue;
5241 if (PHINode *Phi = dyn_cast<PHINode>(User))
5242 if (Phi->getIncomingBlock(Use) == CaseDest)
5243 continue;
5244 return false;
5245 }
5246
5247 ConstantPool.insert(std::make_pair(&I, C));
5248 } else {
5249 break;
5250 }
5251 }
5252
5253 // If we did not have a CommonDest before, use the current one.
5254 if (!*CommonDest)
5255 *CommonDest = CaseDest;
5256 // If the destination isn't the common one, abort.
5257 if (CaseDest != *CommonDest)
5258 return false;
5259
5260 // Get the values for this case from phi nodes in the destination block.
5261 for (PHINode &PHI : (*CommonDest)->phis()) {
5262 int Idx = PHI.getBasicBlockIndex(Pred);
5263 if (Idx == -1)
5264 continue;
5265
5266 Constant *ConstVal =
5267 LookupConstant(PHI.getIncomingValue(Idx), ConstantPool);
5268 if (!ConstVal)
5269 return false;
5270
5271 // Be conservative about which kinds of constants we support.
5272 if (!ValidLookupTableConstant(ConstVal, TTI))
5273 return false;
5274
5275 Res.push_back(std::make_pair(&PHI, ConstVal));
5276 }
5277
5278 return Res.size() > 0;
5279}
5280
5281// Helper function used to add CaseVal to the list of cases that generate
5282// Result. Returns the updated number of cases that generate this result.
5283static uintptr_t MapCaseToResult(ConstantInt *CaseVal,
5284 SwitchCaseResultVectorTy &UniqueResults,
5285 Constant *Result) {
5286 for (auto &I : UniqueResults) {
5287 if (I.first == Result) {
5288 I.second.push_back(CaseVal);
5289 return I.second.size();
5290 }
5291 }
5292 UniqueResults.push_back(
5293 std::make_pair(Result, SmallVector<ConstantInt *, 4>(1, CaseVal)));
5294 return 1;
5295}
5296
5297// Helper function that initializes a map containing
5298// results for the PHI node of the common destination block for a switch
5299// instruction. Returns false if multiple PHI nodes have been found or if
5300// there is not a common destination block for the switch.
5301static bool
5302InitializeUniqueCases(SwitchInst *SI, PHINode *&PHI, BasicBlock *&CommonDest,
5303 SwitchCaseResultVectorTy &UniqueResults,
5304 Constant *&DefaultResult, const DataLayout &DL,
5305 const TargetTransformInfo &TTI,
5306 uintptr_t MaxUniqueResults, uintptr_t MaxCasesPerResult) {
5307 for (auto &I : SI->cases()) {
5308 ConstantInt *CaseVal = I.getCaseValue();
5309
5310 // Resulting value at phi nodes for this case value.
5311 SwitchCaseResultsTy Results;
5312 if (!GetCaseResults(SI, CaseVal, I.getCaseSuccessor(), &CommonDest, Results,
5313 DL, TTI))
5314 return false;
5315
5316 // Only one value per case is permitted.
5317 if (Results.size() > 1)
5318 return false;
5319
5320 // Add the case->result mapping to UniqueResults.
5321 const uintptr_t NumCasesForResult =
5322 MapCaseToResult(CaseVal, UniqueResults, Results.begin()->second);
5323
5324 // Early out if there are too many cases for this result.
5325 if (NumCasesForResult > MaxCasesPerResult)
5326 return false;
5327
5328 // Early out if there are too many unique results.
5329 if (UniqueResults.size() > MaxUniqueResults)
5330 return false;
5331
5332 // Check the PHI consistency.
5333 if (!PHI)
5334 PHI = Results[0].first;
5335 else if (PHI != Results[0].first)
5336 return false;
5337 }
5338 // Find the default result value.
5339 SmallVector<std::pair<PHINode *, Constant *>, 1> DefaultResults;
5340 BasicBlock *DefaultDest = SI->getDefaultDest();
5341 GetCaseResults(SI, nullptr, SI->getDefaultDest(), &CommonDest, DefaultResults,
5342 DL, TTI);
5343 // If the default value is not found abort unless the default destination
5344 // is unreachable.
5345 DefaultResult =
5346 DefaultResults.size() == 1 ? DefaultResults.begin()->second : nullptr;
5347 if ((!DefaultResult &&
5348 !isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg())))
5349 return false;
5350
5351 return true;
5352}
5353
5354// Helper function that checks if it is possible to transform a switch with only
5355// two cases (or two cases + default) that produces a result into a select.
5356// Example:
5357// switch (a) {
5358// case 10: %0 = icmp eq i32 %a, 10
5359// return 10; %1 = select i1 %0, i32 10, i32 4
5360// case 20: ----> %2 = icmp eq i32 %a, 20
5361// return 2; %3 = select i1 %2, i32 2, i32 %1
5362// default:
5363// return 4;
5364// }
5365static Value *ConvertTwoCaseSwitch(const SwitchCaseResultVectorTy &ResultVector,
5366 Constant *DefaultResult, Value *Condition,
5367 IRBuilder<> &Builder) {
5368 assert(ResultVector.size() == 2 &&((ResultVector.size() == 2 && "We should have exactly two unique results at this point"
) ? static_cast<void> (0) : __assert_fail ("ResultVector.size() == 2 && \"We should have exactly two unique results at this point\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5369, __PRETTY_FUNCTION__))
5369 "We should have exactly two unique results at this point")((ResultVector.size() == 2 && "We should have exactly two unique results at this point"
) ? static_cast<void> (0) : __assert_fail ("ResultVector.size() == 2 && \"We should have exactly two unique results at this point\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5369, __PRETTY_FUNCTION__))
;
5370 // If we are selecting between only two cases transform into a simple
5371 // select or a two-way select if default is possible.
5372 if (ResultVector[0].second.size() == 1 &&
5373 ResultVector[1].second.size() == 1) {
5374 ConstantInt *const FirstCase = ResultVector[0].second[0];
5375 ConstantInt *const SecondCase = ResultVector[1].second[0];
5376
5377 bool DefaultCanTrigger = DefaultResult;
5378 Value *SelectValue = ResultVector[1].first;
5379 if (DefaultCanTrigger) {
5380 Value *const ValueCompare =
5381 Builder.CreateICmpEQ(Condition, SecondCase, "switch.selectcmp");
5382 SelectValue = Builder.CreateSelect(ValueCompare, ResultVector[1].first,
5383 DefaultResult, "switch.select");
5384 }
5385 Value *const ValueCompare =
5386 Builder.CreateICmpEQ(Condition, FirstCase, "switch.selectcmp");
5387 return Builder.CreateSelect(ValueCompare, ResultVector[0].first,
5388 SelectValue, "switch.select");
5389 }
5390
5391 return nullptr;
5392}
5393
5394// Helper function to cleanup a switch instruction that has been converted into
5395// a select, fixing up PHI nodes and basic blocks.
5396static void RemoveSwitchAfterSelectConversion(SwitchInst *SI, PHINode *PHI,
5397 Value *SelectValue,
5398 IRBuilder<> &Builder,
5399 DomTreeUpdater *DTU) {
5400 std::vector<DominatorTree::UpdateType> Updates;
5401
5402 BasicBlock *SelectBB = SI->getParent();
5403 BasicBlock *DestBB = PHI->getParent();
5404
5405 if (!is_contained(predecessors(DestBB), SelectBB))
5406 Updates.push_back({DominatorTree::Insert, SelectBB, DestBB});
5407 Builder.CreateBr(DestBB);
5408
5409 // Remove the switch.
5410
5411 while (PHI->getBasicBlockIndex(SelectBB) >= 0)
5412 PHI->removeIncomingValue(SelectBB);
5413 PHI->addIncoming(SelectValue, SelectBB);
5414
5415 for (unsigned i = 0, e = SI->getNumSuccessors(); i < e; ++i) {
5416 BasicBlock *Succ = SI->getSuccessor(i);
5417
5418 if (Succ == DestBB)
5419 continue;
5420 Succ->removePredecessor(SelectBB);
5421 Updates.push_back({DominatorTree::Delete, SelectBB, Succ});
5422 }
5423 SI->eraseFromParent();
5424 if (DTU)
5425 DTU->applyUpdates(Updates);
5426}
5427
5428/// If the switch is only used to initialize one or more
5429/// phi nodes in a common successor block with only two different
5430/// constant values, replace the switch with select.
5431static bool switchToSelect(SwitchInst *SI, IRBuilder<> &Builder,
5432 DomTreeUpdater *DTU, const DataLayout &DL,
5433 const TargetTransformInfo &TTI) {
5434 Value *const Cond = SI->getCondition();
5435 PHINode *PHI = nullptr;
5436 BasicBlock *CommonDest = nullptr;
5437 Constant *DefaultResult;
5438 SwitchCaseResultVectorTy UniqueResults;
5439 // Collect all the cases that will deliver the same value from the switch.
5440 if (!InitializeUniqueCases(SI, PHI, CommonDest, UniqueResults, DefaultResult,
5441 DL, TTI, 2, 1))
5442 return false;
5443 // Selects choose between maximum two values.
5444 if (UniqueResults.size() != 2)
5445 return false;
5446 assert(PHI != nullptr && "PHI for value select not found")((PHI != nullptr && "PHI for value select not found")
? static_cast<void> (0) : __assert_fail ("PHI != nullptr && \"PHI for value select not found\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5446, __PRETTY_FUNCTION__))
;
5447
5448 Builder.SetInsertPoint(SI);
5449 Value *SelectValue =
5450 ConvertTwoCaseSwitch(UniqueResults, DefaultResult, Cond, Builder);
5451 if (SelectValue) {
5452 RemoveSwitchAfterSelectConversion(SI, PHI, SelectValue, Builder, DTU);
5453 return true;
5454 }
5455 // The switch couldn't be converted into a select.
5456 return false;
5457}
5458
5459namespace {
5460
5461/// This class represents a lookup table that can be used to replace a switch.
5462class SwitchLookupTable {
5463public:
5464 /// Create a lookup table to use as a switch replacement with the contents
5465 /// of Values, using DefaultValue to fill any holes in the table.
5466 SwitchLookupTable(
5467 Module &M, uint64_t TableSize, ConstantInt *Offset,
5468 const SmallVectorImpl<std::pair<ConstantInt *, Constant *>> &Values,
5469 Constant *DefaultValue, const DataLayout &DL, const StringRef &FuncName);
5470
5471 /// Build instructions with Builder to retrieve the value at
5472 /// the position given by Index in the lookup table.
5473 Value *BuildLookup(Value *Index, IRBuilder<> &Builder);
5474
5475 /// Return true if a table with TableSize elements of
5476 /// type ElementType would fit in a target-legal register.
5477 static bool WouldFitInRegister(const DataLayout &DL, uint64_t TableSize,
5478 Type *ElementType);
5479
5480private:
5481 // Depending on the contents of the table, it can be represented in
5482 // different ways.
5483 enum {
5484 // For tables where each element contains the same value, we just have to
5485 // store that single value and return it for each lookup.
5486 SingleValueKind,
5487
5488 // For tables where there is a linear relationship between table index
5489 // and values. We calculate the result with a simple multiplication
5490 // and addition instead of a table lookup.
5491 LinearMapKind,
5492
5493 // For small tables with integer elements, we can pack them into a bitmap
5494 // that fits into a target-legal register. Values are retrieved by
5495 // shift and mask operations.
5496 BitMapKind,
5497
5498 // The table is stored as an array of values. Values are retrieved by load
5499 // instructions from the table.
5500 ArrayKind
5501 } Kind;
5502
5503 // For SingleValueKind, this is the single value.
5504 Constant *SingleValue = nullptr;
5505
5506 // For BitMapKind, this is the bitmap.
5507 ConstantInt *BitMap = nullptr;
5508 IntegerType *BitMapElementTy = nullptr;
5509
5510 // For LinearMapKind, these are the constants used to derive the value.
5511 ConstantInt *LinearOffset = nullptr;
5512 ConstantInt *LinearMultiplier = nullptr;
5513
5514 // For ArrayKind, this is the array.
5515 GlobalVariable *Array = nullptr;
5516};
5517
5518} // end anonymous namespace
5519
5520SwitchLookupTable::SwitchLookupTable(
5521 Module &M, uint64_t TableSize, ConstantInt *Offset,
5522 const SmallVectorImpl<std::pair<ConstantInt *, Constant *>> &Values,
5523 Constant *DefaultValue, const DataLayout &DL, const StringRef &FuncName) {
5524 assert(Values.size() && "Can't build lookup table without values!")((Values.size() && "Can't build lookup table without values!"
) ? static_cast<void> (0) : __assert_fail ("Values.size() && \"Can't build lookup table without values!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5524, __PRETTY_FUNCTION__))
;
5525 assert(TableSize >= Values.size() && "Can't fit values in table!")((TableSize >= Values.size() && "Can't fit values in table!"
) ? static_cast<void> (0) : __assert_fail ("TableSize >= Values.size() && \"Can't fit values in table!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5525, __PRETTY_FUNCTION__))
;
5526
5527 // If all values in the table are equal, this is that value.
5528 SingleValue = Values.begin()->second;
5529
5530 Type *ValueType = Values.begin()->second->getType();
5531
5532 // Build up the table contents.
5533 SmallVector<Constant *, 64> TableContents(TableSize);
5534 for (size_t I = 0, E = Values.size(); I != E; ++I) {
5535 ConstantInt *CaseVal = Values[I].first;
5536 Constant *CaseRes = Values[I].second;
5537 assert(CaseRes->getType() == ValueType)((CaseRes->getType() == ValueType) ? static_cast<void>
(0) : __assert_fail ("CaseRes->getType() == ValueType", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5537, __PRETTY_FUNCTION__))
;
5538
5539 uint64_t Idx = (CaseVal->getValue() - Offset->getValue()).getLimitedValue();
5540 TableContents[Idx] = CaseRes;
5541
5542 if (CaseRes != SingleValue)
5543 SingleValue = nullptr;
5544 }
5545
5546 // Fill in any holes in the table with the default result.
5547 if (Values.size() < TableSize) {
5548 assert(DefaultValue &&((DefaultValue && "Need a default value to fill the lookup table holes."
) ? static_cast<void> (0) : __assert_fail ("DefaultValue && \"Need a default value to fill the lookup table holes.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5549, __PRETTY_FUNCTION__))
5549 "Need a default value to fill the lookup table holes.")((DefaultValue && "Need a default value to fill the lookup table holes."
) ? static_cast<void> (0) : __assert_fail ("DefaultValue && \"Need a default value to fill the lookup table holes.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5549, __PRETTY_FUNCTION__))
;
5550 assert(DefaultValue->getType() == ValueType)((DefaultValue->getType() == ValueType) ? static_cast<void
> (0) : __assert_fail ("DefaultValue->getType() == ValueType"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5550, __PRETTY_FUNCTION__))
;
5551 for (uint64_t I = 0; I < TableSize; ++I) {
5552 if (!TableContents[I])
5553 TableContents[I] = DefaultValue;
5554 }
5555
5556 if (DefaultValue != SingleValue)
5557 SingleValue = nullptr;
5558 }
5559
5560 // If each element in the table contains the same value, we only need to store
5561 // that single value.
5562 if (SingleValue) {
5563 Kind = SingleValueKind;
5564 return;
5565 }
5566
5567 // Check if we can derive the value with a linear transformation from the
5568 // table index.
5569 if (isa<IntegerType>(ValueType)) {
5570 bool LinearMappingPossible = true;
5571 APInt PrevVal;
5572 APInt DistToPrev;
5573 assert(TableSize >= 2 && "Should be a SingleValue table.")((TableSize >= 2 && "Should be a SingleValue table."
) ? static_cast<void> (0) : __assert_fail ("TableSize >= 2 && \"Should be a SingleValue table.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5573, __PRETTY_FUNCTION__))
;
5574 // Check if there is the same distance between two consecutive values.
5575 for (uint64_t I = 0; I < TableSize; ++I) {
5576 ConstantInt *ConstVal = dyn_cast<ConstantInt>(TableContents[I]);
5577 if (!ConstVal) {
5578 // This is an undef. We could deal with it, but undefs in lookup tables
5579 // are very seldom. It's probably not worth the additional complexity.
5580 LinearMappingPossible = false;
5581 break;
5582 }
5583 const APInt &Val = ConstVal->getValue();
5584 if (I != 0) {
5585 APInt Dist = Val - PrevVal;
5586 if (I == 1) {
5587 DistToPrev = Dist;
5588 } else if (Dist != DistToPrev) {
5589 LinearMappingPossible = false;
5590 break;
5591 }
5592 }
5593 PrevVal = Val;
5594 }
5595 if (LinearMappingPossible) {
5596 LinearOffset = cast<ConstantInt>(TableContents[0]);
5597 LinearMultiplier = ConstantInt::get(M.getContext(), DistToPrev);
5598 Kind = LinearMapKind;
5599 ++NumLinearMaps;
5600 return;
5601 }
5602 }
5603
5604 // If the type is integer and the table fits in a register, build a bitmap.
5605 if (WouldFitInRegister(DL, TableSize, ValueType)) {
5606 IntegerType *IT = cast<IntegerType>(ValueType);
5607 APInt TableInt(TableSize * IT->getBitWidth(), 0);
5608 for (uint64_t I = TableSize; I > 0; --I) {
5609 TableInt <<= IT->getBitWidth();
5610 // Insert values into the bitmap. Undef values are set to zero.
5611 if (!isa<UndefValue>(TableContents[I - 1])) {
5612 ConstantInt *Val = cast<ConstantInt>(TableContents[I - 1]);
5613 TableInt |= Val->getValue().zext(TableInt.getBitWidth());
5614 }
5615 }
5616 BitMap = ConstantInt::get(M.getContext(), TableInt);
5617 BitMapElementTy = IT;
5618 Kind = BitMapKind;
5619 ++NumBitMaps;
5620 return;
5621 }
5622
5623 // Store the table in an array.
5624 ArrayType *ArrayTy = ArrayType::get(ValueType, TableSize);
5625 Constant *Initializer = ConstantArray::get(ArrayTy, TableContents);
5626
5627 Array = new GlobalVariable(M, ArrayTy, /*isConstant=*/true,
5628 GlobalVariable::PrivateLinkage, Initializer,
5629 "switch.table." + FuncName);
5630 Array->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
5631 // Set the alignment to that of an array items. We will be only loading one
5632 // value out of it.
5633 Array->setAlignment(Align(DL.getPrefTypeAlignment(ValueType)));
5634 Kind = ArrayKind;
5635}
5636
5637Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
5638 switch (Kind) {
5639 case SingleValueKind:
5640 return SingleValue;
5641 case LinearMapKind: {
5642 // Derive the result value from the input value.
5643 Value *Result = Builder.CreateIntCast(Index, LinearMultiplier->getType(),
5644 false, "switch.idx.cast");
5645 if (!LinearMultiplier->isOne())
5646 Result = Builder.CreateMul(Result, LinearMultiplier, "switch.idx.mult");
5647 if (!LinearOffset->isZero())
5648 Result = Builder.CreateAdd(Result, LinearOffset, "switch.offset");
5649 return Result;
5650 }
5651 case BitMapKind: {
5652 // Type of the bitmap (e.g. i59).
5653 IntegerType *MapTy = BitMap->getType();
5654
5655 // Cast Index to the same type as the bitmap.
5656 // Note: The Index is <= the number of elements in the table, so
5657 // truncating it to the width of the bitmask is safe.
5658 Value *ShiftAmt = Builder.CreateZExtOrTrunc(Index, MapTy, "switch.cast");
5659
5660 // Multiply the shift amount by the element width.
5661 ShiftAmt = Builder.CreateMul(
5662 ShiftAmt, ConstantInt::get(MapTy, BitMapElementTy->getBitWidth()),
5663 "switch.shiftamt");
5664
5665 // Shift down.
5666 Value *DownShifted =
5667 Builder.CreateLShr(BitMap, ShiftAmt, "switch.downshift");
5668 // Mask off.
5669 return Builder.CreateTrunc(DownShifted, BitMapElementTy, "switch.masked");
5670 }
5671 case ArrayKind: {
5672 // Make sure the table index will not overflow when treated as signed.
5673 IntegerType *IT = cast<IntegerType>(Index->getType());
5674 uint64_t TableSize =
5675 Array->getInitializer()->getType()->getArrayNumElements();
5676 if (TableSize > (1ULL << (IT->getBitWidth() - 1)))
5677 Index = Builder.CreateZExt(
5678 Index, IntegerType::get(IT->getContext(), IT->getBitWidth() + 1),
5679 "switch.tableidx.zext");
5680
5681 Value *GEPIndices[] = {Builder.getInt32(0), Index};
5682 Value *GEP = Builder.CreateInBoundsGEP(Array->getValueType(), Array,
5683 GEPIndices, "switch.gep");
5684 return Builder.CreateLoad(
5685 cast<ArrayType>(Array->getValueType())->getElementType(), GEP,
5686 "switch.load");
5687 }
5688 }
5689 llvm_unreachable("Unknown lookup table kind!")::llvm::llvm_unreachable_internal("Unknown lookup table kind!"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5689)
;
5690}
5691
5692bool SwitchLookupTable::WouldFitInRegister(const DataLayout &DL,
5693 uint64_t TableSize,
5694 Type *ElementType) {
5695 auto *IT = dyn_cast<IntegerType>(ElementType);
5696 if (!IT)
5697 return false;
5698 // FIXME: If the type is wider than it needs to be, e.g. i8 but all values
5699 // are <= 15, we could try to narrow the type.
5700
5701 // Avoid overflow, fitsInLegalInteger uses unsigned int for the width.
5702 if (TableSize >= UINT_MAX(2147483647 *2U +1U) / IT->getBitWidth())
5703 return false;
5704 return DL.fitsInLegalInteger(TableSize * IT->getBitWidth());
5705}
5706
5707/// Determine whether a lookup table should be built for this switch, based on
5708/// the number of cases, size of the table, and the types of the results.
5709static bool
5710ShouldBuildLookupTable(SwitchInst *SI, uint64_t TableSize,
5711 const TargetTransformInfo &TTI, const DataLayout &DL,
5712 const SmallDenseMap<PHINode *, Type *> &ResultTypes) {
5713 if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX(18446744073709551615UL) / 10)
5714 return false; // TableSize overflowed, or mul below might overflow.
5715
5716 bool AllTablesFitInRegister = true;
5717 bool HasIllegalType = false;
5718 for (const auto &I : ResultTypes) {
5719 Type *Ty = I.second;
5720
5721 // Saturate this flag to true.
5722 HasIllegalType = HasIllegalType || !TTI.isTypeLegal(Ty);
5723
5724 // Saturate this flag to false.
5725 AllTablesFitInRegister =
5726 AllTablesFitInRegister &&
5727 SwitchLookupTable::WouldFitInRegister(DL, TableSize, Ty);
5728
5729 // If both flags saturate, we're done. NOTE: This *only* works with
5730 // saturating flags, and all flags have to saturate first due to the
5731 // non-deterministic behavior of iterating over a dense map.
5732 if (HasIllegalType && !AllTablesFitInRegister)
5733 break;
5734 }
5735
5736 // If each table would fit in a register, we should build it anyway.
5737 if (AllTablesFitInRegister)
5738 return true;
5739
5740 // Don't build a table that doesn't fit in-register if it has illegal types.
5741 if (HasIllegalType)
5742 return false;
5743
5744 // The table density should be at least 40%. This is the same criterion as for
5745 // jump tables, see SelectionDAGBuilder::handleJTSwitchCase.
5746 // FIXME: Find the best cut-off.
5747 return SI->getNumCases() * 10 >= TableSize * 4;
5748}
5749
5750/// Try to reuse the switch table index compare. Following pattern:
5751/// \code
5752/// if (idx < tablesize)
5753/// r = table[idx]; // table does not contain default_value
5754/// else
5755/// r = default_value;
5756/// if (r != default_value)
5757/// ...
5758/// \endcode
5759/// Is optimized to:
5760/// \code
5761/// cond = idx < tablesize;
5762/// if (cond)
5763/// r = table[idx];
5764/// else
5765/// r = default_value;
5766/// if (cond)
5767/// ...
5768/// \endcode
5769/// Jump threading will then eliminate the second if(cond).
5770static void reuseTableCompare(
5771 User *PhiUser, BasicBlock *PhiBlock, BranchInst *RangeCheckBranch,
5772 Constant *DefaultValue,
5773 const SmallVectorImpl<std::pair<ConstantInt *, Constant *>> &Values) {
5774 ICmpInst *CmpInst = dyn_cast<ICmpInst>(PhiUser);
5775 if (!CmpInst)
5776 return;
5777
5778 // We require that the compare is in the same block as the phi so that jump
5779 // threading can do its work afterwards.
5780 if (CmpInst->getParent() != PhiBlock)
5781 return;
5782
5783 Constant *CmpOp1 = dyn_cast<Constant>(CmpInst->getOperand(1));
5784 if (!CmpOp1)
5785 return;
5786
5787 Value *RangeCmp = RangeCheckBranch->getCondition();
5788 Constant *TrueConst = ConstantInt::getTrue(RangeCmp->getType());
5789 Constant *FalseConst = ConstantInt::getFalse(RangeCmp->getType());
5790
5791 // Check if the compare with the default value is constant true or false.
5792 Constant *DefaultConst = ConstantExpr::getICmp(CmpInst->getPredicate(),
5793 DefaultValue, CmpOp1, true);
5794 if (DefaultConst != TrueConst && DefaultConst != FalseConst)
5795 return;
5796
5797 // Check if the compare with the case values is distinct from the default
5798 // compare result.
5799 for (auto ValuePair : Values) {
5800 Constant *CaseConst = ConstantExpr::getICmp(CmpInst->getPredicate(),
5801 ValuePair.second, CmpOp1, true);
5802 if (!CaseConst || CaseConst == DefaultConst || isa<UndefValue>(CaseConst))
5803 return;
5804 assert((CaseConst == TrueConst || CaseConst == FalseConst) &&(((CaseConst == TrueConst || CaseConst == FalseConst) &&
"Expect true or false as compare result.") ? static_cast<
void> (0) : __assert_fail ("(CaseConst == TrueConst || CaseConst == FalseConst) && \"Expect true or false as compare result.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5805, __PRETTY_FUNCTION__))
5805 "Expect true or false as compare result.")(((CaseConst == TrueConst || CaseConst == FalseConst) &&
"Expect true or false as compare result.") ? static_cast<
void> (0) : __assert_fail ("(CaseConst == TrueConst || CaseConst == FalseConst) && \"Expect true or false as compare result.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5805, __PRETTY_FUNCTION__))
;
5806 }
5807
5808 // Check if the branch instruction dominates the phi node. It's a simple
5809 // dominance check, but sufficient for our needs.
5810 // Although this check is invariant in the calling loops, it's better to do it
5811 // at this late stage. Practically we do it at most once for a switch.
5812 BasicBlock *BranchBlock = RangeCheckBranch->getParent();
5813 for (auto PI = pred_begin(PhiBlock), E = pred_end(PhiBlock); PI != E; ++PI) {
5814 BasicBlock *Pred = *PI;
5815 if (Pred != BranchBlock && Pred->getUniquePredecessor() != BranchBlock)
5816 return;
5817 }
5818
5819 if (DefaultConst == FalseConst) {
5820 // The compare yields the same result. We can replace it.
5821 CmpInst->replaceAllUsesWith(RangeCmp);
5822 ++NumTableCmpReuses;
5823 } else {
5824 // The compare yields the same result, just inverted. We can replace it.
5825 Value *InvertedTableCmp = BinaryOperator::CreateXor(
5826 RangeCmp, ConstantInt::get(RangeCmp->getType(), 1), "inverted.cmp",
5827 RangeCheckBranch);
5828 CmpInst->replaceAllUsesWith(InvertedTableCmp);
5829 ++NumTableCmpReuses;
5830 }
5831}
5832
5833/// If the switch is only used to initialize one or more phi nodes in a common
5834/// successor block with different constant values, replace the switch with
5835/// lookup tables.
5836static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
5837 DomTreeUpdater *DTU, const DataLayout &DL,
5838 const TargetTransformInfo &TTI) {
5839 assert(SI->getNumCases() > 1 && "Degenerate switch?")((SI->getNumCases() > 1 && "Degenerate switch?"
) ? static_cast<void> (0) : __assert_fail ("SI->getNumCases() > 1 && \"Degenerate switch?\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5839, __PRETTY_FUNCTION__))
;
5840
5841 BasicBlock *BB = SI->getParent();
5842 Function *Fn = BB->getParent();
5843 // Only build lookup table when we have a target that supports it or the
5844 // attribute is not set.
5845 if (!TTI.shouldBuildLookupTables() ||
5846 (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true"))
5847 return false;
5848
5849 // FIXME: If the switch is too sparse for a lookup table, perhaps we could
5850 // split off a dense part and build a lookup table for that.
5851
5852 // FIXME: This creates arrays of GEPs to constant strings, which means each
5853 // GEP needs a runtime relocation in PIC code. We should just build one big
5854 // string and lookup indices into that.
5855
5856 // Ignore switches with less than three cases. Lookup tables will not make
5857 // them faster, so we don't analyze them.
5858 if (SI->getNumCases() < 3)
5859 return false;
5860
5861 // Figure out the corresponding result for each case value and phi node in the
5862 // common destination, as well as the min and max case values.
5863 assert(!SI->cases().empty())((!SI->cases().empty()) ? static_cast<void> (0) : __assert_fail
("!SI->cases().empty()", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5863, __PRETTY_FUNCTION__))
;
5864 SwitchInst::CaseIt CI = SI->case_begin();
5865 ConstantInt *MinCaseVal = CI->getCaseValue();
5866 ConstantInt *MaxCaseVal = CI->getCaseValue();
5867
5868 BasicBlock *CommonDest = nullptr;
5869
5870 using ResultListTy = SmallVector<std::pair<ConstantInt *, Constant *>, 4>;
5871 SmallDenseMap<PHINode *, ResultListTy> ResultLists;
5872
5873 SmallDenseMap<PHINode *, Constant *> DefaultResults;
5874 SmallDenseMap<PHINode *, Type *> ResultTypes;
5875 SmallVector<PHINode *, 4> PHIs;
5876
5877 for (SwitchInst::CaseIt E = SI->case_end(); CI != E; ++CI) {
5878 ConstantInt *CaseVal = CI->getCaseValue();
5879 if (CaseVal->getValue().slt(MinCaseVal->getValue()))
5880 MinCaseVal = CaseVal;
5881 if (CaseVal->getValue().sgt(MaxCaseVal->getValue()))
5882 MaxCaseVal = CaseVal;
5883
5884 // Resulting value at phi nodes for this case value.
5885 using ResultsTy = SmallVector<std::pair<PHINode *, Constant *>, 4>;
5886 ResultsTy Results;
5887 if (!GetCaseResults(SI, CaseVal, CI->getCaseSuccessor(), &CommonDest,
5888 Results, DL, TTI))
5889 return false;
5890
5891 // Append the result from this case to the list for each phi.
5892 for (const auto &I : Results) {
5893 PHINode *PHI = I.first;
5894 Constant *Value = I.second;
5895 if (!ResultLists.count(PHI))
5896 PHIs.push_back(PHI);
5897 ResultLists[PHI].push_back(std::make_pair(CaseVal, Value));
5898 }
5899 }
5900
5901 // Keep track of the result types.
5902 for (PHINode *PHI : PHIs) {
5903 ResultTypes[PHI] = ResultLists[PHI][0].second->getType();
5904 }
5905
5906 uint64_t NumResults = ResultLists[PHIs[0]].size();
5907 APInt RangeSpread = MaxCaseVal->getValue() - MinCaseVal->getValue();
5908 uint64_t TableSize = RangeSpread.getLimitedValue() + 1;
5909 bool TableHasHoles = (NumResults < TableSize);
5910
5911 // If the table has holes, we need a constant result for the default case
5912 // or a bitmask that fits in a register.
5913 SmallVector<std::pair<PHINode *, Constant *>, 4> DefaultResultsList;
5914 bool HasDefaultResults =
5915 GetCaseResults(SI, nullptr, SI->getDefaultDest(), &CommonDest,
5916 DefaultResultsList, DL, TTI);
5917
5918 bool NeedMask = (TableHasHoles && !HasDefaultResults);
5919 if (NeedMask) {
5920 // As an extra penalty for the validity test we require more cases.
5921 if (SI->getNumCases() < 4) // FIXME: Find best threshold value (benchmark).
5922 return false;
5923 if (!DL.fitsInLegalInteger(TableSize))
5924 return false;
5925 }
5926
5927 for (const auto &I : DefaultResultsList) {
5928 PHINode *PHI = I.first;
5929 Constant *Result = I.second;
5930 DefaultResults[PHI] = Result;
5931 }
5932
5933 if (!ShouldBuildLookupTable(SI, TableSize, TTI, DL, ResultTypes))
5934 return false;
5935
5936 std::vector<DominatorTree::UpdateType> Updates;
5937
5938 // Create the BB that does the lookups.
5939 Module &Mod = *CommonDest->getParent()->getParent();
5940 BasicBlock *LookupBB = BasicBlock::Create(
5941 Mod.getContext(), "switch.lookup", CommonDest->getParent(), CommonDest);
5942
5943 // Compute the table index value.
5944 Builder.SetInsertPoint(SI);
5945 Value *TableIndex;
5946 if (MinCaseVal->isNullValue())
5947 TableIndex = SI->getCondition();
5948 else
5949 TableIndex = Builder.CreateSub(SI->getCondition(), MinCaseVal,
5950 "switch.tableidx");
5951
5952 // Compute the maximum table size representable by the integer type we are
5953 // switching upon.
5954 unsigned CaseSize = MinCaseVal->getType()->getPrimitiveSizeInBits();
5955 uint64_t MaxTableSize = CaseSize > 63 ? UINT64_MAX(18446744073709551615UL) : 1ULL << CaseSize;
5956 assert(MaxTableSize >= TableSize &&((MaxTableSize >= TableSize && "It is impossible for a switch to have more entries than the max "
"representable value of its input integer type's size.") ? static_cast
<void> (0) : __assert_fail ("MaxTableSize >= TableSize && \"It is impossible for a switch to have more entries than the max \" \"representable value of its input integer type's size.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5958, __PRETTY_FUNCTION__))
5957 "It is impossible for a switch to have more entries than the max "((MaxTableSize >= TableSize && "It is impossible for a switch to have more entries than the max "
"representable value of its input integer type's size.") ? static_cast
<void> (0) : __assert_fail ("MaxTableSize >= TableSize && \"It is impossible for a switch to have more entries than the max \" \"representable value of its input integer type's size.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5958, __PRETTY_FUNCTION__))
5958 "representable value of its input integer type's size.")((MaxTableSize >= TableSize && "It is impossible for a switch to have more entries than the max "
"representable value of its input integer type's size.") ? static_cast
<void> (0) : __assert_fail ("MaxTableSize >= TableSize && \"It is impossible for a switch to have more entries than the max \" \"representable value of its input integer type's size.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 5958, __PRETTY_FUNCTION__))
;
5959
5960 // If the default destination is unreachable, or if the lookup table covers
5961 // all values of the conditional variable, branch directly to the lookup table
5962 // BB. Otherwise, check that the condition is within the case range.
5963 const bool DefaultIsReachable =
5964 !isa<UnreachableInst>(SI->getDefaultDest()->getFirstNonPHIOrDbg());
5965 const bool GeneratingCoveredLookupTable = (MaxTableSize == TableSize);
5966 BranchInst *RangeCheckBranch = nullptr;
5967
5968 if (!DefaultIsReachable || GeneratingCoveredLookupTable) {
5969 Builder.CreateBr(LookupBB);
5970 Updates.push_back({DominatorTree::Insert, BB, LookupBB});
5971 // Note: We call removeProdecessor later since we need to be able to get the
5972 // PHI value for the default case in case we're using a bit mask.
5973 } else {
5974 Value *Cmp = Builder.CreateICmpULT(
5975 TableIndex, ConstantInt::get(MinCaseVal->getType(), TableSize));
5976 RangeCheckBranch =
5977 Builder.CreateCondBr(Cmp, LookupBB, SI->getDefaultDest());
5978 Updates.push_back({DominatorTree::Insert, BB, LookupBB});
5979 }
5980
5981 // Populate the BB that does the lookups.
5982 Builder.SetInsertPoint(LookupBB);
5983
5984 if (NeedMask) {
5985 // Before doing the lookup, we do the hole check. The LookupBB is therefore
5986 // re-purposed to do the hole check, and we create a new LookupBB.
5987 BasicBlock *MaskBB = LookupBB;
5988 MaskBB->setName("switch.hole_check");
5989 LookupBB = BasicBlock::Create(Mod.getContext(), "switch.lookup",
5990 CommonDest->getParent(), CommonDest);
5991
5992 // Make the mask's bitwidth at least 8-bit and a power-of-2 to avoid
5993 // unnecessary illegal types.
5994 uint64_t TableSizePowOf2 = NextPowerOf2(std::max(7ULL, TableSize - 1ULL));
5995 APInt MaskInt(TableSizePowOf2, 0);
5996 APInt One(TableSizePowOf2, 1);
5997 // Build bitmask; fill in a 1 bit for every case.
5998 const ResultListTy &ResultList = ResultLists[PHIs[0]];
5999 for (size_t I = 0, E = ResultList.size(); I != E; ++I) {
6000 uint64_t Idx = (ResultList[I].first->getValue() - MinCaseVal->getValue())
6001 .getLimitedValue();
6002 MaskInt |= One << Idx;
6003 }
6004 ConstantInt *TableMask = ConstantInt::get(Mod.getContext(), MaskInt);
6005
6006 // Get the TableIndex'th bit of the bitmask.
6007 // If this bit is 0 (meaning hole) jump to the default destination,
6008 // else continue with table lookup.
6009 IntegerType *MapTy = TableMask->getType();
6010 Value *MaskIndex =
6011 Builder.CreateZExtOrTrunc(TableIndex, MapTy, "switch.maskindex");
6012 Value *Shifted = Builder.CreateLShr(TableMask, MaskIndex, "switch.shifted");
6013 Value *LoBit = Builder.CreateTrunc(
6014 Shifted, Type::getInt1Ty(Mod.getContext()), "switch.lobit");
6015 Builder.CreateCondBr(LoBit, LookupBB, SI->getDefaultDest());
6016 Updates.push_back({DominatorTree::Insert, MaskBB, LookupBB});
6017 Updates.push_back({DominatorTree::Insert, MaskBB, SI->getDefaultDest()});
6018 Builder.SetInsertPoint(LookupBB);
6019 AddPredecessorToBlock(SI->getDefaultDest(), MaskBB, BB);
6020 }
6021
6022 if (!DefaultIsReachable || GeneratingCoveredLookupTable) {
6023 // We cached PHINodes in PHIs. To avoid accessing deleted PHINodes later,
6024 // do not delete PHINodes here.
6025 SI->getDefaultDest()->removePredecessor(BB,
6026 /*KeepOneInputPHIs=*/true);
6027 Updates.push_back({DominatorTree::Delete, BB, SI->getDefaultDest()});
6028 }
6029
6030 bool ReturnedEarly = false;
6031 for (PHINode *PHI : PHIs) {
6032 const ResultListTy &ResultList = ResultLists[PHI];
6033
6034 // If using a bitmask, use any value to fill the lookup table holes.
6035 Constant *DV = NeedMask ? ResultLists[PHI][0].second : DefaultResults[PHI];
6036 StringRef FuncName = Fn->getName();
6037 SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultList, DV, DL,
6038 FuncName);
6039
6040 Value *Result = Table.BuildLookup(TableIndex, Builder);
6041
6042 // If the result is used to return immediately from the function, we want to
6043 // do that right here.
6044 if (PHI->hasOneUse() && isa<ReturnInst>(*PHI->user_begin()) &&
6045 PHI->user_back() == CommonDest->getFirstNonPHIOrDbg()) {
6046 Builder.CreateRet(Result);
6047 ReturnedEarly = true;
6048 break;
6049 }
6050
6051 // Do a small peephole optimization: re-use the switch table compare if
6052 // possible.
6053 if (!TableHasHoles && HasDefaultResults && RangeCheckBranch) {
6054 BasicBlock *PhiBlock = PHI->getParent();
6055 // Search for compare instructions which use the phi.
6056 for (auto *User : PHI->users()) {
6057 reuseTableCompare(User, PhiBlock, RangeCheckBranch, DV, ResultList);
6058 }
6059 }
6060
6061 PHI->addIncoming(Result, LookupBB);
6062 }
6063
6064 if (!ReturnedEarly) {
6065 Builder.CreateBr(CommonDest);
6066 Updates.push_back({DominatorTree::Insert, LookupBB, CommonDest});
6067 }
6068
6069 // Remove the switch.
6070 SmallSetVector<BasicBlock *, 8> RemovedSuccessors;
6071 for (unsigned i = 0, e = SI->getNumSuccessors(); i < e; ++i) {
6072 BasicBlock *Succ = SI->getSuccessor(i);
6073
6074 if (Succ == SI->getDefaultDest())
6075 continue;
6076 Succ->removePredecessor(BB);
6077 RemovedSuccessors.insert(Succ);
6078 }
6079 SI->eraseFromParent();
6080
6081 if (DTU) {
6082 for (BasicBlock *RemovedSuccessor : RemovedSuccessors)
6083 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
6084 DTU->applyUpdates(Updates);
6085 }
6086
6087 ++NumLookupTables;
6088 if (NeedMask)
6089 ++NumLookupTablesHoles;
6090 return true;
6091}
6092
6093static bool isSwitchDense(ArrayRef<int64_t> Values) {
6094 // See also SelectionDAGBuilder::isDense(), which this function was based on.
6095 uint64_t Diff = (uint64_t)Values.back() - (uint64_t)Values.front();
6096 uint64_t Range = Diff + 1;
6097 uint64_t NumCases = Values.size();
6098 // 40% is the default density for building a jump table in optsize/minsize mode.
6099 uint64_t MinDensity = 40;
6100
6101 return NumCases * 100 >= Range * MinDensity;
6102}
6103
6104/// Try to transform a switch that has "holes" in it to a contiguous sequence
6105/// of cases.
6106///
6107/// A switch such as: switch(i) {case 5: case 9: case 13: case 17:} can be
6108/// range-reduced to: switch ((i-5) / 4) {case 0: case 1: case 2: case 3:}.
6109///
6110/// This converts a sparse switch into a dense switch which allows better
6111/// lowering and could also allow transforming into a lookup table.
6112static bool ReduceSwitchRange(SwitchInst *SI, IRBuilder<> &Builder,
6113 const DataLayout &DL,
6114 const TargetTransformInfo &TTI) {
6115 auto *CondTy = cast<IntegerType>(SI->getCondition()->getType());
6116 if (CondTy->getIntegerBitWidth() > 64 ||
6117 !DL.fitsInLegalInteger(CondTy->getIntegerBitWidth()))
6118 return false;
6119 // Only bother with this optimization if there are more than 3 switch cases;
6120 // SDAG will only bother creating jump tables for 4 or more cases.
6121 if (SI->getNumCases() < 4)
6122 return false;
6123
6124 // This transform is agnostic to the signedness of the input or case values. We
6125 // can treat the case values as signed or unsigned. We can optimize more common
6126 // cases such as a sequence crossing zero {-4,0,4,8} if we interpret case values
6127 // as signed.
6128 SmallVector<int64_t,4> Values;
6129 for (auto &C : SI->cases())
6130 Values.push_back(C.getCaseValue()->getValue().getSExtValue());
6131 llvm::sort(Values);
6132
6133 // If the switch is already dense, there's nothing useful to do here.
6134 if (isSwitchDense(Values))
6135 return false;
6136
6137 // First, transform the values such that they start at zero and ascend.
6138 int64_t Base = Values[0];
6139 for (auto &V : Values)
6140 V -= (uint64_t)(Base);
6141
6142 // Now we have signed numbers that have been shifted so that, given enough
6143 // precision, there are no negative values. Since the rest of the transform
6144 // is bitwise only, we switch now to an unsigned representation.
6145
6146 // This transform can be done speculatively because it is so cheap - it
6147 // results in a single rotate operation being inserted.
6148 // FIXME: It's possible that optimizing a switch on powers of two might also
6149 // be beneficial - flag values are often powers of two and we could use a CLZ
6150 // as the key function.
6151
6152 // countTrailingZeros(0) returns 64. As Values is guaranteed to have more than
6153 // one element and LLVM disallows duplicate cases, Shift is guaranteed to be
6154 // less than 64.
6155 unsigned Shift = 64;
6156 for (auto &V : Values)
6157 Shift = std::min(Shift, countTrailingZeros((uint64_t)V));
6158 assert(Shift < 64)((Shift < 64) ? static_cast<void> (0) : __assert_fail
("Shift < 64", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp"
, 6158, __PRETTY_FUNCTION__))
;
6159 if (Shift > 0)
6160 for (auto &V : Values)
6161 V = (int64_t)((uint64_t)V >> Shift);
6162
6163 if (!isSwitchDense(Values))
6164 // Transform didn't create a dense switch.
6165 return false;
6166
6167 // The obvious transform is to shift the switch condition right and emit a
6168 // check that the condition actually cleanly divided by GCD, i.e.
6169 // C & (1 << Shift - 1) == 0
6170 // inserting a new CFG edge to handle the case where it didn't divide cleanly.
6171 //
6172 // A cheaper way of doing this is a simple ROTR(C, Shift). This performs the
6173 // shift and puts the shifted-off bits in the uppermost bits. If any of these
6174 // are nonzero then the switch condition will be very large and will hit the
6175 // default case.
6176
6177 auto *Ty = cast<IntegerType>(SI->getCondition()->getType());
6178 Builder.SetInsertPoint(SI);
6179 auto *ShiftC = ConstantInt::get(Ty, Shift);
6180 auto *Sub = Builder.CreateSub(SI->getCondition(), ConstantInt::get(Ty, Base));
6181 auto *LShr = Builder.CreateLShr(Sub, ShiftC);
6182 auto *Shl = Builder.CreateShl(Sub, Ty->getBitWidth() - Shift);
6183 auto *Rot = Builder.CreateOr(LShr, Shl);
6184 SI->replaceUsesOfWith(SI->getCondition(), Rot);
6185
6186 for (auto Case : SI->cases()) {
6187 auto *Orig = Case.getCaseValue();
6188 auto Sub = Orig->getValue() - APInt(Ty->getBitWidth(), Base);
6189 Case.setValue(
6190 cast<ConstantInt>(ConstantInt::get(Ty, Sub.lshr(ShiftC->getValue()))));
6191 }
6192 return true;
6193}
6194
/// Simplify a switch terminator.
///
/// Candidate transformations are attempted in a fixed order; the first one
/// that fires returns via requestResimplify() rather than continuing, since
/// the CFG may have changed underneath the later transforms.  Returns true if
/// anything changed.
bool SimplifyCFGOpt::simplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) {
  BasicBlock *BB = SI->getParent();

  if (isValueEqualityComparison(SI)) {
    // If we only have one predecessor, and if it is a branch on this value,
    // see if that predecessor totally determines the outcome of this switch.
    if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
      if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder))
        return requestResimplify();

    // A switch on a select can be split into switches on each select arm.
    Value *Cond = SI->getCondition();
    if (SelectInst *Select = dyn_cast<SelectInst>(Cond))
      if (SimplifySwitchOnSelect(SI, Select))
        return requestResimplify();

    // If the block only contains the switch, see if we can fold the block
    // away into any preds.
    if (SI == &*BB->instructionsWithoutDebug().begin())
      if (FoldValueComparisonIntoPredecessors(SI, Builder))
        return requestResimplify();
  }

  // Try to transform the switch into an icmp and a branch.
  if (TurnSwitchRangeIntoICmp(SI, Builder))
    return requestResimplify();

  // Remove unreachable cases.
  if (eliminateDeadSwitchCases(SI, DTU, Options.AC, DL))
    return requestResimplify();

  if (switchToSelect(SI, Builder, DTU, DL, TTI))
    return requestResimplify();

  if (Options.ForwardSwitchCondToPhi && ForwardSwitchConditionToPHI(SI))
    return requestResimplify();

  // The conversion from switch to lookup tables results in difficult-to-analyze
  // code and makes pruning branches much harder. This is a problem if the
  // switch expression itself can still be restricted as a result of inlining or
  // CVP. Therefore, only apply this transformation during late stages of the
  // optimisation pipeline.
  if (Options.ConvertSwitchToLookupTable &&
      SwitchToLookupTable(SI, Builder, DTU, DL, TTI))
    return requestResimplify();

  if (ReduceSwitchRange(SI, Builder, DL, TTI))
    return requestResimplify();

  return false;
}
6245
/// Simplify an indirectbr terminator: drop duplicate or impossible
/// destinations, degenerate zero/one-destination forms into
/// unreachable/direct branches, and fold an indirectbr on a select.
/// Returns true if the CFG was changed.
bool SimplifyCFGOpt::simplifyIndirectBr(IndirectBrInst *IBI) {
  BasicBlock *BB = IBI->getParent();
  bool Changed = false;

  // Eliminate redundant destinations.
  SmallPtrSet<Value *, 8> Succs;
  SmallSetVector<BasicBlock *, 8> RemovedSuccs;
  for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
    BasicBlock *Dest = IBI->getDestination(i);
    // A destination is removable if its address is never taken (an indirectbr
    // can never actually reach it) or if it already appeared earlier in the
    // destination list.  Only the address-not-taken case severs the CFG edge
    // entirely, so only those are recorded for the domtree update below.
    if (!Dest->hasAddressTaken() || !Succs.insert(Dest).second) {
      if (!Dest->hasAddressTaken())
        RemovedSuccs.insert(Dest);
      Dest->removePredecessor(BB);
      IBI->removeDestination(i);
      // removeDestination() compacts the list, so re-examine this index.
      --i;
      --e;
      Changed = true;
    }
  }

  if (DTU) {
    std::vector<DominatorTree::UpdateType> Updates;
    Updates.reserve(RemovedSuccs.size());
    for (auto *RemovedSucc : RemovedSuccs)
      Updates.push_back({DominatorTree::Delete, BB, RemovedSucc});
    DTU->applyUpdates(Updates);
  }

  if (IBI->getNumDestinations() == 0) {
    // If the indirectbr has no successors, change it to unreachable.
    new UnreachableInst(IBI->getContext(), IBI);
    EraseTerminatorAndDCECond(IBI);
    return true;
  }

  if (IBI->getNumDestinations() == 1) {
    // If the indirectbr has one successor, change it to a direct branch.
    BranchInst::Create(IBI->getDestination(0), IBI);
    EraseTerminatorAndDCECond(IBI);
    return true;
  }

  if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) {
    if (SimplifyIndirectBrOnSelect(IBI, SI))
      return requestResimplify();
  }
  return Changed;
}
6294
6295/// Given an block with only a single landing pad and a unconditional branch
6296/// try to find another basic block which this one can be merged with. This
6297/// handles cases where we have multiple invokes with unique landing pads, but
6298/// a shared handler.
6299///
6300/// We specifically choose to not worry about merging non-empty blocks
6301/// here. That is a PRE/scheduling problem and is best solved elsewhere. In
6302/// practice, the optimizer produces empty landing pad blocks quite frequently
6303/// when dealing with exception dense code. (see: instcombine, gvn, if-else
6304/// sinking in this file)
6305///
6306/// This is primarily a code size optimization. We need to avoid performing
6307/// any transform which might inhibit optimization (such as our ability to
6308/// specialize a particular handler via tail commoning). We do this by not
6309/// merging any blocks which require us to introduce a phi. Since the same
6310/// values are flowing through both blocks, we don't lose any ability to
6311/// specialize. If anything, we make such specialization more likely.
6312///
6313/// TODO - This transformation could remove entries from a phi in the target
6314/// block when the inputs in the phi are the same for the two blocks being
6315/// merged. In some cases, this could result in removal of the PHI entirely.
static bool TryToMergeLandingPad(LandingPadInst *LPad, BranchInst *BI,
                                 BasicBlock *BB, DomTreeUpdater *DTU) {
  // BB is known to end in an unconditional branch, so it has exactly one
  // successor.
  auto Succ = BB->getUniqueSuccessor();
  assert(Succ);
  // If there's a phi in the successor block, we'd likely have to introduce
  // a phi into the merged landing pad block.
  if (isa<PHINode>(*Succ->begin()))
    return false;

  // Search the other predecessors of our successor for a block that is an
  // exact duplicate of ours: an identical landing pad followed (modulo debug
  // intrinsics) by an identical unconditional branch.
  for (BasicBlock *OtherPred : predecessors(Succ)) {
    if (BB == OtherPred)
      continue;
    BasicBlock::iterator I = OtherPred->begin();
    LandingPadInst *LPad2 = dyn_cast<LandingPadInst>(I);
    if (!LPad2 || !LPad2->isIdenticalTo(LPad))
      continue;
    for (++I; isa<DbgInfoIntrinsic>(I); ++I)
      ;
    BranchInst *BI2 = dyn_cast<BranchInst>(I);
    if (!BI2 || !BI2->isIdenticalTo(BI))
      continue;

    std::vector<DominatorTree::UpdateType> Updates;

    // We've found an identical block.  Update our predecessors to take that
    // path instead and make ourselves dead.
    SmallPtrSet<BasicBlock *, 16> Preds;
    Preds.insert(pred_begin(BB), pred_end(BB));
    for (BasicBlock *Pred : Preds) {
      // A landing pad can only be reached as the unwind edge of an invoke.
      InvokeInst *II = cast<InvokeInst>(Pred->getTerminator());
      assert(II->getNormalDest() != BB && II->getUnwindDest() == BB &&
             "unexpected successor");
      II->setUnwindDest(OtherPred);
      Updates.push_back({DominatorTree::Insert, Pred, OtherPred});
      Updates.push_back({DominatorTree::Delete, Pred, BB});
    }

    // The debug info in OtherPred doesn't cover the merged control flow that
    // used to go through BB. We need to delete it or update it.
    for (auto I = OtherPred->begin(), E = OtherPred->end(); I != E;) {
      Instruction &Inst = *I;
      I++;
      if (isa<DbgInfoIntrinsic>(Inst))
        Inst.eraseFromParent();
    }

    SmallPtrSet<BasicBlock *, 16> Succs;
    Succs.insert(succ_begin(BB), succ_end(BB));
    for (BasicBlock *Succ : Succs) {
      Succ->removePredecessor(BB);
      Updates.push_back({DominatorTree::Delete, BB, Succ});
    }

    // BB is now dead: replace its terminator with unreachable.
    IRBuilder<> Builder(BI);
    Builder.CreateUnreachable();
    BI->eraseFromParent();
    if (DTU)
      DTU->applyUpdates(Updates);
    return true;
  }
  return false;
}
6378
6379bool SimplifyCFGOpt::simplifyBranch(BranchInst *Branch, IRBuilder<> &Builder) {
6380 return Branch->isUnconditional() ? simplifyUncondBranch(Branch, Builder)
6381 : simplifyCondBranch(Branch, Builder);
6382}
6383
/// Simplify a basic block that ends in an unconditional branch.
/// Returns true if the CFG was changed.
bool SimplifyCFGOpt::simplifyUncondBranch(BranchInst *BI,
                                          IRBuilder<> &Builder) {
  BasicBlock *BB = BI->getParent();
  BasicBlock *Succ = BI->getSuccessor(0);

  // If the Terminator is the only non-phi instruction, simplify the block.
  // If LoopHeader is provided, check if the block or its successor is a loop
  // header. (This is for early invocations before loop simplify and
  // vectorization to keep canonical loop forms for nested loops. These blocks
  // can be eliminated when the pass is invoked later in the back-end.)
  // Note that if BB has only one predecessor then we do not introduce new
  // backedge, so we can eliminate BB.
  bool NeedCanonicalLoop =
      Options.NeedCanonicalLoop &&
      (LoopHeaders && BB->hasNPredecessorsOrMore(2) &&
       (LoopHeaders->count(BB) || LoopHeaders->count(Succ)));
  BasicBlock::iterator I = BB->getFirstNonPHIOrDbg()->getIterator();
  if (I->isTerminator() && BB != &BB->getParent()->getEntryBlock() &&
      !NeedCanonicalLoop && TryToSimplifyUncondBranchFromEmptyBlock(BB, DTU))
    return true;

  // If the only instruction in the block is a seteq/setne comparison against a
  // constant, try to simplify the block.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(I))
    if (ICI->isEquality() && isa<ConstantInt>(ICI->getOperand(1))) {
      // Skip debug intrinsics between the compare and the terminator.  Note
      // that I intentionally stays advanced for the landing-pad check below.
      for (++I; isa<DbgInfoIntrinsic>(I); ++I)
        ;
      if (I->isTerminator() &&
          tryToSimplifyUncondBranchWithICmpInIt(ICI, Builder))
        return true;
    }

  // See if we can merge an empty landing pad block with another which is
  // equivalent.
  if (LandingPadInst *LPad = dyn_cast<LandingPadInst>(I)) {
    for (++I; isa<DbgInfoIntrinsic>(I); ++I)
      ;
    if (I->isTerminator() && TryToMergeLandingPad(LPad, BI, BB, DTU))
      return true;
  }

  // If this basic block is ONLY a compare and a branch, and if a predecessor
  // branches to us and our successor, fold the comparison into the
  // predecessor and use logical operations to update the incoming value
  // for PHI nodes in common successor.
  if (FoldBranchToCommonDest(BI, DTU, /*MSSAU=*/nullptr, &TTI,
                             Options.BonusInstThreshold))
    return requestResimplify();
  return false;
}
6434
6435static BasicBlock *allPredecessorsComeFromSameSource(BasicBlock *BB) {
6436 BasicBlock *PredPred = nullptr;
6437 for (auto *P : predecessors(BB)) {
6438 BasicBlock *PPred = P->getSinglePredecessor();
6439 if (!PPred || (PredPred && PredPred != PPred))
6440 return nullptr;
6441 PredPred = PPred;
6442 }
6443 return PredPred;
6444}
6445
/// Simplify a basic block that ends in a conditional branch.
///
/// Transformations are attempted in a fixed order; the first one that fires
/// requests another round of simplification rather than continuing, since the
/// CFG may have changed.  Returns true if anything changed.
bool SimplifyCFGOpt::simplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
  BasicBlock *BB = BI->getParent();
  if (!Options.SimplifyCondBranch)
    return false;

  // Conditional branch
  if (isValueEqualityComparison(BI)) {
    // If we only have one predecessor, and if it is a branch on this value,
    // see if that predecessor totally determines the outcome of this
    // switch.
    if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
      if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder))
        return requestResimplify();

    // This block must be empty, except for the setcond inst, if it exists.
    // Ignore dbg intrinsics.
    auto I = BB->instructionsWithoutDebug().begin();
    if (&*I == BI) {
      // The branch is the first (only) instruction.
      if (FoldValueComparisonIntoPredecessors(BI, Builder))
        return requestResimplify();
    } else if (&*I == cast<Instruction>(BI->getCondition())) {
      // The compare feeding the branch is the first instruction; the branch
      // must be the second.
      ++I;
      if (&*I == BI && FoldValueComparisonIntoPredecessors(BI, Builder))
        return requestResimplify();
    }
  }

  // Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction.
  if (SimplifyBranchOnICmpChain(BI, Builder, DL))
    return true;

  // If this basic block has dominating predecessor blocks and the dominating
  // blocks' conditions imply BI's condition, we know the direction of BI.
  Optional<bool> Imp = isImpliedByDomCondition(BI->getCondition(), BI, DL);
  if (Imp) {
    // Turn this into a branch on constant.
    auto *OldCond = BI->getCondition();
    ConstantInt *TorF = *Imp ? ConstantInt::getTrue(BB->getContext())
                             : ConstantInt::getFalse(BB->getContext());
    BI->setCondition(TorF);
    RecursivelyDeleteTriviallyDeadInstructions(OldCond);
    return requestResimplify();
  }

  // If this basic block is ONLY a compare and a branch, and if a predecessor
  // branches to us and one of our successors, fold the comparison into the
  // predecessor and use logical operations to pick the right destination.
  if (FoldBranchToCommonDest(BI, DTU, /*MSSAU=*/nullptr, &TTI,
                             Options.BonusInstThreshold))
    return requestResimplify();

  // We have a conditional branch to two blocks that are only reachable
  // from BI. We know that the condbr dominates the two blocks, so see if
  // there is any identical code in the "then" and "else" blocks. If so, we
  // can hoist it up to the branching block.
  if (BI->getSuccessor(0)->getSinglePredecessor()) {
    if (BI->getSuccessor(1)->getSinglePredecessor()) {
      if (HoistCommon && Options.HoistCommonInsts)
        if (HoistThenElseCodeToIf(BI, TTI))
          return requestResimplify();
    } else {
      // If Successor #1 has multiple preds, we may be able to conditionally
      // execute Successor #0 if it branches to Successor #1.
      Instruction *Succ0TI = BI->getSuccessor(0)->getTerminator();
      if (Succ0TI->getNumSuccessors() == 1 &&
          Succ0TI->getSuccessor(0) == BI->getSuccessor(1))
        if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0), TTI))
          return requestResimplify();
    }
  } else if (BI->getSuccessor(1)->getSinglePredecessor()) {
    // If Successor #0 has multiple preds, we may be able to conditionally
    // execute Successor #1 if it branches to Successor #0.
    Instruction *Succ1TI = BI->getSuccessor(1)->getTerminator();
    if (Succ1TI->getNumSuccessors() == 1 &&
        Succ1TI->getSuccessor(0) == BI->getSuccessor(0))
      if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1), TTI))
        return requestResimplify();
  }

  // If this is a branch on a phi node in the current block, thread control
  // through this block if any PHI node entries are constants.
  if (PHINode *PN = dyn_cast<PHINode>(BI->getCondition()))
    if (PN->getParent() == BI->getParent())
      if (FoldCondBranchOnPHI(BI, DTU, DL, Options.AC))
        return requestResimplify();

  // Scan predecessor blocks for conditional branches.
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
      if (PBI != BI && PBI->isConditional())
        if (SimplifyCondBranchToCondBranch(PBI, BI, DTU, DL, TTI))
          return requestResimplify();

  // Look for diamond patterns.
  if (MergeCondStores)
    if (BasicBlock *PrevBB = allPredecessorsComeFromSameSource(BB))
      if (BranchInst *PBI = dyn_cast<BranchInst>(PrevBB->getTerminator()))
        if (PBI != BI && PBI->isConditional())
          if (mergeConditionalStores(PBI, BI, DTU, DL, TTI))
            return requestResimplify();

  return false;
}
6549
/// Check if passing a value to an instruction will cause undefined behavior.
///
/// \param V  the incoming value; only null and undef constants are
///           interesting, anything else conservatively returns false.
/// \param I  the instruction receiving \p V.
/// \param PtrValueMayBeModified  true when \p V may have been changed on the
///           way to \p I (e.g. via a non-trivial GEP), which disables the
///           nonnull-argument reasoning for calls.
static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I,
                                          bool PtrValueMayBeModified = false) {
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return false;

  if (I->use_empty())
    return false;

  if (C->isNullValue() || isa<UndefValue>(C)) {
    // Only look at the first use, avoid hurting compile time with long uselists
    User *Use = *I->user_begin();

    // Now make sure that there are no instructions in between that can alter
    // control flow (eg. calls)
    // NOTE(review): this walk assumes the first use is an Instruction in the
    // same basic block as I; when it is not, the end() check below bails out
    // conservatively before dereferencing -- confirm against callers.
    for (BasicBlock::iterator
             i = ++BasicBlock::iterator(I),
             UI = BasicBlock::iterator(dyn_cast<Instruction>(Use));
         i != UI; ++i)
      if (i == I->getParent()->end() || i->mayHaveSideEffects())
        return false;

    // Look through GEPs. A load from a GEP derived from NULL is still undefined
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Use))
      if (GEP->getPointerOperand() == I) {
        // A non-inbounds or non-zero-index GEP may change the pointer value,
        // which invalidates nonnull reasoning further down the use chain.
        if (!GEP->isInBounds() || !GEP->hasAllZeroIndices())
          PtrValueMayBeModified = true;
        return passingValueIsAlwaysUndefined(V, GEP, PtrValueMayBeModified);
      }

    // Look through bitcasts.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(Use))
      return passingValueIsAlwaysUndefined(V, BC, PtrValueMayBeModified);

    // Load from null is undefined.
    if (LoadInst *LI = dyn_cast<LoadInst>(Use))
      if (!LI->isVolatile())
        return !NullPointerIsDefined(LI->getFunction(),
                                     LI->getPointerAddressSpace());

    // Store to null is undefined.
    if (StoreInst *SI = dyn_cast<StoreInst>(Use))
      if (!SI->isVolatile())
        return (!NullPointerIsDefined(SI->getFunction(),
                                      SI->getPointerAddressSpace())) &&
               SI->getPointerOperand() == I;

    if (auto *CB = dyn_cast<CallBase>(Use)) {
      // Null is a defined value in functions where null pointers are allowed
      // (e.g. address spaces where 0 is valid), so nothing is UB there.
      if (C->isNullValue() && NullPointerIsDefined(CB->getFunction()))
        return false;
      // A call to null is undefined.
      if (CB->getCalledOperand() == I)
        return true;

      if (C->isNullValue()) {
        for (const llvm::Use &Arg : CB->args())
          if (Arg == I) {
            unsigned ArgIdx = CB->getArgOperandNo(&Arg);
            if (CB->paramHasAttr(ArgIdx, Attribute::NonNull) &&
                CB->paramHasAttr(ArgIdx, Attribute::NoUndef)) {
              // Passing null to a nonnull+noundef argument is undefined,
              // unless the pointer may have been modified along the way.
              return !PtrValueMayBeModified;
            }
          }
      } else if (isa<UndefValue>(C)) {
        // Passing undef to a noundef argument is undefined.
        for (const llvm::Use &Arg : CB->args())
          if (Arg == I) {
            unsigned ArgIdx = CB->getArgOperandNo(&Arg);
            if (CB->paramHasAttr(ArgIdx, Attribute::NoUndef)) {
              // Passing undef to a noundef argument is undefined.
              return true;
            }
          }
      }
    }
  }
  return false;
}
6629
/// If BB has an incoming value that will always trigger undefined behavior
/// (eg. null pointer dereference), remove the branch leading here.
/// Returns true if an edge was removed (at most one per invocation).
static bool removeUndefIntroducingPredecessor(BasicBlock *BB,
                                              DomTreeUpdater *DTU) {
  for (PHINode &PHI : BB->phis())
    for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i)
      if (passingValueIsAlwaysUndefined(PHI.getIncomingValue(i), &PHI)) {
        BasicBlock *Predecessor = PHI.getIncomingBlock(i);
        Instruction *T = Predecessor->getTerminator();
        IRBuilder<> Builder(T);
        if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
          // Drop PHI entries for the edge before rewriting the terminator.
          BB->removePredecessor(Predecessor);
          // Turn unconditional branches into unreachables and remove the dead
          // destination from conditional branches.
          if (BI->isUnconditional())
            Builder.CreateUnreachable();
          else
            Builder.CreateBr(BI->getSuccessor(0) == BB ? BI->getSuccessor(1)
                                                       : BI->getSuccessor(0));
          BI->eraseFromParent();
          if (DTU)
            DTU->applyUpdates({{DominatorTree::Delete, Predecessor, BB}});
          return true;
        }
        // TODO: SwitchInst.
      }

  return false;
}
6659
/// Run one round of CFG simplification over \p BB: terminator-independent
/// cleanups first, then a dispatch on the terminator kind.  Returns true if
/// anything changed.
bool SimplifyCFGOpt::simplifyOnceImpl(BasicBlock *BB) {
  bool Changed = false;

  assert(BB && BB->getParent() && "Block not embedded in function!");
  assert(BB->getTerminator() && "Degenerate basic block encountered!");

  // Remove basic blocks that have no predecessors (except the entry block)...
  // or that just have themself as a predecessor. These are unreachable.
  if ((pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) ||
      BB->getSinglePredecessor() == BB) {
    LLVM_DEBUG(dbgs() << "Removing BB: \n" << *BB);
    DeleteDeadBlock(BB, DTU);
    return true;
  }

  // Check to see if we can constant propagate this terminator instruction
  // away...
  Changed |= ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true,
                                    /*TLI=*/nullptr, DTU);

  // Check for and eliminate duplicate PHI nodes in this block.
  Changed |= EliminateDuplicatePHINodes(BB);

  // Check for and remove branches that will always cause undefined behavior.
  Changed |= removeUndefIntroducingPredecessor(BB, DTU);

  // Merge basic blocks into their predecessor if there is only one distinct
  // pred, and if there is only one distinct successor of the predecessor, and
  // if there are no PHI nodes.
  if (MergeBlockIntoPredecessor(BB, DTU))
    return true;

  if (SinkCommon && Options.SinkCommonInsts)
    Changed |= SinkCommonCodeFromPredecessors(BB, DTU);

  IRBuilder<> Builder(BB);

  if (Options.FoldTwoEntryPHINode) {
    // If there is a trivial two-entry PHI node in this basic block, and we can
    // eliminate it, do so now.
    if (auto *PN = dyn_cast<PHINode>(BB->begin()))
      if (PN->getNumIncomingValues() == 2)
        Changed |= FoldTwoEntryPHINode(PN, TTI, DTU, DL);
  }

  // Dispatch on the terminator kind to the matching simplification routine.
  Instruction *Terminator = BB->getTerminator();
  Builder.SetInsertPoint(Terminator);
  switch (Terminator->getOpcode()) {
  case Instruction::Br:
    Changed |= simplifyBranch(cast<BranchInst>(Terminator), Builder);
    break;
  case Instruction::Ret:
    Changed |= simplifyReturn(cast<ReturnInst>(Terminator), Builder);
    break;
  case Instruction::Resume:
    Changed |= simplifyResume(cast<ResumeInst>(Terminator), Builder);
    break;
  case Instruction::CleanupRet:
    Changed |= simplifyCleanupReturn(cast<CleanupReturnInst>(Terminator));
    break;
  case Instruction::Switch:
    Changed |= simplifySwitch(cast<SwitchInst>(Terminator), Builder);
    break;
  case Instruction::Unreachable:
    Changed |= simplifyUnreachable(cast<UnreachableInst>(Terminator));
    break;
  case Instruction::IndirectBr:
    Changed |= simplifyIndirectBr(cast<IndirectBrInst>(Terminator));
    break;
  }

  return Changed;
}
6733
/// Wrapper around simplifyOnceImpl() that, in expensive-checks builds,
/// verifies the dominator tree is still valid after the round.
bool SimplifyCFGOpt::simplifyOnce(BasicBlock *BB) {
  bool Changed = simplifyOnceImpl(BB);

  assert((!RequireAndPreserveDomTree ||
          (DTU &&
           DTU->getDomTree().verify(DominatorTree::VerificationLevel::Full))) &&
         "Failed to maintain validity of domtree!");

  return Changed;
}
6744
/// Iterate simplification of \p BB until no transform requests another
/// round.  Returns true if any round changed the CFG.
bool SimplifyCFGOpt::run(BasicBlock *BB) {
  assert((!RequireAndPreserveDomTree ||
          (DTU &&
           DTU->getDomTree().verify(DominatorTree::VerificationLevel::Full))) &&
         "Original domtree is invalid?");

  bool Changed = false;

  // Repeatedly simplify BB as long as resimplification is requested.
  do {
    Resimplify = false;

    // Perform one round of simplification. Resimplify flag will be set if
    // another iteration is requested.
    Changed |= simplifyOnce(BB);
  } while (Resimplify);

  return Changed;
}
6764
6765bool llvm::simplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
6766 DomTreeUpdater *DTU, const SimplifyCFGOptions &Options,
6767 SmallPtrSetImpl<BasicBlock *> *LoopHeaders) {
6768 return SimplifyCFGOpt(TTI, RequireAndPreserveDomTree ? DTU : nullptr,
6769 BB->getModule()->getDataLayout(), LoopHeaders, Options)
6770 .run(BB);
6771}

/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/DerivedTypes.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/InstrTypes.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/OperandTraits.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Use.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <iterator>
48
49namespace llvm {
50
51class APInt;
52class ConstantInt;
53class DataLayout;
54class LLVMContext;
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
/// An instruction to allocate memory on the stack.
class AllocaInst : public UnaryInstruction {
  /// Element type this alloca reserves storage for.  Tracked separately
  /// because the instruction's own type is the *pointer* to the allocation.
  Type *AllocatedType;

  // Packed subclass-data bitfields: log2-encoded alignment plus flags for
  // inalloca and swifterror usage; they must occupy contiguous bits.
  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  // Constructors, in pairs taking either an insertion point (Instruction*) or
  // an insert-at-end block; variants without ArraySize allocate one element.
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Get allocation size in bits. Returns None if size can't be determined,
  /// e.g. in case of a VLA.
  Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment of the allocated memory (stored log2-encoded).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  // FIXME: Remove this one transition to Align is over.
  unsigned getAlignment() const { return getAlign().value(); }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
172/// An instruction for reading from memory. This uses the SubclassData field in
173/// Value to store whether or not the load is volatile.
174class LoadInst : public UnaryInstruction {
  // SubclassData bit layout (contiguity enforced by the static_assert below):
  //   [volatile : 1][log2(alignment)][atomic ordering]
  // The sync-scope ID does not fit here and lives in the SSID member instead.
175 using VolatileField = BoolBitfieldElementT<0>;
176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
178 static_assert(
179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
180 "Bitfields must be contiguous");
181
182 void AssertOK();
183
184protected:
185 // Note: Instruction needs to be a friend here to call cloneImpl.
186 friend class Instruction;
187
188 LoadInst *cloneImpl() const;
189
190public:
  // Constructors differ along two axes: the insertion point (before a given
  // Instruction vs. at the end of a BasicBlock) and how many of
  // {volatile, alignment, atomic ordering, sync scope} are given explicitly.
191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
192 Instruction *InsertBefore);
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 BasicBlock *InsertAtEnd);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 Align Align, Instruction *InsertBefore = nullptr);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, BasicBlock *InsertAtEnd);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, AtomicOrdering Order,
204 SyncScope::ID SSID = SyncScope::System,
205 Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
208 BasicBlock *InsertAtEnd);
209
210 /// Return true if this is a load from a volatile memory location.
211 bool isVolatile() const { return getSubclassData<VolatileField>(); }
212
213 /// Specify whether this is a volatile load or not.
214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
215
216 /// Return the alignment of the access that is being performed.
217 /// FIXME: Remove this function once transition to Align is over.
218 /// Use getAlign() instead.
219 unsigned getAlignment() const { return getAlign().value(); }
220
221 /// Return the alignment of the access that is being performed.
  /// (The bitfield stores log2 of the alignment; decode with a shift.)
222 Align getAlign() const {
223 return Align(1ULL << (getSubclassData<AlignmentField>()));
224 }
225
226 void setAlignment(Align Align) {
227 setSubclassData<AlignmentField>(Log2(Align));
228 }
229
230 /// Returns the ordering constraint of this load instruction.
231 AtomicOrdering getOrdering() const {
232 return getSubclassData<OrderingField>();
233 }
234 /// Sets the ordering constraint of this load instruction. May not be Release
235 /// or AcquireRelease.
236 void setOrdering(AtomicOrdering Ordering) {
237 setSubclassData<OrderingField>(Ordering);
238 }
239
240 /// Returns the synchronization scope ID of this load instruction.
241 SyncScope::ID getSyncScopeID() const {
242 return SSID;
243 }
244
245 /// Sets the synchronization scope ID of this load instruction.
246 void setSyncScopeID(SyncScope::ID SSID) {
247 this->SSID = SSID;
248 }
249
250 /// Sets the ordering constraint and the synchronization scope ID of this load
251 /// instruction.
252 void setAtomic(AtomicOrdering Ordering,
253 SyncScope::ID SSID = SyncScope::System) {
254 setOrdering(Ordering);
255 setSyncScopeID(SSID);
256 }
257
  /// A "simple" load is neither atomic nor volatile.
258 bool isSimple() const { return !isAtomic() && !isVolatile(); }
259
  /// A load is "unordered" if it is non-volatile and its ordering is at most
  /// AtomicOrdering::Unordered (i.e. not atomic, or only unordered-atomic).
260 bool isUnordered() const {
261 return (getOrdering() == AtomicOrdering::NotAtomic ||
262 getOrdering() == AtomicOrdering::Unordered) &&
263 !isVolatile();
264 }
265
  // The pointer being loaded from is the sole operand (operand 0).
266 Value *getPointerOperand() { return getOperand(0); }
267 const Value *getPointerOperand() const { return getOperand(0); }
268 static unsigned getPointerOperandIndex() { return 0U; }
269 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
270
271 /// Returns the address space of the pointer operand.
272 unsigned getPointerAddressSpace() const {
273 return getPointerOperandType()->getPointerAddressSpace();
274 }
275
276 // Methods for support type inquiry through isa, cast, and dyn_cast:
277 static bool classof(const Instruction *I) {
278 return I->getOpcode() == Instruction::Load;
279 }
280 static bool classof(const Value *V) {
281 return isa<Instruction>(V) && classof(cast<Instruction>(V));
282 }
283
284private:
285 // Shadow Instruction::setInstructionSubclassData with a private forwarding
286 // method so that subclasses cannot accidentally use it.
287 template <typename Bitfield>
288 void setSubclassData(typename Bitfield::Type Value) {
289 Instruction::setSubclassData<Bitfield>(Value);
290 }
291
292 /// The synchronization scope ID of this load instruction. Not quite enough
293 /// room in SubClassData for everything, so synchronization scope ID gets its
294 /// own field.
295 SyncScope::ID SSID;
296};
297
298//===----------------------------------------------------------------------===//
299// StoreInst Class
300//===----------------------------------------------------------------------===//
301
302/// An instruction for storing to memory.
303class StoreInst : public Instruction {
  // SubclassData bit layout (contiguity enforced by the static_assert below):
  //   [volatile : 1][log2(alignment)][atomic ordering]
  // The sync-scope ID does not fit here and lives in the SSID member instead.
304 using VolatileField = BoolBitfieldElementT<0>;
305 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
306 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
307 static_assert(
308 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
309 "Bitfields must be contiguous");
310
311 void AssertOK();
312
313protected:
314 // Note: Instruction needs to be a friend here to call cloneImpl.
315 friend class Instruction;
316
317 StoreInst *cloneImpl() const;
318
319public:
  // Constructors differ along two axes: the insertion point (before a given
  // Instruction vs. at the end of a BasicBlock) and how many of
  // {volatile, alignment, atomic ordering, sync scope} are given explicitly.
320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
325 Instruction *InsertBefore = nullptr);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 BasicBlock *InsertAtEnd);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
329 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
330 Instruction *InsertBefore = nullptr);
331 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
332 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
333
334 // allocate space for exactly two operands
335 void *operator new(size_t s) {
336 return User::operator new(s, 2);
337 }
338
339 /// Return true if this is a store to a volatile memory location.
340 bool isVolatile() const { return getSubclassData<VolatileField>(); }
341
342 /// Specify whether this is a volatile store or not.
343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344
345 /// Transparently provide more efficient getOperand methods.
346 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
347
348 /// Return the alignment of the access that is being performed.
349 /// FIXME: Remove this function once transition to Align is over.
350 /// Use getAlign() instead.
351 unsigned getAlignment() const { return getAlign().value(); }
352
  /// Return the alignment of the access that is being performed.
  /// (The bitfield stores log2 of the alignment; decode with a shift.)
353 Align getAlign() const {
354 return Align(1ULL << (getSubclassData<AlignmentField>()));
355 }
356
357 void setAlignment(Align Align) {
358 setSubclassData<AlignmentField>(Log2(Align));
359 }
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return getSubclassData<OrderingField>();
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setSubclassData<OrderingField>(Ordering);
370 }
371
372 /// Returns the synchronization scope ID of this store instruction.
373 SyncScope::ID getSyncScopeID() const {
374 return SSID;
375 }
376
377 /// Sets the synchronization scope ID of this store instruction.
378 void setSyncScopeID(SyncScope::ID SSID) {
379 this->SSID = SSID;
380 }
381
382 /// Sets the ordering constraint and the synchronization scope ID of this
383 /// store instruction.
384 void setAtomic(AtomicOrdering Ordering,
385 SyncScope::ID SSID = SyncScope::System) {
386 setOrdering(Ordering);
387 setSyncScopeID(SSID);
388 }
389
  /// A "simple" store is neither atomic nor volatile.
390 bool isSimple() const { return !isAtomic() && !isVolatile(); }
391
  /// A store is "unordered" if it is non-volatile and its ordering is at most
  /// AtomicOrdering::Unordered (i.e. not atomic, or only unordered-atomic).
392 bool isUnordered() const {
393 return (getOrdering() == AtomicOrdering::NotAtomic ||
394 getOrdering() == AtomicOrdering::Unordered) &&
395 !isVolatile();
396 }
397
  // Operand order: 0 = the value being stored, 1 = the destination pointer.
398 Value *getValueOperand() { return getOperand(0); }
399 const Value *getValueOperand() const { return getOperand(0); }
400
401 Value *getPointerOperand() { return getOperand(1); }
402 const Value *getPointerOperand() const { return getOperand(1); }
403 static unsigned getPointerOperandIndex() { return 1U; }
404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405
406 /// Returns the address space of the pointer operand.
407 unsigned getPointerAddressSpace() const {
408 return getPointerOperandType()->getPointerAddressSpace();
409 }
410
411 // Methods for support type inquiry through isa, cast, and dyn_cast:
412 static bool classof(const Instruction *I) {
413 return I->getOpcode() == Instruction::Store;
414 }
415 static bool classof(const Value *V) {
416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
417 }
418
419private:
420 // Shadow Instruction::setInstructionSubclassData with a private forwarding
421 // method so that subclasses cannot accidentally use it.
422 template <typename Bitfield>
423 void setSubclassData(typename Bitfield::Type Value) {
424 Instruction::setSubclassData<Bitfield>(Value);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
// StoreInst has a fixed operand count of two (stored value, pointer); the
// OperandTraits specialization records that, and the accessor macro below
// stamps out the inline implementations of the operand accessors declared
// inside the class (op_begin/op_end/getOperand/setOperand/getNumOperands).
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { ((i_nocapture < OperandTraits
<StoreInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 437, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<StoreInst>::op_begin(const_cast<StoreInst
*>(this))[i_nocapture].get()); } void StoreInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<StoreInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 437, __PRETTY_FUNCTION__)); OperandTraits<StoreInst>::
op_begin(this)[i_nocapture] = Val_nocapture; } unsigned StoreInst
::getNumOperands() const { return OperandTraits<StoreInst>
::operands(this); } template <int Idx_nocapture> Use &
StoreInst::Op() { return this->OpFrom<Idx_nocapture>
(this); } template <int Idx_nocapture> const Use &StoreInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
  // Only the atomic ordering is kept in SubclassData; the sync-scope ID
  // lives in the SSID member at the bottom of the class.
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t s) {
466 return User::operator new(s, 0);
467 }
468
469 /// Returns the ordering constraint of this fence instruction.
470 AtomicOrdering getOrdering() const {
471 return getSubclassData<OrderingField>();
472 }
473
474 /// Sets the ordering constraint of this fence instruction. May only be
475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476 void setOrdering(AtomicOrdering Ordering) {
477 setSubclassData<OrderingField>(Ordering);
478 }
479
480 /// Returns the synchronization scope ID of this fence instruction.
481 SyncScope::ID getSyncScopeID() const {
482 return SSID;
483 }
484
485 /// Sets the synchronization scope ID of this fence instruction.
486 void setSyncScopeID(SyncScope::ID SSID) {
487 this->SSID = SSID;
488 }
489
490 // Methods for support type inquiry through isa, cast, and dyn_cast:
491 static bool classof(const Instruction *I) {
492 return I->getOpcode() == Instruction::Fence;
493 }
494 static bool classof(const Value *V) {
495 return isa<Instruction>(V) && classof(cast<Instruction>(V));
496 }
497
498private:
499 // Shadow Instruction::setInstructionSubclassData with a private forwarding
500 // method so that subclasses cannot accidentally use it.
501 template <typename Bitfield>
502 void setSubclassData(typename Bitfield::Type Value) {
503 Instruction::setSubclassData<Bitfield>(Value);
504 }
505
506 /// The synchronization scope ID of this fence instruction. Not quite enough
507 /// room in SubClassData for everything, so synchronization scope ID gets its
508 /// own field.
509 SyncScope::ID SSID;
510};
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
516/// An instruction that atomically checks whether a
517/// specified value is in a memory location, and, if it is, stores a new value
518/// there. The value returned by this instruction is a pair containing the
519/// original value as first element, and an i1 indicating success (true) or
520/// failure (false) as second element.
521///
522class AtomicCmpXchgInst : public Instruction {
523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525 SyncScope::ID SSID);
526
  // Local 3-bit encoding of AtomicOrdering used by the ordering bitfields.
527 template <unsigned Offset>
528 using AtomicOrderingBitfieldElement =
529 typename Bitfield::Element<AtomicOrdering, Offset, 3,
530 AtomicOrdering::LAST>;
531
532protected:
533 // Note: Instruction needs to be a friend here to call cloneImpl.
534 friend class Instruction;
535
536 AtomicCmpXchgInst *cloneImpl() const;
537
538public:
539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540 AtomicOrdering SuccessOrdering,
541 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542 Instruction *InsertBefore = nullptr);
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 BasicBlock *InsertAtEnd);
547
548 // allocate space for exactly three operands
549 void *operator new(size_t s) {
550 return User::operator new(s, 3);
551 }
552
  // SubclassData bit layout (contiguity enforced by the static_assert below):
  //   [volatile : 1][weak : 1][success ordering : 3][failure ordering : 3]
  //   [log2(alignment)]
  // The sync-scope ID does not fit here and lives in the SSID member instead.
553 using VolatileField = BoolBitfieldElementT<0>;
554 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
555 using SuccessOrderingField =
556 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
557 using FailureOrderingField =
558 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
559 using AlignmentField =
560 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
561 static_assert(
562 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
563 FailureOrderingField, AlignmentField>(),
564 "Bitfields must be contiguous");
565
566 /// Return the alignment of the memory location that this cmpxchg
567 /// instruction operates on.
  /// (The bitfield stores log2 of the alignment; decode with a shift.)
568 Align getAlign() const {
569 return Align(1ULL << getSubclassData<AlignmentField>());
570 }
571
572 void setAlignment(Align Align) {
573 setSubclassData<AlignmentField>(Log2(Align));
574 }
575
576 /// Return true if this is a cmpxchg from a volatile memory
577 /// location.
578 ///
579 bool isVolatile() const { return getSubclassData<VolatileField>(); }
580
581 /// Specify whether this is a volatile cmpxchg.
582 ///
583 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
584
585 /// Return true if this cmpxchg may spuriously fail.
586 bool isWeak() const { return getSubclassData<WeakField>(); }
587
588 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
589
590 /// Transparently provide more efficient getOperand methods.
591 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
592
593 /// Returns the success ordering constraint of this cmpxchg instruction.
594 AtomicOrdering getSuccessOrdering() const {
595 return getSubclassData<SuccessOrderingField>();
596 }
597
598 /// Sets the success ordering constraint of this cmpxchg instruction.
599 void setSuccessOrdering(AtomicOrdering Ordering) {
600 assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 601, __PRETTY_FUNCTION__))
601 "CmpXchg instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 601, __PRETTY_FUNCTION__))
;
602 setSubclassData<SuccessOrderingField>(Ordering);
603 }
604
605 /// Returns the failure ordering constraint of this cmpxchg instruction.
606 AtomicOrdering getFailureOrdering() const {
607 return getSubclassData<FailureOrderingField>();
608 }
609
610 /// Sets the failure ordering constraint of this cmpxchg instruction.
611 void setFailureOrdering(AtomicOrdering Ordering) {
612 assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 613, __PRETTY_FUNCTION__))
613 "CmpXchg instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 613, __PRETTY_FUNCTION__))
;
614 setSubclassData<FailureOrderingField>(Ordering);
615 }
616
617 /// Returns the synchronization scope ID of this cmpxchg instruction.
618 SyncScope::ID getSyncScopeID() const {
619 return SSID;
620 }
621
622 /// Sets the synchronization scope ID of this cmpxchg instruction.
623 void setSyncScopeID(SyncScope::ID SSID) {
624 this->SSID = SSID;
625 }
626
  // Operand order: 0 = pointer, 1 = expected (compare) value, 2 = new value.
627 Value *getPointerOperand() { return getOperand(0); }
628 const Value *getPointerOperand() const { return getOperand(0); }
629 static unsigned getPointerOperandIndex() { return 0U; }
630
631 Value *getCompareOperand() { return getOperand(1); }
632 const Value *getCompareOperand() const { return getOperand(1); }
633
634 Value *getNewValOperand() { return getOperand(2); }
635 const Value *getNewValOperand() const { return getOperand(2); }
636
637 /// Returns the address space of the pointer operand.
638 unsigned getPointerAddressSpace() const {
639 return getPointerOperand()->getType()->getPointerAddressSpace();
640 }
641
642 /// Returns the strongest permitted ordering on failure, given the
643 /// desired ordering on success.
644 ///
645 /// If the comparison in a cmpxchg operation fails, there is no atomic store
646 /// so release semantics cannot be provided. So this function drops explicit
647 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
648 /// operation would remain SequentiallyConsistent.
649 static AtomicOrdering
650 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
651 switch (SuccessOrdering) {
652 default:
653 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 653)
;
654 case AtomicOrdering::Release:
655 case AtomicOrdering::Monotonic:
656 return AtomicOrdering::Monotonic;
657 case AtomicOrdering::AcquireRelease:
658 case AtomicOrdering::Acquire:
659 return AtomicOrdering::Acquire;
660 case AtomicOrdering::SequentiallyConsistent:
661 return AtomicOrdering::SequentiallyConsistent;
662 }
663 }
664
665 // Methods for support type inquiry through isa, cast, and dyn_cast:
666 static bool classof(const Instruction *I) {
667 return I->getOpcode() == Instruction::AtomicCmpXchg;
668 }
669 static bool classof(const Value *V) {
670 return isa<Instruction>(V) && classof(cast<Instruction>(V));
671 }
672
673private:
674 // Shadow Instruction::setInstructionSubclassData with a private forwarding
675 // method so that subclasses cannot accidentally use it.
676 template <typename Bitfield>
677 void setSubclassData(typename Bitfield::Type Value) {
678 Instruction::setSubclassData<Bitfield>(Value);
679 }
680
681 /// The synchronization scope ID of this cmpxchg instruction. Not quite
682 /// enough room in SubClassData for everything, so synchronization scope ID
683 /// gets its own field.
684 SyncScope::ID SSID;
685};
686
// AtomicCmpXchgInst has a fixed operand count of three (pointer, compare
// value, new value); the OperandTraits specialization records that, and the
// accessor macro below stamps out the inline implementations of the operand
// accessors declared inside the class.
687template <>
688struct OperandTraits<AtomicCmpXchgInst> :
689 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
690};
691
692DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<AtomicCmpXchgInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 692, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<AtomicCmpXchgInst>::op_begin(const_cast
<AtomicCmpXchgInst*>(this))[i_nocapture].get()); } void
AtomicCmpXchgInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<AtomicCmpXchgInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 692, __PRETTY_FUNCTION__)); OperandTraits<AtomicCmpXchgInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
AtomicCmpXchgInst::getNumOperands() const { return OperandTraits
<AtomicCmpXchgInst>::operands(this); } template <int
Idx_nocapture> Use &AtomicCmpXchgInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &AtomicCmpXchgInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
693
694//===----------------------------------------------------------------------===//
695// AtomicRMWInst Class
696//===----------------------------------------------------------------------===//
697
698/// an instruction that atomically reads a memory location,
699/// combines it with another value, and then stores the result back. Returns
700/// the old value.
701///
702class AtomicRMWInst : public Instruction {
703protected:
704 // Note: Instruction needs to be a friend here to call cloneImpl.
705 friend class Instruction;
706
707 AtomicRMWInst *cloneImpl() const;
708
709public:
710 /// This enumeration lists the possible modifications atomicrmw can make. In
711 /// the descriptions, 'p' is the pointer to the instruction's memory location,
712 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
713 /// instruction. These instructions always return 'old'.
714 enum BinOp : unsigned {
715 /// *p = v
716 Xchg,
717 /// *p = old + v
718 Add,
719 /// *p = old - v
720 Sub,
721 /// *p = old & v
722 And,
723 /// *p = ~(old & v)
724 Nand,
725 /// *p = old | v
726 Or,
727 /// *p = old ^ v
728 Xor,
729 /// *p = old >signed v ? old : v
730 Max,
731 /// *p = old <signed v ? old : v
732 Min,
733 /// *p = old >unsigned v ? old : v
734 UMax,
735 /// *p = old <unsigned v ? old : v
736 UMin,
737
738 /// *p = old + v
739 FAdd,
740
741 /// *p = old - v
742 FSub,
743
744 FIRST_BINOP = Xchg,
745 LAST_BINOP = FSub,
746 BAD_BINOP
747 };
748
749private:
750 template <unsigned Offset>
751 using AtomicOrderingBitfieldElement =
752 typename Bitfield::Element<AtomicOrdering, Offset, 3,
753 AtomicOrdering::LAST>;
754
755 template <unsigned Offset>
756 using BinOpBitfieldElement =
757 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
758
759public:
760 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
761 AtomicOrdering Ordering, SyncScope::ID SSID,
762 Instruction *InsertBefore = nullptr);
763 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
764 AtomicOrdering Ordering, SyncScope::ID SSID,
765 BasicBlock *InsertAtEnd);
766
767 // allocate space for exactly two operands
768 void *operator new(size_t s) {
769 return User::operator new(s, 2);
770 }
771
772 using VolatileField = BoolBitfieldElementT<0>;
773 using AtomicOrderingField =
774 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
775 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
776 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
777 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
778 OperationField, AlignmentField>(),
779 "Bitfields must be contiguous");
780
781 BinOp getOperation() const { return getSubclassData<OperationField>(); }
782
783 static StringRef getOperationName(BinOp Op);
784
785 static bool isFPOperation(BinOp Op) {
786 switch (Op) {
787 case AtomicRMWInst::FAdd:
788 case AtomicRMWInst::FSub:
789 return true;
790 default:
791 return false;
792 }
793 }
794
795 void setOperation(BinOp Operation) {
796 setSubclassData<OperationField>(Operation);
797 }
798
799 /// Return the alignment of the memory that is being allocated by the
800 /// instruction.
801 Align getAlign() const {
802 return Align(1ULL << getSubclassData<AlignmentField>());
803 }
804
805 void setAlignment(Align Align) {
806 setSubclassData<AlignmentField>(Log2(Align));
807 }
808
809 /// Return true if this is a RMW on a volatile memory location.
810 ///
811 bool isVolatile() const { return getSubclassData<VolatileField>(); }
812
813 /// Specify whether this is a volatile RMW or not.
814 ///
815 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
816
817 /// Transparently provide more efficient getOperand methods.
818 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
819
820 /// Returns the ordering constraint of this rmw instruction.
821 AtomicOrdering getOrdering() const {
822 return getSubclassData<AtomicOrderingField>();
823 }
824
825 /// Sets the ordering constraint of this rmw instruction.
826 void setOrdering(AtomicOrdering Ordering) {
827 assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 828, __PRETTY_FUNCTION__))
828 "atomicrmw instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 828, __PRETTY_FUNCTION__))
;
829 setSubclassData<AtomicOrderingField>(Ordering);
830 }
831
832 /// Returns the synchronization scope ID of this rmw instruction.
833 SyncScope::ID getSyncScopeID() const {
834 return SSID;
835 }
836
837 /// Sets the synchronization scope ID of this rmw instruction.
838 void setSyncScopeID(SyncScope::ID SSID) {
839 this->SSID = SSID;
840 }
841
842 Value *getPointerOperand() { return getOperand(0); }
843 const Value *getPointerOperand() const { return getOperand(0); }
844 static unsigned getPointerOperandIndex() { return 0U; }
845
846 Value *getValOperand() { return getOperand(1); }
847 const Value *getValOperand() const { return getOperand(1); }
848
849 /// Returns the address space of the pointer operand.
850 unsigned getPointerAddressSpace() const {
851 return getPointerOperand()->getType()->getPointerAddressSpace();
852 }
853
854 bool isFloatingPointOperation() const {
855 return isFPOperation(getOperation());
856 }
857
858 // Methods for support type inquiry through isa, cast, and dyn_cast:
859 static bool classof(const Instruction *I) {
860 return I->getOpcode() == Instruction::AtomicRMW;
861 }
862 static bool classof(const Value *V) {
863 return isa<Instruction>(V) && classof(cast<Instruction>(V));
864 }
865
866private:
867 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
868 AtomicOrdering Ordering, SyncScope::ID SSID);
869
870 // Shadow Instruction::setInstructionSubclassData with a private forwarding
871 // method so that subclasses cannot accidentally use it.
872 template <typename Bitfield>
873 void setSubclassData(typename Bitfield::Type Value) {
874 Instruction::setSubclassData<Bitfield>(Value);
875 }
876
877 /// The synchronization scope ID of this rmw instruction. Not quite enough
878 /// room in SubClassData for everything, so synchronization scope ID gets its
879 /// own field.
880 SyncScope::ID SSID;
881};
882
883template <>
884struct OperandTraits<AtomicRMWInst>
885 : public FixedNumOperandTraits<AtomicRMWInst,2> {
886};
887
888DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { ((i_nocapture < OperandTraits
<AtomicRMWInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 888, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<AtomicRMWInst>::op_begin(const_cast<
AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<AtomicRMWInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 888, __PRETTY_FUNCTION__)); OperandTraits<AtomicRMWInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned AtomicRMWInst
::getNumOperands() const { return OperandTraits<AtomicRMWInst
>::operands(this); } template <int Idx_nocapture> Use
&AtomicRMWInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
AtomicRMWInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
889
890//===----------------------------------------------------------------------===//
891// GetElementPtrInst Class
892//===----------------------------------------------------------------------===//
893
894// checkGEPType - Simple wrapper function to give a better assertion failure
895// message on bad indexes for a gep instruction.
896//
897inline Type *checkGEPType(Type *Ty) {
898 assert(Ty && "Invalid GetElementPtrInst indices for type!")((Ty && "Invalid GetElementPtrInst indices for type!"
) ? static_cast<void> (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 898, __PRETTY_FUNCTION__))
;
899 return Ty;
900}
901
902/// an instruction for type-safe pointer arithmetic to
903/// access elements of arrays and structs
904///
905class GetElementPtrInst : public Instruction {
906 Type *SourceElementType;
907 Type *ResultElementType;
908
909 GetElementPtrInst(const GetElementPtrInst &GEPI);
910
911 /// Constructors - Create a getelementptr instruction with a base pointer an
912 /// list of indices. The first ctor can optionally insert before an existing
913 /// instruction, the second appends the new instruction to the specified
914 /// BasicBlock.
915 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
916 ArrayRef<Value *> IdxList, unsigned Values,
917 const Twine &NameStr, Instruction *InsertBefore);
918 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
919 ArrayRef<Value *> IdxList, unsigned Values,
920 const Twine &NameStr, BasicBlock *InsertAtEnd);
921
922 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
923
924protected:
925 // Note: Instruction needs to be a friend here to call cloneImpl.
926 friend class Instruction;
927
928 GetElementPtrInst *cloneImpl() const;
929
930public:
931 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
932 ArrayRef<Value *> IdxList,
933 const Twine &NameStr = "",
934 Instruction *InsertBefore = nullptr) {
935 unsigned Values = 1 + unsigned(IdxList.size());
936 if (!PointeeType)
937 PointeeType =
938 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
939 else
940 assert(((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 942, __PRETTY_FUNCTION__))
941 PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 942, __PRETTY_FUNCTION__))
942 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 942, __PRETTY_FUNCTION__))
;
943 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
944 NameStr, InsertBefore);
945 }
946
947 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
948 ArrayRef<Value *> IdxList,
949 const Twine &NameStr,
950 BasicBlock *InsertAtEnd) {
951 unsigned Values = 1 + unsigned(IdxList.size());
952 if (!PointeeType)
953 PointeeType =
954 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
955 else
956 assert(((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 958, __PRETTY_FUNCTION__))
957 PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 958, __PRETTY_FUNCTION__))
958 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 958, __PRETTY_FUNCTION__))
;
959 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
960 NameStr, InsertAtEnd);
961 }
962
963 /// Create an "inbounds" getelementptr. See the documentation for the
964 /// "inbounds" flag in LangRef.html for details.
965 static GetElementPtrInst *CreateInBounds(Value *Ptr,
966 ArrayRef<Value *> IdxList,
967 const Twine &NameStr = "",
968 Instruction *InsertBefore = nullptr){
969 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
970 }
971
972 static GetElementPtrInst *
973 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
974 const Twine &NameStr = "",
975 Instruction *InsertBefore = nullptr) {
976 GetElementPtrInst *GEP =
977 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
978 GEP->setIsInBounds(true);
979 return GEP;
980 }
981
982 static GetElementPtrInst *CreateInBounds(Value *Ptr,
983 ArrayRef<Value *> IdxList,
984 const Twine &NameStr,
985 BasicBlock *InsertAtEnd) {
986 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
987 }
988
989 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
990 ArrayRef<Value *> IdxList,
991 const Twine &NameStr,
992 BasicBlock *InsertAtEnd) {
993 GetElementPtrInst *GEP =
994 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
995 GEP->setIsInBounds(true);
996 return GEP;
997 }
998
999 /// Transparently provide more efficient getOperand methods.
1000 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1001
1002 Type *getSourceElementType() const { return SourceElementType; }
1003
1004 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1005 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1006
1007 Type *getResultElementType() const {
1008 assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1009, __PRETTY_FUNCTION__))
1009 cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1009, __PRETTY_FUNCTION__))
;
1010 return ResultElementType;
1011 }
1012
1013 /// Returns the address space of this instruction's pointer type.
1014 unsigned getAddressSpace() const {
1015 // Note that this is always the same as the pointer operand's address space
1016 // and that is cheaper to compute, so cheat here.
1017 return getPointerAddressSpace();
1018 }
1019
1020 /// Returns the result type of a getelementptr with the given source
1021 /// element type and indexes.
1022 ///
1023 /// Null is returned if the indices are invalid for the specified
1024 /// source element type.
1025 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1026 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1027 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1028
1029 /// Return the type of the element at the given index of an indexable
1030 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1031 ///
1032 /// Returns null if the type can't be indexed, or the given index is not
1033 /// legal for the given type.
1034 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1035 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1036
1037 inline op_iterator idx_begin() { return op_begin()+1; }
1038 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1039 inline op_iterator idx_end() { return op_end(); }
1040 inline const_op_iterator idx_end() const { return op_end(); }
1041
1042 inline iterator_range<op_iterator> indices() {
1043 return make_range(idx_begin(), idx_end());
1044 }
1045
1046 inline iterator_range<const_op_iterator> indices() const {
1047 return make_range(idx_begin(), idx_end());
1048 }
1049
1050 Value *getPointerOperand() {
1051 return getOperand(0);
1052 }
1053 const Value *getPointerOperand() const {
1054 return getOperand(0);
1055 }
1056 static unsigned getPointerOperandIndex() {
1057 return 0U; // get index for modifying correct operand.
1058 }
1059
1060 /// Method to return the pointer operand as a
1061 /// PointerType.
1062 Type *getPointerOperandType() const {
1063 return getPointerOperand()->getType();
1064 }
1065
1066 /// Returns the address space of the pointer operand.
1067 unsigned getPointerAddressSpace() const {
1068 return getPointerOperandType()->getPointerAddressSpace();
1069 }
1070
1071 /// Returns the pointer type returned by the GEP
1072 /// instruction, which may be a vector of pointers.
1073 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1074 ArrayRef<Value *> IdxList) {
1075 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1076 Ptr->getType()->getPointerAddressSpace());
1077 // Vector GEP
1078 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1079 ElementCount EltCount = PtrVTy->getElementCount();
1080 return VectorType::get(PtrTy, EltCount);
1081 }
1082 for (Value *Index : IdxList)
1083 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1084 ElementCount EltCount = IndexVTy->getElementCount();
1085 return VectorType::get(PtrTy, EltCount);
1086 }
1087 // Scalar GEP
1088 return PtrTy;
1089 }
1090
1091 unsigned getNumIndices() const { // Note: always non-negative
1092 return getNumOperands() - 1;
1093 }
1094
1095 bool hasIndices() const {
1096 return getNumOperands() > 1;
1097 }
1098
1099 /// Return true if all of the indices of this GEP are
1100 /// zeros. If so, the result pointer and the first operand have the same
1101 /// value, just potentially different types.
1102 bool hasAllZeroIndices() const;
1103
1104 /// Return true if all of the indices of this GEP are
1105 /// constant integers. If so, the result pointer and the first operand have
1106 /// a constant offset between them.
1107 bool hasAllConstantIndices() const;
1108
1109 /// Set or clear the inbounds flag on this GEP instruction.
1110 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1111 void setIsInBounds(bool b = true);
1112
1113 /// Determine whether the GEP has the inbounds flag.
1114 bool isInBounds() const;
1115
1116 /// Accumulate the constant address offset of this GEP if possible.
1117 ///
1118 /// This routine accepts an APInt into which it will accumulate the constant
1119 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1120 /// all-constant, it returns false and the value of the offset APInt is
1121 /// undefined (it is *not* preserved!). The APInt passed into this routine
1122 /// must be at least as wide as the IntPtr type for the address space of
1123 /// the base GEP pointer.
1124 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1125
1126 // Methods for support type inquiry through isa, cast, and dyn_cast:
1127 static bool classof(const Instruction *I) {
1128 return (I->getOpcode() == Instruction::GetElementPtr);
1129 }
1130 static bool classof(const Value *V) {
1131 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1132 }
1133};
1134
1135template <>
1136struct OperandTraits<GetElementPtrInst> :
1137 public VariadicOperandTraits<GetElementPtrInst, 1> {
1138};
1139
1140GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1141 ArrayRef<Value *> IdxList, unsigned Values,
1142 const Twine &NameStr,
1143 Instruction *InsertBefore)
1144 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1145 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1146 Values, InsertBefore),
1147 SourceElementType(PointeeType),
1148 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1149 assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1150, __PRETTY_FUNCTION__))
1150 cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1150, __PRETTY_FUNCTION__))
;
1151 init(Ptr, IdxList, NameStr);
1152}
1153
1154GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1155 ArrayRef<Value *> IdxList, unsigned Values,
1156 const Twine &NameStr,
1157 BasicBlock *InsertAtEnd)
1158 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1159 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1160 Values, InsertAtEnd),
1161 SourceElementType(PointeeType),
1162 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1163 assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1164, __PRETTY_FUNCTION__))
1164 cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1164, __PRETTY_FUNCTION__))
;
1165 init(Ptr, IdxList, NameStr);
1166}
1167
1168DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<GetElementPtrInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1168, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<GetElementPtrInst>::op_begin(const_cast
<GetElementPtrInst*>(this))[i_nocapture].get()); } void
GetElementPtrInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<GetElementPtrInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1168, __PRETTY_FUNCTION__)); OperandTraits<GetElementPtrInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
GetElementPtrInst::getNumOperands() const { return OperandTraits
<GetElementPtrInst>::operands(this); } template <int
Idx_nocapture> Use &GetElementPtrInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &GetElementPtrInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
1169
1170//===----------------------------------------------------------------------===//
1171// ICmpInst Class
1172//===----------------------------------------------------------------------===//
1173
1174/// This instruction compares its operands according to the predicate given
1175/// to the constructor. It only operates on integers or pointers. The operands
1176/// must be identical types.
1177/// Represent an integer comparison operator.
1178class ICmpInst: public CmpInst {
1179 void AssertOK() {
1180 assert(isIntPredicate() &&((isIntPredicate() && "Invalid ICmp predicate value")
? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1181, __PRETTY_FUNCTION__))
1181 "Invalid ICmp predicate value")((isIntPredicate() && "Invalid ICmp predicate value")
? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1181, __PRETTY_FUNCTION__))
;
1182 assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to ICmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1183, __PRETTY_FUNCTION__))
1183 "Both operands to ICmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to ICmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1183, __PRETTY_FUNCTION__))
;
1184 // Check that the operands are the right type
1185 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand
(0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction"
) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1187, __PRETTY_FUNCTION__))
1186 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand
(0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction"
) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1187, __PRETTY_FUNCTION__))
1187 "Invalid operand types for ICmp instruction")(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand
(0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction"
) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1187, __PRETTY_FUNCTION__))
;
1188 }
1189
1190protected:
1191 // Note: Instruction needs to be a friend here to call cloneImpl.
1192 friend class Instruction;
1193
1194 /// Clone an identical ICmpInst
1195 ICmpInst *cloneImpl() const;
1196
1197public:
1198 /// Constructor with insert-before-instruction semantics.
1199 ICmpInst(
1200 Instruction *InsertBefore, ///< Where to insert
1201 Predicate pred, ///< The predicate to use for the comparison
1202 Value *LHS, ///< The left-hand-side of the expression
1203 Value *RHS, ///< The right-hand-side of the expression
1204 const Twine &NameStr = "" ///< Name of the instruction
1205 ) : CmpInst(makeCmpResultType(LHS->getType()),
1206 Instruction::ICmp, pred, LHS, RHS, NameStr,
1207 InsertBefore) {
1208#ifndef NDEBUG
1209 AssertOK();
1210#endif
1211 }
1212
1213 /// Constructor with insert-at-end semantics.
1214 ICmpInst(
1215 BasicBlock &InsertAtEnd, ///< Block to insert into.
1216 Predicate pred, ///< The predicate to use for the comparison
1217 Value *LHS, ///< The left-hand-side of the expression
1218 Value *RHS, ///< The right-hand-side of the expression
1219 const Twine &NameStr = "" ///< Name of the instruction
1220 ) : CmpInst(makeCmpResultType(LHS->getType()),
1221 Instruction::ICmp, pred, LHS, RHS, NameStr,
1222 &InsertAtEnd) {
1223#ifndef NDEBUG
1224 AssertOK();
1225#endif
1226 }
1227
1228 /// Constructor with no-insertion semantics
1229 ICmpInst(
1230 Predicate pred, ///< The predicate to use for the comparison
1231 Value *LHS, ///< The left-hand-side of the expression
1232 Value *RHS, ///< The right-hand-side of the expression
1233 const Twine &NameStr = "" ///< Name of the instruction
1234 ) : CmpInst(makeCmpResultType(LHS->getType()),
1235 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1236#ifndef NDEBUG
1237 AssertOK();
1238#endif
1239 }
1240
1241 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1242 /// @returns the predicate that would be the result if the operand were
1243 /// regarded as signed.
1244 /// Return the signed version of the predicate
1245 Predicate getSignedPredicate() const {
1246 return getSignedPredicate(getPredicate());
1247 }
1248
1249 /// This is a static version that you can use without an instruction.
1250 /// Return the signed version of the predicate.
1251 static Predicate getSignedPredicate(Predicate pred);
1252
1253 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1254 /// @returns the predicate that would be the result if the operand were
1255 /// regarded as unsigned.
1256 /// Return the unsigned version of the predicate
1257 Predicate getUnsignedPredicate() const {
1258 return getUnsignedPredicate(getPredicate());
1259 }
1260
1261 /// This is a static version that you can use without an instruction.
1262 /// Return the unsigned version of the predicate.
1263 static Predicate getUnsignedPredicate(Predicate pred);
1264
1265 /// Return true if this predicate is either EQ or NE. This also
1266 /// tests for commutativity.
1267 static bool isEquality(Predicate P) {
1268 return P == ICMP_EQ || P == ICMP_NE;
1269 }
1270
1271 /// Return true if this predicate is either EQ or NE. This also
1272 /// tests for commutativity.
1273 bool isEquality() const {
1274 return isEquality(getPredicate());
1275 }
1276
1277 /// @returns true if the predicate of this ICmpInst is commutative
1278 /// Determine if this relation is commutative.
1279 bool isCommutative() const { return isEquality(); }
1280
1281 /// Return true if the predicate is relational (not EQ or NE).
1282 ///
1283 bool isRelational() const {
1284 return !isEquality();
1285 }
1286
1287 /// Return true if the predicate is relational (not EQ or NE).
1288 ///
1289 static bool isRelational(Predicate P) {
1290 return !isEquality(P);
1291 }
1292
1293 /// Return true if the predicate is SGT or UGT.
1294 ///
1295 static bool isGT(Predicate P) {
1296 return P == ICMP_SGT || P == ICMP_UGT;
1297 }
1298
1299 /// Return true if the predicate is SLT or ULT.
1300 ///
1301 static bool isLT(Predicate P) {
1302 return P == ICMP_SLT || P == ICMP_ULT;
1303 }
1304
1305 /// Return true if the predicate is SGE or UGE.
1306 ///
1307 static bool isGE(Predicate P) {
1308 return P == ICMP_SGE || P == ICMP_UGE;
1309 }
1310
1311 /// Return true if the predicate is SLE or ULE.
1312 ///
1313 static bool isLE(Predicate P) {
1314 return P == ICMP_SLE || P == ICMP_ULE;
1315 }
1316
1317 /// Exchange the two operands to this instruction in such a way that it does
1318 /// not modify the semantics of the instruction. The predicate value may be
1319 /// changed to retain the same result if the predicate is order dependent
1320 /// (e.g. ult).
1321 /// Swap operands and adjust predicate.
1322 void swapOperands() {
1323 setPredicate(getSwappedPredicate());
1324 Op<0>().swap(Op<1>());
1325 }
1326
1327 // Methods for support type inquiry through isa, cast, and dyn_cast:
1328 static bool classof(const Instruction *I) {
1329 return I->getOpcode() == Instruction::ICmp;
1330 }
1331 static bool classof(const Value *V) {
1332 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1333 }
1334};
1335
1336//===----------------------------------------------------------------------===//
1337// FCmpInst Class
1338//===----------------------------------------------------------------------===//
1339
1340/// This instruction compares its operands according to the predicate given
1341/// to the constructor. It only operates on floating point values or packed
1342/// vectors of floating point values. The operands must be identical types.
1343/// Represents a floating point comparison operator.
1344class FCmpInst: public CmpInst {
1345 void AssertOK() {
1346 assert(isFPPredicate() && "Invalid FCmp predicate value")((isFPPredicate() && "Invalid FCmp predicate value") ?
static_cast<void> (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1346, __PRETTY_FUNCTION__))
;
1347 assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to FCmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1348, __PRETTY_FUNCTION__))
1348 "Both operands to FCmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to FCmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1348, __PRETTY_FUNCTION__))
;
1349 // Check that the operands are the right type
1350 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&((getOperand(0)->getType()->isFPOrFPVectorTy() &&
"Invalid operand types for FCmp instruction") ? static_cast<
void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1351, __PRETTY_FUNCTION__))
1351 "Invalid operand types for FCmp instruction")((getOperand(0)->getType()->isFPOrFPVectorTy() &&
"Invalid operand types for FCmp instruction") ? static_cast<
void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1351, __PRETTY_FUNCTION__))
;
1352 }
1353
1354protected:
1355 // Note: Instruction needs to be a friend here to call cloneImpl.
1356 friend class Instruction;
1357
1358 /// Clone an identical FCmpInst
1359 FCmpInst *cloneImpl() const;
1360
1361public:
1362 /// Constructor with insert-before-instruction semantics.
1363 FCmpInst(
1364 Instruction *InsertBefore, ///< Where to insert
1365 Predicate pred, ///< The predicate to use for the comparison
1366 Value *LHS, ///< The left-hand-side of the expression
1367 Value *RHS, ///< The right-hand-side of the expression
1368 const Twine &NameStr = "" ///< Name of the instruction
1369 ) : CmpInst(makeCmpResultType(LHS->getType()),
1370 Instruction::FCmp, pred, LHS, RHS, NameStr,
1371 InsertBefore) {
1372 AssertOK();
1373 }
1374
1375 /// Constructor with insert-at-end semantics.
1376 FCmpInst(
1377 BasicBlock &InsertAtEnd, ///< Block to insert into.
1378 Predicate pred, ///< The predicate to use for the comparison
1379 Value *LHS, ///< The left-hand-side of the expression
1380 Value *RHS, ///< The right-hand-side of the expression
1381 const Twine &NameStr = "" ///< Name of the instruction
1382 ) : CmpInst(makeCmpResultType(LHS->getType()),
1383 Instruction::FCmp, pred, LHS, RHS, NameStr,
1384 &InsertAtEnd) {
1385 AssertOK();
1386 }
1387
1388 /// Constructor with no-insertion semantics
1389 FCmpInst(
1390 Predicate Pred, ///< The predicate to use for the comparison
1391 Value *LHS, ///< The left-hand-side of the expression
1392 Value *RHS, ///< The right-hand-side of the expression
1393 const Twine &NameStr = "", ///< Name of the instruction
1394 Instruction *FlagsSource = nullptr
1395 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1396 RHS, NameStr, nullptr, FlagsSource) {
1397 AssertOK();
1398 }
1399
1400 /// @returns true if the predicate of this instruction is EQ or NE.
1401 /// Determine if this is an equality predicate.
1402 static bool isEquality(Predicate Pred) {
1403 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1404 Pred == FCMP_UNE;
1405 }
1406
1407 /// @returns true if the predicate of this instruction is EQ or NE.
1408 /// Determine if this is an equality predicate.
1409 bool isEquality() const { return isEquality(getPredicate()); }
1410
1411 /// @returns true if the predicate of this instruction is commutative.
1412 /// Determine if this is a commutative predicate.
1413 bool isCommutative() const {
1414 return isEquality() ||
1415 getPredicate() == FCMP_FALSE ||
1416 getPredicate() == FCMP_TRUE ||
1417 getPredicate() == FCMP_ORD ||
1418 getPredicate() == FCMP_UNO;
1419 }
1420
1421 /// @returns true if the predicate is relational (not EQ or NE).
1422 /// Determine if this a relational predicate.
1423 bool isRelational() const { return !isEquality(); }
1424
1425 /// Exchange the two operands to this instruction in such a way that it does
1426 /// not modify the semantics of the instruction. The predicate value may be
1427 /// changed to retain the same result if the predicate is order dependent
1428 /// (e.g. ult).
1429 /// Swap operands and adjust predicate.
1430 void swapOperands() {
1431 setPredicate(getSwappedPredicate());
1432 Op<0>().swap(Op<1>());
1433 }
1434
1435 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1436 static bool classof(const Instruction *I) {
1437 return I->getOpcode() == Instruction::FCmp;
1438 }
1439 static bool classof(const Value *V) {
1440 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1441 }
1442};
1443
1444//===----------------------------------------------------------------------===//
1445/// This class represents a function call, abstracting a target
1446/// machine's calling convention. This class uses low bit of the SubClassData
1447/// field to indicate whether or not this is a tail call. The rest of the bits
1448/// hold the calling convention of the call.
1449///
1450class CallInst : public CallBase {
1451 CallInst(const CallInst &CI);
1452
1453 /// Construct a CallInst given a range of arguments.
1454 /// Construct a CallInst from a range of arguments
1455 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1456 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1457 Instruction *InsertBefore);
1458
1459 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1460 const Twine &NameStr, Instruction *InsertBefore)
1461 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1462
1463 /// Construct a CallInst given a range of arguments.
1464 /// Construct a CallInst from a range of arguments
1465 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1466 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1467 BasicBlock *InsertAtEnd);
1468
1469 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1470 Instruction *InsertBefore);
1471
1472 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1473 BasicBlock *InsertAtEnd);
1474
1475 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1476 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1477 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1478
1479 /// Compute the number of operands to allocate.
1480 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1481 // We need one operand for the called function, plus the input operand
1482 // counts provided.
1483 return 1 + NumArgs + NumBundleInputs;
1484 }
1485
1486protected:
1487 // Note: Instruction needs to be a friend here to call cloneImpl.
1488 friend class Instruction;
1489
1490 CallInst *cloneImpl() const;
1491
1492public:
1493 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1494 Instruction *InsertBefore = nullptr) {
1495 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1496 }
1497
1498 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1499 const Twine &NameStr,
1500 Instruction *InsertBefore = nullptr) {
1501 return new (ComputeNumOperands(Args.size()))
1502 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1503 }
1504
1505 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1506 ArrayRef<OperandBundleDef> Bundles = None,
1507 const Twine &NameStr = "",
1508 Instruction *InsertBefore = nullptr) {
1509 const int NumOperands =
1510 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1511 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1512
1513 return new (NumOperands, DescriptorBytes)
1514 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1515 }
1516
1517 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1518 BasicBlock *InsertAtEnd) {
1519 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1520 }
1521
1522 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1523 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1524 return new (ComputeNumOperands(Args.size()))
1525 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1526 }
1527
1528 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1529 ArrayRef<OperandBundleDef> Bundles,
1530 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1531 const int NumOperands =
1532 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1533 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1534
1535 return new (NumOperands, DescriptorBytes)
1536 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1537 }
1538
1539 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1540 Instruction *InsertBefore = nullptr) {
1541 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1542 InsertBefore);
1543 }
1544
1545 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1546 ArrayRef<OperandBundleDef> Bundles = None,
1547 const Twine &NameStr = "",
1548 Instruction *InsertBefore = nullptr) {
1549 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1550 NameStr, InsertBefore);
1551 }
1552
1553 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1554 const Twine &NameStr,
1555 Instruction *InsertBefore = nullptr) {
1556 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1557 InsertBefore);
1558 }
1559
1560 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1561 BasicBlock *InsertAtEnd) {
1562 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1563 InsertAtEnd);
1564 }
1565
1566 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1567 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1568 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1569 InsertAtEnd);
1570 }
1571
1572 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1573 ArrayRef<OperandBundleDef> Bundles,
1574 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1575 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1576 NameStr, InsertAtEnd);
1577 }
1578
1579 /// Create a clone of \p CI with a different set of operand bundles and
1580 /// insert it before \p InsertPt.
1581 ///
1582 /// The returned call instruction is identical \p CI in every way except that
1583 /// the operand bundles for the new instruction are set to the operand bundles
1584 /// in \p Bundles.
1585 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1586 Instruction *InsertPt = nullptr);
1587
1588 /// Create a clone of \p CI with a different set of operand bundles and
1589 /// insert it before \p InsertPt.
1590 ///
1591 /// The returned call instruction is identical \p CI in every way except that
1592 /// the operand bundle for the new instruction is set to the operand bundle
1593 /// in \p Bundle.
1594 static CallInst *CreateWithReplacedBundle(CallInst *CI,
1595 OperandBundleDef Bundle,
1596 Instruction *InsertPt = nullptr);
1597
1598 /// Generate the IR for a call to malloc:
1599 /// 1. Compute the malloc call's argument as the specified type's size,
1600 /// possibly multiplied by the array size if the array size is not
1601 /// constant 1.
1602 /// 2. Call malloc with that argument.
1603 /// 3. Bitcast the result of the malloc call to the specified type.
1604 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1605 Type *AllocTy, Value *AllocSize,
1606 Value *ArraySize = nullptr,
1607 Function *MallocF = nullptr,
1608 const Twine &Name = "");
1609 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1610 Type *AllocTy, Value *AllocSize,
1611 Value *ArraySize = nullptr,
1612 Function *MallocF = nullptr,
1613 const Twine &Name = "");
1614 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1615 Type *AllocTy, Value *AllocSize,
1616 Value *ArraySize = nullptr,
1617 ArrayRef<OperandBundleDef> Bundles = None,
1618 Function *MallocF = nullptr,
1619 const Twine &Name = "");
1620 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1621 Type *AllocTy, Value *AllocSize,
1622 Value *ArraySize = nullptr,
1623 ArrayRef<OperandBundleDef> Bundles = None,
1624 Function *MallocF = nullptr,
1625 const Twine &Name = "");
1626 /// Generate the IR for a call to the builtin free function.
1627 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1628 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1629 static Instruction *CreateFree(Value *Source,
1630 ArrayRef<OperandBundleDef> Bundles,
1631 Instruction *InsertBefore);
1632 static Instruction *CreateFree(Value *Source,
1633 ArrayRef<OperandBundleDef> Bundles,
1634 BasicBlock *InsertAtEnd);
1635
1636 // Note that 'musttail' implies 'tail'.
1637 enum TailCallKind : unsigned {
1638 TCK_None = 0,
1639 TCK_Tail = 1,
1640 TCK_MustTail = 2,
1641 TCK_NoTail = 3,
1642 TCK_LAST = TCK_NoTail
1643 };
1644
1645 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1646 static_assert(
1647 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1648 "Bitfields must be contiguous");
1649
1650 TailCallKind getTailCallKind() const {
1651 return getSubclassData<TailCallKindField>();
1652 }
1653
1654 bool isTailCall() const {
1655 TailCallKind Kind = getTailCallKind();
1656 return Kind == TCK_Tail || Kind == TCK_MustTail;
1657 }
1658
1659 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1660
1661 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1662
1663 void setTailCallKind(TailCallKind TCK) {
1664 setSubclassData<TailCallKindField>(TCK);
1665 }
1666
1667 void setTailCall(bool IsTc = true) {
1668 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1669 }
1670
1671 /// Return true if the call can return twice
1672 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1673 void setCanReturnTwice() {
1674 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1675 }
1676
1677 // Methods for support type inquiry through isa, cast, and dyn_cast:
1678 static bool classof(const Instruction *I) {
1679 return I->getOpcode() == Instruction::Call;
1680 }
1681 static bool classof(const Value *V) {
1682 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1683 }
1684
1685 /// Updates profile metadata by scaling it by \p S / \p T.
1686 void updateProfWeight(uint64_t S, uint64_t T);
1687
1688private:
1689 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1690 // method so that subclasses cannot accidentally use it.
1691 template <typename Bitfield>
1692 void setSubclassData(typename Bitfield::Type Value) {
1693 Instruction::setSubclassData<Bitfield>(Value);
1694 }
1695};
1696
1697CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1698 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1699 BasicBlock *InsertAtEnd)
1700 : CallBase(Ty->getReturnType(), Instruction::Call,
1701 OperandTraits<CallBase>::op_end(this) -
1702 (Args.size() + CountBundleInputs(Bundles) + 1),
1703 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1704 InsertAtEnd) {
1705 init(Ty, Func, Args, Bundles, NameStr);
1706}
1707
1708CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1709 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1710 Instruction *InsertBefore)
1711 : CallBase(Ty->getReturnType(), Instruction::Call,
1712 OperandTraits<CallBase>::op_end(this) -
1713 (Args.size() + CountBundleInputs(Bundles) + 1),
1714 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1715 InsertBefore) {
1716 init(Ty, Func, Args, Bundles, NameStr);
1717}
1718
1719//===----------------------------------------------------------------------===//
1720// SelectInst Class
1721//===----------------------------------------------------------------------===//
1722
1723/// This class represents the LLVM 'select' instruction.
1724///
1725class SelectInst : public Instruction {
1726 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1727 Instruction *InsertBefore)
1728 : Instruction(S1->getType(), Instruction::Select,
1729 &Op<0>(), 3, InsertBefore) {
1730 init(C, S1, S2);
1731 setName(NameStr);
1732 }
1733
1734 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1735 BasicBlock *InsertAtEnd)
1736 : Instruction(S1->getType(), Instruction::Select,
1737 &Op<0>(), 3, InsertAtEnd) {
1738 init(C, S1, S2);
1739 setName(NameStr);
1740 }
1741
1742 void init(Value *C, Value *S1, Value *S2) {
1743 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")((!areInvalidOperands(C, S1, S2) && "Invalid operands for select"
) ? static_cast<void> (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1743, __PRETTY_FUNCTION__))
;
1744 Op<0>() = C;
1745 Op<1>() = S1;
1746 Op<2>() = S2;
1747 }
1748
1749protected:
1750 // Note: Instruction needs to be a friend here to call cloneImpl.
1751 friend class Instruction;
1752
1753 SelectInst *cloneImpl() const;
1754
1755public:
1756 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1757 const Twine &NameStr = "",
1758 Instruction *InsertBefore = nullptr,
1759 Instruction *MDFrom = nullptr) {
1760 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1761 if (MDFrom)
1762 Sel->copyMetadata(*MDFrom);
1763 return Sel;
1764 }
1765
1766 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1767 const Twine &NameStr,
1768 BasicBlock *InsertAtEnd) {
1769 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1770 }
1771
1772 const Value *getCondition() const { return Op<0>(); }
1773 const Value *getTrueValue() const { return Op<1>(); }
1774 const Value *getFalseValue() const { return Op<2>(); }
1775 Value *getCondition() { return Op<0>(); }
1776 Value *getTrueValue() { return Op<1>(); }
1777 Value *getFalseValue() { return Op<2>(); }
1778
1779 void setCondition(Value *V) { Op<0>() = V; }
1780 void setTrueValue(Value *V) { Op<1>() = V; }
1781 void setFalseValue(Value *V) { Op<2>() = V; }
1782
1783 /// Swap the true and false values of the select instruction.
1784 /// This doesn't swap prof metadata.
1785 void swapValues() { Op<1>().swap(Op<2>()); }
1786
1787 /// Return a string if the specified operands are invalid
1788 /// for a select operation, otherwise return null.
1789 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1790
1791 /// Transparently provide more efficient getOperand methods.
1792 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1793
1794 OtherOps getOpcode() const {
1795 return static_cast<OtherOps>(Instruction::getOpcode());
1796 }
1797
1798 // Methods for support type inquiry through isa, cast, and dyn_cast:
1799 static bool classof(const Instruction *I) {
1800 return I->getOpcode() == Instruction::Select;
1801 }
1802 static bool classof(const Value *V) {
1803 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1804 }
1805};
1806
1807template <>
1808struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1809};
1810
1811DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<SelectInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1811, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<SelectInst>::op_begin(const_cast<SelectInst
*>(this))[i_nocapture].get()); } void SelectInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<SelectInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1811, __PRETTY_FUNCTION__)); OperandTraits<SelectInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned SelectInst
::getNumOperands() const { return OperandTraits<SelectInst
>::operands(this); } template <int Idx_nocapture> Use
&SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1812
1813//===----------------------------------------------------------------------===//
1814// VAArgInst Class
1815//===----------------------------------------------------------------------===//
1816
1817/// This class represents the va_arg llvm instruction, which returns
1818/// an argument of the specified type given a va_list and increments that list
1819///
1820class VAArgInst : public UnaryInstruction {
1821protected:
1822 // Note: Instruction needs to be a friend here to call cloneImpl.
1823 friend class Instruction;
1824
1825 VAArgInst *cloneImpl() const;
1826
1827public:
1828 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1829 Instruction *InsertBefore = nullptr)
1830 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1831 setName(NameStr);
1832 }
1833
1834 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1835 BasicBlock *InsertAtEnd)
1836 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1837 setName(NameStr);
1838 }
1839
1840 Value *getPointerOperand() { return getOperand(0); }
1841 const Value *getPointerOperand() const { return getOperand(0); }
1842 static unsigned getPointerOperandIndex() { return 0U; }
1843
1844 // Methods for support type inquiry through isa, cast, and dyn_cast:
1845 static bool classof(const Instruction *I) {
1846 return I->getOpcode() == VAArg;
1847 }
1848 static bool classof(const Value *V) {
1849 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1850 }
1851};
1852
1853//===----------------------------------------------------------------------===//
1854// ExtractElementInst Class
1855//===----------------------------------------------------------------------===//
1856
1857/// This instruction extracts a single (scalar)
1858/// element from a VectorType value
1859///
1860class ExtractElementInst : public Instruction {
1861 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1862 Instruction *InsertBefore = nullptr);
1863 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1864 BasicBlock *InsertAtEnd);
1865
1866protected:
1867 // Note: Instruction needs to be a friend here to call cloneImpl.
1868 friend class Instruction;
1869
1870 ExtractElementInst *cloneImpl() const;
1871
1872public:
1873 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1874 const Twine &NameStr = "",
1875 Instruction *InsertBefore = nullptr) {
1876 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1877 }
1878
1879 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1880 const Twine &NameStr,
1881 BasicBlock *InsertAtEnd) {
1882 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1883 }
1884
1885 /// Return true if an extractelement instruction can be
1886 /// formed with the specified operands.
1887 static bool isValidOperands(const Value *Vec, const Value *Idx);
1888
1889 Value *getVectorOperand() { return Op<0>(); }
1890 Value *getIndexOperand() { return Op<1>(); }
1891 const Value *getVectorOperand() const { return Op<0>(); }
1892 const Value *getIndexOperand() const { return Op<1>(); }
1893
1894 VectorType *getVectorOperandType() const {
1895 return cast<VectorType>(getVectorOperand()->getType());
1896 }
1897
1898 /// Transparently provide more efficient getOperand methods.
1899 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1900
1901 // Methods for support type inquiry through isa, cast, and dyn_cast:
1902 static bool classof(const Instruction *I) {
1903 return I->getOpcode() == Instruction::ExtractElement;
1904 }
1905 static bool classof(const Value *V) {
1906 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1907 }
1908};
1909
1910template <>
1911struct OperandTraits<ExtractElementInst> :
1912 public FixedNumOperandTraits<ExtractElementInst, 2> {
1913};
1914
1915DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
((i_nocapture < OperandTraits<ExtractElementInst>::
operands(this) && "getOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1915, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ExtractElementInst>::op_begin(const_cast
<ExtractElementInst*>(this))[i_nocapture].get()); } void
ExtractElementInst::setOperand(unsigned i_nocapture, Value *
Val_nocapture) { ((i_nocapture < OperandTraits<ExtractElementInst
>::operands(this) && "setOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1915, __PRETTY_FUNCTION__)); OperandTraits<ExtractElementInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
ExtractElementInst::getNumOperands() const { return OperandTraits
<ExtractElementInst>::operands(this); } template <int
Idx_nocapture> Use &ExtractElementInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &ExtractElementInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
1916
1917//===----------------------------------------------------------------------===//
1918// InsertElementInst Class
1919//===----------------------------------------------------------------------===//
1920
1921/// This instruction inserts a single (scalar)
1922/// element into a VectorType value
1923///
1924class InsertElementInst : public Instruction {
1925 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1926 const Twine &NameStr = "",
1927 Instruction *InsertBefore = nullptr);
1928 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1929 BasicBlock *InsertAtEnd);
1930
1931protected:
1932 // Note: Instruction needs to be a friend here to call cloneImpl.
1933 friend class Instruction;
1934
1935 InsertElementInst *cloneImpl() const;
1936
1937public:
1938 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1939 const Twine &NameStr = "",
1940 Instruction *InsertBefore = nullptr) {
1941 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1942 }
1943
1944 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1945 const Twine &NameStr,
1946 BasicBlock *InsertAtEnd) {
1947 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1948 }
1949
1950 /// Return true if an insertelement instruction can be
1951 /// formed with the specified operands.
1952 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1953 const Value *Idx);
1954
1955 /// Overload to return most specific vector type.
1956 ///
1957 VectorType *getType() const {
1958 return cast<VectorType>(Instruction::getType());
1959 }
1960
1961 /// Transparently provide more efficient getOperand methods.
1962 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1963
1964 // Methods for support type inquiry through isa, cast, and dyn_cast:
1965 static bool classof(const Instruction *I) {
1966 return I->getOpcode() == Instruction::InsertElement;
1967 }
1968 static bool classof(const Value *V) {
1969 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1970 }
1971};
1972
1973template <>
1974struct OperandTraits<InsertElementInst> :
1975 public FixedNumOperandTraits<InsertElementInst, 3> {
1976};
1977
// Out-of-line definitions of the transparent operand accessors declared in
// the class body. NOTE(review): annotated listing — the macro and its full
// expansion (op_begin/op_end/getOperand/setOperand/getNumOperands/Op<>)
// appear fused below; the range asserts come from the expanded assert macro.
1978DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<InsertElementInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1978, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<InsertElementInst>::op_begin(const_cast
<InsertElementInst*>(this))[i_nocapture].get()); } void
InsertElementInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<InsertElementInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 1978, __PRETTY_FUNCTION__)); OperandTraits<InsertElementInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
InsertElementInst::getNumOperands() const { return OperandTraits
<InsertElementInst>::operands(this); } template <int
Idx_nocapture> Use &InsertElementInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &InsertElementInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
1979
1980//===----------------------------------------------------------------------===//
1981// ShuffleVectorInst Class
1982//===----------------------------------------------------------------------===//
1983
// Sentinel shuffle-mask element: -1 marks a result lane whose value is
// undefined (printed as "undef" in IR). Used throughout ShuffleVectorInst.
1984constexpr int UndefMaskElem = -1;
1985
1986/// This instruction constructs a fixed permutation of two
1987/// input vectors.
1988///
1989/// For each element of the result vector, the shuffle mask selects an element
1990/// from one of the input vectors to copy to the result. Non-negative elements
1991/// in the mask represent an index into the concatenated pair of input vectors.
1992/// UndefMaskElem (-1) specifies that the result element is undefined.
1993///
1994/// For scalable vectors, all the elements of the mask must be 0 or -1. This
1995/// requirement may be relaxed in the future.
1996class ShuffleVectorInst : public Instruction {
// The mask is stored directly as integers (not as an IR Constant operand);
// UndefMaskElem (-1) entries denote undef lanes.
1997 SmallVector<int, 4> ShuffleMask;
// Constant form of the mask, retained only for bitcode serialization (see
// getShuffleMaskForBitcode below).
1998 Constant *ShuffleMaskForBitcode;
1999
2000protected:
2001 // Note: Instruction needs to be a friend here to call cloneImpl.
2002 friend class Instruction;
2003
2004 ShuffleVectorInst *cloneImpl() const;
2005
2006public:
// NOTE(review): "InsertBefor" (sic) is the parameter name as shipped; it is
// part of the published signature, so it is left untouched here.
2007 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2008 const Twine &NameStr = "",
2009 Instruction *InsertBefor = nullptr);
2010 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2011 const Twine &NameStr, BasicBlock *InsertAtEnd);
2012 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2013 const Twine &NameStr = "",
2014 Instruction *InsertBefor = nullptr);
2015 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2016 const Twine &NameStr, BasicBlock *InsertAtEnd);
2017
// Exactly two Use slots are allocated (V1 and V2); the mask is not an
// operand of this instruction.
2018 void *operator new(size_t s) { return User::operator new(s, 2); }
2019
2020 /// Swap the operands and adjust the mask to preserve the semantics
2021 /// of the instruction.
2022 void commute();
2023
2024 /// Return true if a shufflevector instruction can be
2025 /// formed with the specified operands.
2026 static bool isValidOperands(const Value *V1, const Value *V2,
2027 const Value *Mask);
2028 static bool isValidOperands(const Value *V1, const Value *V2,
2029 ArrayRef<int> Mask);
2030
2031 /// Overload to return most specific vector type.
2032 ///
2033 VectorType *getType() const {
2034 return cast<VectorType>(Instruction::getType());
2035 }
2036
2037 /// Transparently provide more efficient getOperand methods.
2038 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2039
2040 /// Return the shuffle mask value of this instruction for the given element
2041 /// index. Return UndefMaskElem if the element is undef.
// No bounds checking beyond SmallVector's own assertions: Elt must be less
// than the mask length.
2042 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2043
2044 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2045 /// elements of the mask are returned as UndefMaskElem.
2046 static void getShuffleMask(const Constant *Mask,
2047 SmallVectorImpl<int> &Result);
2048
2049 /// Return the mask for this instruction as a vector of integers. Undefined
2050 /// elements of the mask are returned as UndefMaskElem.
2051 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2052 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2053 }
2054
2055 /// Return the mask for this instruction, for use in bitcode.
2056 ///
2057 /// TODO: This is temporary until we decide a new bitcode encoding for
2058 /// shufflevector.
2059 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2060
2061 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2062 Type *ResultTy);
2063
2064 void setShuffleMask(ArrayRef<int> Mask);
2065
2066 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2067
2068 /// Return true if this shuffle returns a vector with a different number of
2069 /// elements than its source vectors.
2070 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2071 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2072 bool changesLength() const {
// Uses the known-minimum element count so the comparison is meaningful for
// scalable vectors as well as fixed ones.
2073 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2074 ->getElementCount()
2075 .getKnownMinValue();
2076 unsigned NumMaskElts = ShuffleMask.size();
2077 return NumSourceElts != NumMaskElts;
2078 }
2079
2080 /// Return true if this shuffle returns a vector with a greater number of
2081 /// elements than its source vectors.
2082 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2083 bool increasesLength() const {
2084 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2085 ->getElementCount()
2086 .getKnownMinValue();
2087 unsigned NumMaskElts = ShuffleMask.size();
2088 return NumSourceElts < NumMaskElts;
2089 }
2090
2091 /// Return true if this shuffle mask chooses elements from exactly one source
2092 /// vector.
2093 /// Example: <7,5,undef,7>
2094 /// This assumes that vector operands are the same length as the mask.
2095 static bool isSingleSourceMask(ArrayRef<int> Mask);
2096 static bool isSingleSourceMask(const Constant *Mask) {
2097 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2097, __PRETTY_FUNCTION__))
;
2098 SmallVector<int, 16> MaskAsInts;
2099 getShuffleMask(Mask, MaskAsInts);
2100 return isSingleSourceMask(MaskAsInts);
2101 }
2102
2103 /// Return true if this shuffle chooses elements from exactly one source
2104 /// vector without changing the length of that vector.
2105 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2106 /// TODO: Optionally allow length-changing shuffles.
2107 bool isSingleSource() const {
2108 return !changesLength() && isSingleSourceMask(ShuffleMask);
2109 }
2110
2111 /// Return true if this shuffle mask chooses elements from exactly one source
2112 /// vector without lane crossings. A shuffle using this mask is not
2113 /// necessarily a no-op because it may change the number of elements from its
2114 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2115 /// Example: <undef,undef,2,3>
2116 static bool isIdentityMask(ArrayRef<int> Mask);
2117 static bool isIdentityMask(const Constant *Mask) {
2118 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2118, __PRETTY_FUNCTION__))
;
2119 SmallVector<int, 16> MaskAsInts;
2120 getShuffleMask(Mask, MaskAsInts);
2121 return isIdentityMask(MaskAsInts);
2122 }
2123
2124 /// Return true if this shuffle chooses elements from exactly one source
2125 /// vector without lane crossings and does not change the number of elements
2126 /// from its input vectors.
2127 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2128 bool isIdentity() const {
2129 return !changesLength() && isIdentityMask(ShuffleMask);
2130 }
2131
2132 /// Return true if this shuffle lengthens exactly one source vector with
2133 /// undefs in the high elements.
2134 bool isIdentityWithPadding() const;
2135
2136 /// Return true if this shuffle extracts the first N elements of exactly one
2137 /// source vector.
2138 bool isIdentityWithExtract() const;
2139
2140 /// Return true if this shuffle concatenates its 2 source vectors. This
2141 /// returns false if either input is undefined. In that case, the shuffle is
2142 /// is better classified as an identity with padding operation.
2143 bool isConcat() const;
2144
2145 /// Return true if this shuffle mask chooses elements from its source vectors
2146 /// without lane crossings. A shuffle using this mask would be
2147 /// equivalent to a vector select with a constant condition operand.
2148 /// Example: <4,1,6,undef>
2149 /// This returns false if the mask does not choose from both input vectors.
2150 /// In that case, the shuffle is better classified as an identity shuffle.
2151 /// This assumes that vector operands are the same length as the mask
2152 /// (a length-changing shuffle can never be equivalent to a vector select).
2153 static bool isSelectMask(ArrayRef<int> Mask);
2154 static bool isSelectMask(const Constant *Mask) {
2155 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2155, __PRETTY_FUNCTION__))
;
2156 SmallVector<int, 16> MaskAsInts;
2157 getShuffleMask(Mask, MaskAsInts);
2158 return isSelectMask(MaskAsInts);
2159 }
2160
2161 /// Return true if this shuffle chooses elements from its source vectors
2162 /// without lane crossings and all operands have the same number of elements.
2163 /// In other words, this shuffle is equivalent to a vector select with a
2164 /// constant condition operand.
2165 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2166 /// This returns false if the mask does not choose from both input vectors.
2167 /// In that case, the shuffle is better classified as an identity shuffle.
2168 /// TODO: Optionally allow length-changing shuffles.
2169 bool isSelect() const {
2170 return !changesLength() && isSelectMask(ShuffleMask);
2171 }
2172
2173 /// Return true if this shuffle mask swaps the order of elements from exactly
2174 /// one source vector.
2175 /// Example: <7,6,undef,4>
2176 /// This assumes that vector operands are the same length as the mask.
2177 static bool isReverseMask(ArrayRef<int> Mask);
2178 static bool isReverseMask(const Constant *Mask) {
2179 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2179, __PRETTY_FUNCTION__))
;
2180 SmallVector<int, 16> MaskAsInts;
2181 getShuffleMask(Mask, MaskAsInts);
2182 return isReverseMask(MaskAsInts);
2183 }
2184
2185 /// Return true if this shuffle swaps the order of elements from exactly
2186 /// one source vector.
2187 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2188 /// TODO: Optionally allow length-changing shuffles.
2189 bool isReverse() const {
2190 return !changesLength() && isReverseMask(ShuffleMask);
2191 }
2192
2193 /// Return true if this shuffle mask chooses all elements with the same value
2194 /// as the first element of exactly one source vector.
2195 /// Example: <4,undef,undef,4>
2196 /// This assumes that vector operands are the same length as the mask.
2197 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2198 static bool isZeroEltSplatMask(const Constant *Mask) {
2199 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2199, __PRETTY_FUNCTION__))
;
2200 SmallVector<int, 16> MaskAsInts;
2201 getShuffleMask(Mask, MaskAsInts);
2202 return isZeroEltSplatMask(MaskAsInts);
2203 }
2204
2205 /// Return true if all elements of this shuffle are the same value as the
2206 /// first element of exactly one source vector without changing the length
2207 /// of that vector.
2208 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2209 /// TODO: Optionally allow length-changing shuffles.
2210 /// TODO: Optionally allow splats from other elements.
2211 bool isZeroEltSplat() const {
2212 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2213 }
2214
2215 /// Return true if this shuffle mask is a transpose mask.
2216 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2217 /// even- or odd-numbered vector elements from two n-dimensional source
2218 /// vectors and write each result into consecutive elements of an
2219 /// n-dimensional destination vector. Two shuffles are necessary to complete
2220 /// the transpose, one for the even elements and another for the odd elements.
2221 /// This description closely follows how the TRN1 and TRN2 AArch64
2222 /// instructions operate.
2223 ///
2224 /// For example, a simple 2x2 matrix can be transposed with:
2225 ///
2226 /// ; Original matrix
2227 /// m0 = < a, b >
2228 /// m1 = < c, d >
2229 ///
2230 /// ; Transposed matrix
2231 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2232 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2233 ///
2234 /// For matrices having greater than n columns, the resulting nx2 transposed
2235 /// matrix is stored in two result vectors such that one vector contains
2236 /// interleaved elements from all the even-numbered rows and the other vector
2237 /// contains interleaved elements from all the odd-numbered rows. For example,
2238 /// a 2x4 matrix can be transposed with:
2239 ///
2240 /// ; Original matrix
2241 /// m0 = < a, b, c, d >
2242 /// m1 = < e, f, g, h >
2243 ///
2244 /// ; Transposed matrix
2245 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2246 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2247 static bool isTransposeMask(ArrayRef<int> Mask);
2248 static bool isTransposeMask(const Constant *Mask) {
2249 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2249, __PRETTY_FUNCTION__))
;
2250 SmallVector<int, 16> MaskAsInts;
2251 getShuffleMask(Mask, MaskAsInts);
2252 return isTransposeMask(MaskAsInts);
2253 }
2254
2255 /// Return true if this shuffle transposes the elements of its inputs without
2256 /// changing the length of the vectors. This operation may also be known as a
2257 /// merge or interleave. See the description for isTransposeMask() for the
2258 /// exact specification.
2259 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2260 bool isTranspose() const {
2261 return !changesLength() && isTransposeMask(ShuffleMask);
2262 }
2263
2264 /// Return true if this shuffle mask is an extract subvector mask.
2265 /// A valid extract subvector mask returns a smaller vector from a single
2266 /// source operand. The base extraction index is returned as well.
// NOTE(review): Index is an out-parameter that the callee is expected to
// set on a true return; callers should not read it when false is returned.
2267 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2268 int &Index);
2269 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2270 int &Index) {
2271 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2271, __PRETTY_FUNCTION__))
;
2272 // Not possible to express a shuffle mask for a scalable vector for this
2273 // case.
2274 if (isa<ScalableVectorType>(Mask->getType()))
2275 return false;
2276 SmallVector<int, 16> MaskAsInts;
2277 getShuffleMask(Mask, MaskAsInts);
2278 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2279 }
2280
2281 /// Return true if this shuffle mask is an extract subvector mask.
2282 bool isExtractSubvectorMask(int &Index) const {
2283 // Not possible to express a shuffle mask for a scalable vector for this
2284 // case.
2285 if (isa<ScalableVectorType>(getType()))
2286 return false;
2287
// Only the result type was checked for scalability above; the source is
// cast to FixedVectorType here, so a scalable source would assert.
2288 int NumSrcElts =
2289 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2290 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2291 }
2292
2293 /// Change values in a shuffle permute mask assuming the two vector operands
2294 /// of length InVecNumElts have swapped position.
// Mutates Mask in place; -1 (undef) lanes are left untouched.
2295 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2296 unsigned InVecNumElts) {
2297 for (int &Idx : Mask) {
2298 if (Idx == -1)
2299 continue;
2300 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2301 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&((Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
"shufflevector mask index out of range") ? static_cast<void
> (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2302, __PRETTY_FUNCTION__))
2302 "shufflevector mask index out of range")((Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
"shufflevector mask index out of range") ? static_cast<void
> (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2302, __PRETTY_FUNCTION__))
;
2303 }
2304 }
2305
2306 // Methods for support type inquiry through isa, cast, and dyn_cast:
2307 static bool classof(const Instruction *I) {
2308 return I->getOpcode() == Instruction::ShuffleVector;
2309 }
2310 static bool classof(const Value *V) {
2311 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2312 }
2313};
2314
// ShuffleVectorInst has exactly 2 operands (the two source vectors); the
// mask is stored out-of-line in the instruction, not as an operand.
2315template <>
2316struct OperandTraits<ShuffleVectorInst>
2317 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2318
// Out-of-line definitions of ShuffleVectorInst's transparent operand
// accessors. NOTE(review): annotated listing — the macro and its expansion
// are fused below; the bounds checks are the expanded assert macro.
2319DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<ShuffleVectorInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2319, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ShuffleVectorInst>::op_begin(const_cast
<ShuffleVectorInst*>(this))[i_nocapture].get()); } void
ShuffleVectorInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<ShuffleVectorInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2319, __PRETTY_FUNCTION__)); OperandTraits<ShuffleVectorInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
ShuffleVectorInst::getNumOperands() const { return OperandTraits
<ShuffleVectorInst>::operands(this); } template <int
Idx_nocapture> Use &ShuffleVectorInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &ShuffleVectorInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
2320
2321//===----------------------------------------------------------------------===//
2322// ExtractValueInst Class
2323//===----------------------------------------------------------------------===//
2324
2325/// This instruction extracts a struct member or array
2326/// element value from an aggregate value.
2327///
2328class ExtractValueInst : public UnaryInstruction {
// Constant index path into the aggregate; stored inline, not as operands.
2329 SmallVector<unsigned, 4> Indices;
2330
2331 ExtractValueInst(const ExtractValueInst &EVI);
2332
2333 /// Constructors - Create a extractvalue instruction with a base aggregate
2334 /// value and a list of indices. The first ctor can optionally insert before
2335 /// an existing instruction, the second appends the new instruction to the
2336 /// specified BasicBlock.
2337 inline ExtractValueInst(Value *Agg,
2338 ArrayRef<unsigned> Idxs,
2339 const Twine &NameStr,
2340 Instruction *InsertBefore);
2341 inline ExtractValueInst(Value *Agg,
2342 ArrayRef<unsigned> Idxs,
2343 const Twine &NameStr, BasicBlock *InsertAtEnd);
2344
2345 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2346
2347protected:
2348 // Note: Instruction needs to be a friend here to call cloneImpl.
2349 friend class Instruction;
2350
2351 ExtractValueInst *cloneImpl() const;
2352
2353public:
// Constructors are private; these factories are the public creation API.
2354 static ExtractValueInst *Create(Value *Agg,
2355 ArrayRef<unsigned> Idxs,
2356 const Twine &NameStr = "",
2357 Instruction *InsertBefore = nullptr) {
2358 return new
2359 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2360 }
2361
2362 static ExtractValueInst *Create(Value *Agg,
2363 ArrayRef<unsigned> Idxs,
2364 const Twine &NameStr,
2365 BasicBlock *InsertAtEnd) {
2366 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2367 }
2368
2369 /// Returns the type of the element that would be extracted
2370 /// with an extractvalue instruction with the specified parameters.
2371 ///
2372 /// Null is returned if the indices are invalid for the specified type.
2373 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2374
2375 using idx_iterator = const unsigned*;
2376
2377 inline idx_iterator idx_begin() const { return Indices.begin(); }
2378 inline idx_iterator idx_end() const { return Indices.end(); }
2379 inline iterator_range<idx_iterator> indices() const {
2380 return make_range(idx_begin(), idx_end());
2381 }
2382
2383 Value *getAggregateOperand() {
2384 return getOperand(0);
2385 }
2386 const Value *getAggregateOperand() const {
2387 return getOperand(0);
2388 }
2389 static unsigned getAggregateOperandIndex() {
2390 return 0U; // get index for modifying correct operand
2391 }
2392
2393 ArrayRef<unsigned> getIndices() const {
2394 return Indices;
2395 }
2396
2397 unsigned getNumIndices() const {
2398 return (unsigned)Indices.size();
2399 }
2400
// Always true: an extractvalue by construction carries at least one index.
2401 bool hasIndices() const {
2402 return true;
2403 }
2404
2405 // Methods for support type inquiry through isa, cast, and dyn_cast:
2406 static bool classof(const Instruction *I) {
2407 return I->getOpcode() == Instruction::ExtractValue;
2408 }
2409 static bool classof(const Value *V) {
2410 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2411 }
2412};
2413
// Inline definition of the insert-before constructor. The result type is
// computed from the aggregate type and index path via getIndexedType;
// checkGEPType converts a null (invalid indices) into a hard error.
2414ExtractValueInst::ExtractValueInst(Value *Agg,
2415 ArrayRef<unsigned> Idxs,
2416 const Twine &NameStr,
2417 Instruction *InsertBefore)
2418 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2419 ExtractValue, Agg, InsertBefore) {
2420 init(Idxs, NameStr);
2421}
2422
// Inline definition of the append-to-block constructor; mirrors the
// insert-before overload above except for the insertion point.
2423ExtractValueInst::ExtractValueInst(Value *Agg,
2424 ArrayRef<unsigned> Idxs,
2425 const Twine &NameStr,
2426 BasicBlock *InsertAtEnd)
2427 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2428 ExtractValue, Agg, InsertAtEnd) {
2429 init(Idxs, NameStr);
2430}
2431
2432//===----------------------------------------------------------------------===//
2433// InsertValueInst Class
2434//===----------------------------------------------------------------------===//
2435
2436/// This instruction inserts a struct field of array element
2437/// value into an aggregate value.
2438///
2439class InsertValueInst : public Instruction {
// Constant index path into the aggregate; stored inline, not as operands.
2440 SmallVector<unsigned, 4> Indices;
2441
2442 InsertValueInst(const InsertValueInst &IVI);
2443
2444 /// Constructors - Create a insertvalue instruction with a base aggregate
2445 /// value, a value to insert, and a list of indices. The first ctor can
2446 /// optionally insert before an existing instruction, the second appends
2447 /// the new instruction to the specified BasicBlock.
2448 inline InsertValueInst(Value *Agg, Value *Val,
2449 ArrayRef<unsigned> Idxs,
2450 const Twine &NameStr,
2451 Instruction *InsertBefore);
2452 inline InsertValueInst(Value *Agg, Value *Val,
2453 ArrayRef<unsigned> Idxs,
2454 const Twine &NameStr, BasicBlock *InsertAtEnd);
2455
2456 /// Constructors - These two constructors are convenience methods because one
2457 /// and two index insertvalue instructions are so common.
2458 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2459 const Twine &NameStr = "",
2460 Instruction *InsertBefore = nullptr);
2461 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2462 BasicBlock *InsertAtEnd);
2463
2464 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2465 const Twine &NameStr);
2466
2467protected:
2468 // Note: Instruction needs to be a friend here to call cloneImpl.
2469 friend class Instruction;
2470
2471 InsertValueInst *cloneImpl() const;
2472
2473public:
2474 // allocate space for exactly two operands
2475 void *operator new(size_t s) {
2476 return User::operator new(s, 2);
2477 }
2478
// Constructors are private; these factories are the public creation API.
2479 static InsertValueInst *Create(Value *Agg, Value *Val,
2480 ArrayRef<unsigned> Idxs,
2481 const Twine &NameStr = "",
2482 Instruction *InsertBefore = nullptr) {
2483 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2484 }
2485
2486 static InsertValueInst *Create(Value *Agg, Value *Val,
2487 ArrayRef<unsigned> Idxs,
2488 const Twine &NameStr,
2489 BasicBlock *InsertAtEnd) {
2490 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2491 }
2492
2493 /// Transparently provide more efficient getOperand methods.
2494 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2495
2496 using idx_iterator = const unsigned*;
2497
2498 inline idx_iterator idx_begin() const { return Indices.begin(); }
2499 inline idx_iterator idx_end() const { return Indices.end(); }
2500 inline iterator_range<idx_iterator> indices() const {
2501 return make_range(idx_begin(), idx_end());
2502 }
2503
// Operand 0 is the aggregate being updated; operand 1 is the value
// inserted (see the *OperandIndex helpers below).
2504 Value *getAggregateOperand() {
2505 return getOperand(0);
2506 }
2507 const Value *getAggregateOperand() const {
2508 return getOperand(0);
2509 }
2510 static unsigned getAggregateOperandIndex() {
2511 return 0U; // get index for modifying correct operand
2512 }
2513
2514 Value *getInsertedValueOperand() {
2515 return getOperand(1);
2516 }
2517 const Value *getInsertedValueOperand() const {
2518 return getOperand(1);
2519 }
2520 static unsigned getInsertedValueOperandIndex() {
2521 return 1U; // get index for modifying correct operand
2522 }
2523
2524 ArrayRef<unsigned> getIndices() const {
2525 return Indices;
2526 }
2527
2528 unsigned getNumIndices() const {
2529 return (unsigned)Indices.size();
2530 }
2531
// Always true: an insertvalue by construction carries at least one index.
2532 bool hasIndices() const {
2533 return true;
2534 }
2535
2536 // Methods for support type inquiry through isa, cast, and dyn_cast:
2537 static bool classof(const Instruction *I) {
2538 return I->getOpcode() == Instruction::InsertValue;
2539 }
2540 static bool classof(const Value *V) {
2541 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2542 }
2543};
2544
// InsertValueInst has exactly 2 operands: the aggregate and the value to
// insert (the index path is stored inside the instruction).
2545template <>
2546struct OperandTraits<InsertValueInst> :
2547 public FixedNumOperandTraits<InsertValueInst, 2> {
2548};
2549
// Inline definition of the insert-before constructor. The result type is the
// aggregate's own type (insertvalue returns the updated aggregate); the two
// operand slots are wired up via OperandTraits before init() fills them.
2550InsertValueInst::InsertValueInst(Value *Agg,
2551 Value *Val,
2552 ArrayRef<unsigned> Idxs,
2553 const Twine &NameStr,
2554 Instruction *InsertBefore)
2555 : Instruction(Agg->getType(), InsertValue,
2556 OperandTraits<InsertValueInst>::op_begin(this),
2557 2, InsertBefore) {
2558 init(Agg, Val, Idxs, NameStr);
2559}
2560
// Inline definition of the append-to-block constructor; mirrors the
// insert-before overload above except for the insertion point.
2561InsertValueInst::InsertValueInst(Value *Agg,
2562 Value *Val,
2563 ArrayRef<unsigned> Idxs,
2564 const Twine &NameStr,
2565 BasicBlock *InsertAtEnd)
2566 : Instruction(Agg->getType(), InsertValue,
2567 OperandTraits<InsertValueInst>::op_begin(this),
2568 2, InsertAtEnd) {
2569 init(Agg, Val, Idxs, NameStr);
2570}
2571
// Out-of-line definitions of InsertValueInst's transparent operand
// accessors. NOTE(review): annotated listing — the macro and its expansion
// are fused below; the bounds checks are the expanded assert macro.
2572DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<InsertValueInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2572, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this))[i_nocapture].get()); } void InsertValueInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<InsertValueInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2572, __PRETTY_FUNCTION__)); OperandTraits<InsertValueInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
InsertValueInst::getNumOperands() const { return OperandTraits
<InsertValueInst>::operands(this); } template <int Idx_nocapture
> Use &InsertValueInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &InsertValueInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2573
2574//===----------------------------------------------------------------------===//
2575// PHINode Class
2576//===----------------------------------------------------------------------===//
2577
2578// PHINode - The PHINode class is used to represent the magical mystical PHI
2579// node, that can not exist in nature, but can be synthesized in a computer
2580// scientist's overactive imagination.
2581//
2582class PHINode : public Instruction {
2583 /// The number of operands actually allocated. NumOperands is
2584 /// the number actually in use.
2585 unsigned ReservedSpace;
2586
2587 PHINode(const PHINode &PN);
2588
2589 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2590 const Twine &NameStr = "",
2591 Instruction *InsertBefore = nullptr)
2592 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2593 ReservedSpace(NumReservedValues) {
2594 setName(NameStr);
2595 allocHungoffUses(ReservedSpace);
2596 }
2597
2598 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2599 BasicBlock *InsertAtEnd)
2600 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2601 ReservedSpace(NumReservedValues) {
2602 setName(NameStr);
2603 allocHungoffUses(ReservedSpace);
2604 }
2605
2606protected:
2607 // Note: Instruction needs to be a friend here to call cloneImpl.
2608 friend class Instruction;
2609
2610 PHINode *cloneImpl() const;
2611
2612 // allocHungoffUses - this is more complicated than the generic
2613 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2614 // values and pointers to the incoming blocks, all in one allocation.
2615 void allocHungoffUses(unsigned N) {
2616 User::allocHungoffUses(N, /* IsPhi */ true);
2617 }
2618
2619public:
2620 /// Constructors - NumReservedValues is a hint for the number of incoming
2621 /// edges that this phi node will have (use 0 if you really have no idea).
2622 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2623 const Twine &NameStr = "",
2624 Instruction *InsertBefore = nullptr) {
2625 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2626 }
2627
2628 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2629 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2630 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2631 }
2632
2633 /// Provide fast operand accessors
2634 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2635
2636 // Block iterator interface. This provides access to the list of incoming
2637 // basic blocks, which parallels the list of incoming values.
2638
2639 using block_iterator = BasicBlock **;
2640 using const_block_iterator = BasicBlock * const *;
2641
2642 block_iterator block_begin() {
2643 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2644 }
2645
2646 const_block_iterator block_begin() const {
2647 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2648 }
2649
2650 block_iterator block_end() {
2651 return block_begin() + getNumOperands();
2652 }
2653
2654 const_block_iterator block_end() const {
2655 return block_begin() + getNumOperands();
2656 }
2657
2658 iterator_range<block_iterator> blocks() {
2659 return make_range(block_begin(), block_end());
2660 }
2661
2662 iterator_range<const_block_iterator> blocks() const {
2663 return make_range(block_begin(), block_end());
2664 }
2665
2666 op_range incoming_values() { return operands(); }
2667
2668 const_op_range incoming_values() const { return operands(); }
2669
2670 /// Return the number of incoming edges
2671 ///
2672 unsigned getNumIncomingValues() const { return getNumOperands(); }
2673
2674 /// Return incoming value number x
2675 ///
2676 Value *getIncomingValue(unsigned i) const {
2677 return getOperand(i);
2678 }
2679 void setIncomingValue(unsigned i, Value *V) {
2680 assert(V && "PHI node got a null value!")((V && "PHI node got a null value!") ? static_cast<
void> (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2680, __PRETTY_FUNCTION__))
;
2681 assert(getType() == V->getType() &&((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!"
) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2682, __PRETTY_FUNCTION__))
2682 "All operands to PHI node must be the same type as the PHI node!")((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!"
) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2682, __PRETTY_FUNCTION__))
;
2683 setOperand(i, V);
2684 }
2685
2686 static unsigned getOperandNumForIncomingValue(unsigned i) {
2687 return i;
2688 }
2689
2690 static unsigned getIncomingValueNumForOperand(unsigned i) {
2691 return i;
2692 }
2693
2694 /// Return incoming basic block number @p i.
2695 ///
2696 BasicBlock *getIncomingBlock(unsigned i) const {
2697 return block_begin()[i];
2698 }
2699
2700 /// Return incoming basic block corresponding
2701 /// to an operand of the PHI.
2702 ///
2703 BasicBlock *getIncomingBlock(const Use &U) const {
2704 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")((this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? static_cast<void> (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2704, __PRETTY_FUNCTION__))
;
2705 return getIncomingBlock(unsigned(&U - op_begin()));
2706 }
2707
2708 /// Return incoming basic block corresponding
2709 /// to value use iterator.
2710 ///
2711 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2712 return getIncomingBlock(I.getUse());
2713 }
2714
2715 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2716 assert(BB && "PHI node got a null basic block!")((BB && "PHI node got a null basic block!") ? static_cast
<void> (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2716, __PRETTY_FUNCTION__))
;
2717 block_begin()[i] = BB;
2718 }
2719
2720 /// Replace every incoming basic block \p Old to basic block \p New.
2721 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2722 assert(New && Old && "PHI node got a null basic block!")((New && Old && "PHI node got a null basic block!"
) ? static_cast<void> (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2722, __PRETTY_FUNCTION__))
;
2723 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2724 if (getIncomingBlock(Op) == Old)
2725 setIncomingBlock(Op, New);
2726 }
2727
2728 /// Add an incoming value to the end of the PHI list
2729 ///
2730 void addIncoming(Value *V, BasicBlock *BB) {
2731 if (getNumOperands() == ReservedSpace)
2732 growOperands(); // Get more space!
2733 // Initialize some new operands.
2734 setNumHungOffUseOperands(getNumOperands() + 1);
2735 setIncomingValue(getNumOperands() - 1, V);
2736 setIncomingBlock(getNumOperands() - 1, BB);
2737 }
2738
2739 /// Remove an incoming value. This is useful if a
2740 /// predecessor basic block is deleted. The value removed is returned.
2741 ///
2742 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2743 /// is true), the PHI node is destroyed and any uses of it are replaced with
2744 /// dummy values. The only time there should be zero incoming values to a PHI
2745 /// node is when the block is dead, so this strategy is sound.
2746 ///
2747 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2748
2749 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2750 int Idx = getBasicBlockIndex(BB);
2751 assert(Idx >= 0 && "Invalid basic block argument to remove!")((Idx >= 0 && "Invalid basic block argument to remove!"
) ? static_cast<void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2751, __PRETTY_FUNCTION__))
;
2752 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2753 }
2754
2755 /// Return the first index of the specified basic
2756 /// block in the value list for this PHI. Returns -1 if no instance.
2757 ///
2758 int getBasicBlockIndex(const BasicBlock *BB) const {
2759 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2760 if (block_begin()[i] == BB)
2761 return i;
2762 return -1;
2763 }
2764
2765 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2766 int Idx = getBasicBlockIndex(BB);
2767 assert(Idx >= 0 && "Invalid basic block argument!")((Idx >= 0 && "Invalid basic block argument!") ? static_cast
<void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2767, __PRETTY_FUNCTION__))
;
2768 return getIncomingValue(Idx);
2769 }
2770
2771 /// Set every incoming value(s) for block \p BB to \p V.
2772 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2773 assert(BB && "PHI node got a null basic block!")((BB && "PHI node got a null basic block!") ? static_cast
<void> (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2773, __PRETTY_FUNCTION__))
;
2774 bool Found = false;
2775 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2776 if (getIncomingBlock(Op) == BB) {
2777 Found = true;
2778 setIncomingValue(Op, V);
2779 }
2780 (void)Found;
2781 assert(Found && "Invalid basic block argument to set!")((Found && "Invalid basic block argument to set!") ? static_cast
<void> (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2781, __PRETTY_FUNCTION__))
;
2782 }
2783
2784 /// If the specified PHI node always merges together the
2785 /// same value, return the value, otherwise return null.
2786 Value *hasConstantValue() const;
2787
2788 /// Whether the specified PHI node always merges
2789 /// together the same value, assuming undefs are equal to a unique
2790 /// non-undef value.
2791 bool hasConstantOrUndefValue() const;
2792
2793 /// If the PHI node is complete which means all of its parent's predecessors
2794 /// have incoming value in this PHI, return true, otherwise return false.
2795 bool isComplete() const {
2796 return llvm::all_of(predecessors(getParent()),
2797 [this](const BasicBlock *Pred) {
2798 return getBasicBlockIndex(Pred) >= 0;
2799 });
2800 }
2801
2802 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2803 static bool classof(const Instruction *I) {
2804 return I->getOpcode() == Instruction::PHI;
2805 }
2806 static bool classof(const Value *V) {
2807 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2808 }
2809
2810private:
2811 void growOperands();
2812};
2813
2814template <>
2815struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2816};
2817
2818DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { ((i_nocapture < OperandTraits<PHINode>::operands
(this) && "getOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2818, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<PHINode>::op_begin(const_cast<PHINode
*>(this))[i_nocapture].get()); } void PHINode::setOperand(
unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<PHINode>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2818, __PRETTY_FUNCTION__)); OperandTraits<PHINode>::
op_begin(this)[i_nocapture] = Val_nocapture; } unsigned PHINode
::getNumOperands() const { return OperandTraits<PHINode>
::operands(this); } template <int Idx_nocapture> Use &
PHINode::Op() { return this->OpFrom<Idx_nocapture>(this
); } template <int Idx_nocapture> const Use &PHINode
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2819
2820//===----------------------------------------------------------------------===//
2821// LandingPadInst Class
2822//===----------------------------------------------------------------------===//
2823
2824//===---------------------------------------------------------------------------
2825/// The landingpad instruction holds all of the information
2826/// necessary to generate correct exception handling. The landingpad instruction
2827/// cannot be moved from the top of a landing pad block, which itself is
2828/// accessible only from the 'unwind' edge of an invoke. This uses the
2829/// SubclassData field in Value to store whether or not the landingpad is a
2830/// cleanup.
2831///
2832class LandingPadInst : public Instruction {
2833 using CleanupField = BoolBitfieldElementT<0>;
2834
2835 /// The number of operands actually allocated. NumOperands is
2836 /// the number actually in use.
2837 unsigned ReservedSpace;
2838
2839 LandingPadInst(const LandingPadInst &LP);
2840
2841public:
2842 enum ClauseType { Catch, Filter };
2843
2844private:
2845 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2846 const Twine &NameStr, Instruction *InsertBefore);
2847 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2848 const Twine &NameStr, BasicBlock *InsertAtEnd);
2849
2850 // Allocate space for exactly zero operands.
2851 void *operator new(size_t s) {
2852 return User::operator new(s);
2853 }
2854
2855 void growOperands(unsigned Size);
2856 void init(unsigned NumReservedValues, const Twine &NameStr);
2857
2858protected:
2859 // Note: Instruction needs to be a friend here to call cloneImpl.
2860 friend class Instruction;
2861
2862 LandingPadInst *cloneImpl() const;
2863
2864public:
2865 /// Constructors - NumReservedClauses is a hint for the number of incoming
2866 /// clauses that this landingpad will have (use 0 if you really have no idea).
2867 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2868 const Twine &NameStr = "",
2869 Instruction *InsertBefore = nullptr);
2870 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2871 const Twine &NameStr, BasicBlock *InsertAtEnd);
2872
2873 /// Provide fast operand accessors
2874 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2875
2876 /// Return 'true' if this landingpad instruction is a
2877 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2878 /// doesn't catch the exception.
2879 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2880
2881 /// Indicate that this landingpad instruction is a cleanup.
2882 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2883
2884 /// Add a catch or filter clause to the landing pad.
2885 void addClause(Constant *ClauseVal);
2886
2887 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2888 /// determine what type of clause this is.
2889 Constant *getClause(unsigned Idx) const {
2890 return cast<Constant>(getOperandList()[Idx]);
2891 }
2892
2893 /// Return 'true' if the clause and index Idx is a catch clause.
2894 bool isCatch(unsigned Idx) const {
2895 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2896 }
2897
2898 /// Return 'true' if the clause and index Idx is a filter clause.
2899 bool isFilter(unsigned Idx) const {
2900 return isa<ArrayType>(getOperandList()[Idx]->getType());
2901 }
2902
2903 /// Get the number of clauses for this landing pad.
2904 unsigned getNumClauses() const { return getNumOperands(); }
2905
2906 /// Grow the size of the operand list to accommodate the new
2907 /// number of clauses.
2908 void reserveClauses(unsigned Size) { growOperands(Size); }
2909
2910 // Methods for support type inquiry through isa, cast, and dyn_cast:
2911 static bool classof(const Instruction *I) {
2912 return I->getOpcode() == Instruction::LandingPad;
2913 }
2914 static bool classof(const Value *V) {
2915 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2916 }
2917};
2918
2919template <>
2920struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2921};
2922
2923DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<LandingPadInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2923, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this))[i_nocapture].get()); } void LandingPadInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2923, __PRETTY_FUNCTION__)); OperandTraits<LandingPadInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
LandingPadInst::getNumOperands() const { return OperandTraits
<LandingPadInst>::operands(this); } template <int Idx_nocapture
> Use &LandingPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &LandingPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2924
2925//===----------------------------------------------------------------------===//
2926// ReturnInst Class
2927//===----------------------------------------------------------------------===//
2928
2929//===---------------------------------------------------------------------------
2930/// Return a value (possibly void), from a function. Execution
2931/// does not continue in this function any longer.
2932///
2933class ReturnInst : public Instruction {
2934 ReturnInst(const ReturnInst &RI);
2935
2936private:
2937 // ReturnInst constructors:
2938 // ReturnInst() - 'ret void' instruction
2939 // ReturnInst( null) - 'ret void' instruction
2940 // ReturnInst(Value* X) - 'ret X' instruction
2941 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2942 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2943 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2944 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2945 //
2946 // NOTE: If the Value* passed is of type void then the constructor behaves as
2947 // if it was passed NULL.
2948 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2949 Instruction *InsertBefore = nullptr);
2950 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2951 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2952
2953protected:
2954 // Note: Instruction needs to be a friend here to call cloneImpl.
2955 friend class Instruction;
2956
2957 ReturnInst *cloneImpl() const;
2958
2959public:
2960 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2961 Instruction *InsertBefore = nullptr) {
2962 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2963 }
2964
2965 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2966 BasicBlock *InsertAtEnd) {
2967 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2968 }
2969
2970 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2971 return new(0) ReturnInst(C, InsertAtEnd);
2972 }
2973
2974 /// Provide fast operand accessors
2975 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2976
2977 /// Convenience accessor. Returns null if there is no return value.
2978 Value *getReturnValue() const {
2979 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2980 }
2981
2982 unsigned getNumSuccessors() const { return 0; }
2983
2984 // Methods for support type inquiry through isa, cast, and dyn_cast:
2985 static bool classof(const Instruction *I) {
2986 return (I->getOpcode() == Instruction::Ret);
2987 }
2988 static bool classof(const Value *V) {
2989 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2990 }
2991
2992private:
2993 BasicBlock *getSuccessor(unsigned idx) const {
2994 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2994)
;
2995 }
2996
2997 void setSuccessor(unsigned idx, BasicBlock *B) {
2998 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 2998)
;
2999 }
3000};
3001
3002template <>
3003struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3004};
3005
3006DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<ReturnInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3006, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ReturnInst>::op_begin(const_cast<ReturnInst
*>(this))[i_nocapture].get()); } void ReturnInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<ReturnInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3006, __PRETTY_FUNCTION__)); OperandTraits<ReturnInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ReturnInst
::getNumOperands() const { return OperandTraits<ReturnInst
>::operands(this); } template <int Idx_nocapture> Use
&ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3007
3008//===----------------------------------------------------------------------===//
3009// BranchInst Class
3010//===----------------------------------------------------------------------===//
3011
3012//===---------------------------------------------------------------------------
3013/// Conditional or Unconditional Branch instruction.
3014///
3015class BranchInst : public Instruction {
3016 /// Ops list - Branches are strange. The operands are ordered:
3017 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3018 /// they don't have to check for cond/uncond branchness. These are mostly
3019 /// accessed relative from op_end().
3020 BranchInst(const BranchInst &BI);
3021 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3022 // BranchInst(BB *B) - 'br B'
3023 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3024 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3025 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3026 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3027 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3028 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3029 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3030 Instruction *InsertBefore = nullptr);
3031 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3032 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3033 BasicBlock *InsertAtEnd);
3034
3035 void AssertOK();
3036
3037protected:
3038 // Note: Instruction needs to be a friend here to call cloneImpl.
3039 friend class Instruction;
3040
3041 BranchInst *cloneImpl() const;
3042
3043public:
3044 /// Iterator type that casts an operand to a basic block.
3045 ///
3046 /// This only makes sense because the successors are stored as adjacent
3047 /// operands for branch instructions.
3048 struct succ_op_iterator
3049 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3050 std::random_access_iterator_tag, BasicBlock *,
3051 ptrdiff_t, BasicBlock *, BasicBlock *> {
3052 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3053
3054 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3055 BasicBlock *operator->() const { return operator*(); }
3056 };
3057
3058 /// The const version of `succ_op_iterator`.
3059 struct const_succ_op_iterator
3060 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3061 std::random_access_iterator_tag,
3062 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3063 const BasicBlock *> {
3064 explicit const_succ_op_iterator(const_value_op_iterator I)
3065 : iterator_adaptor_base(I) {}
3066
3067 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3068 const BasicBlock *operator->() const { return operator*(); }
3069 };
3070
3071 static BranchInst *Create(BasicBlock *IfTrue,
3072 Instruction *InsertBefore = nullptr) {
3073 return new(1) BranchInst(IfTrue, InsertBefore);
3074 }
3075
3076 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3077 Value *Cond, Instruction *InsertBefore = nullptr) {
3078 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3079 }
3080
3081 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3082 return new(1) BranchInst(IfTrue, InsertAtEnd);
3083 }
3084
3085 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3086 Value *Cond, BasicBlock *InsertAtEnd) {
3087 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3088 }
3089
3090 /// Transparently provide more efficient getOperand methods.
3091 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3092
3093 bool isUnconditional() const { return getNumOperands() == 1; }
43
Assuming the condition is false
44
Returning zero, which participates in a condition later
3094 bool isConditional() const { return getNumOperands() == 3; }
4
Assuming the condition is true
5
Returning the value 1, which participates in a condition later
47
Returning the value 1, which participates in a condition later
55
Returning the value 1, which participates in a condition later
58
Returning the value 1, which participates in a condition later
71
Returning the value 1, which participates in a condition later
3095
3096 Value *getCondition() const {
3097 assert(isConditional() && "Cannot get condition of an uncond branch!")((isConditional() && "Cannot get condition of an uncond branch!"
) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3097, __PRETTY_FUNCTION__))
;
3098 return Op<-3>();
3099 }
3100
3101 void setCondition(Value *V) {
3102 assert(isConditional() && "Cannot set condition of unconditional branch!")((isConditional() && "Cannot set condition of unconditional branch!"
) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3102, __PRETTY_FUNCTION__))
;
3103 Op<-3>() = V;
3104 }
3105
3106 unsigned getNumSuccessors() const { return 1+isConditional(); }
3107
3108 BasicBlock *getSuccessor(unsigned i) const {
3109 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")((i < getNumSuccessors() && "Successor # out of range for Branch!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3109, __PRETTY_FUNCTION__))
;
3110 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3111 }
3112
3113 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3114 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")((idx < getNumSuccessors() && "Successor # out of range for Branch!"
) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3114, __PRETTY_FUNCTION__))
;
3115 *(&Op<-1>() - idx) = NewSucc;
3116 }
3117
3118 /// Swap the successors of this branch instruction.
3119 ///
3120 /// Swaps the successors of the branch instruction. This also swaps any
3121 /// branch weight metadata associated with the instruction so that it
3122 /// continues to map correctly to each operand.
3123 void swapSuccessors();
3124
3125 iterator_range<succ_op_iterator> successors() {
3126 return make_range(
3127 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3128 succ_op_iterator(value_op_end()));
3129 }
3130
3131 iterator_range<const_succ_op_iterator> successors() const {
3132 return make_range(const_succ_op_iterator(
3133 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3134 const_succ_op_iterator(value_op_end()));
3135 }
3136
3137 // Methods for support type inquiry through isa, cast, and dyn_cast:
3138 static bool classof(const Instruction *I) {
3139 return (I->getOpcode() == Instruction::Br);
3140 }
3141 static bool classof(const Value *V) {
3142 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3143 }
3144};
3145
3146template <>
3147struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3148};
3149
3150DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<BranchInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3150, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<BranchInst>::op_begin(const_cast<BranchInst
*>(this))[i_nocapture].get()); } void BranchInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<BranchInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3150, __PRETTY_FUNCTION__)); OperandTraits<BranchInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned BranchInst
::getNumOperands() const { return OperandTraits<BranchInst
>::operands(this); } template <int Idx_nocapture> Use
&BranchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
BranchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3151
3152//===----------------------------------------------------------------------===//
3153// SwitchInst Class
3154//===----------------------------------------------------------------------===//
3155
3156//===---------------------------------------------------------------------------
3157/// Multiway switch
3158///
3159class SwitchInst : public Instruction {
3160 unsigned ReservedSpace;
3161
3162 // Operand[0] = Value to switch on
3163 // Operand[1] = Default basic block destination
3164 // Operand[2n ] = Value to match
3165 // Operand[2n+1] = BasicBlock to go to on match
3166 SwitchInst(const SwitchInst &SI);
3167
3168 /// Create a new switch instruction, specifying a value to switch on and a
3169 /// default destination. The number of additional cases can be specified here
3170 /// to make memory allocation more efficient. This constructor can also
3171 /// auto-insert before another instruction.
3172 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3173 Instruction *InsertBefore);
3174
3175 /// Create a new switch instruction, specifying a value to switch on and a
3176 /// default destination. The number of additional cases can be specified here
3177 /// to make memory allocation more efficient. This constructor also
3178 /// auto-inserts at the end of the specified BasicBlock.
3179 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3180 BasicBlock *InsertAtEnd);
3181
3182 // allocate space for exactly zero operands
3183 void *operator new(size_t s) {
3184 return User::operator new(s);
3185 }
3186
3187 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3188 void growOperands();
3189
3190protected:
3191 // Note: Instruction needs to be a friend here to call cloneImpl.
3192 friend class Instruction;
3193
3194 SwitchInst *cloneImpl() const;
3195
3196public:
3197 // -2
3198 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3199
3200 template <typename CaseHandleT> class CaseIteratorImpl;
3201
3202 /// A handle to a particular switch case. It exposes a convenient interface
3203 /// to both the case value and the successor block.
3204 ///
3205 /// We define this as a template and instantiate it to form both a const and
3206 /// non-const handle.
3207 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3208 class CaseHandleImpl {
3209 // Directly befriend both const and non-const iterators.
3210 friend class SwitchInst::CaseIteratorImpl<
3211 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3212
3213 protected:
3214 // Expose the switch type we're parameterized with to the iterator.
3215 using SwitchInstType = SwitchInstT;
3216
3217 SwitchInstT *SI;
3218 ptrdiff_t Index;
3219
3220 CaseHandleImpl() = default;
3221 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3222
3223 public:
3224 /// Resolves case value for current case.
3225 ConstantIntT *getCaseValue() const {
3226 assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3227, __PRETTY_FUNCTION__))
3227 "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3227, __PRETTY_FUNCTION__))
;
3228 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3229 }
3230
3231 /// Resolves successor for current case.
3232 BasicBlockT *getCaseSuccessor() const {
3233 assert(((unsigned)Index < SI->getNumCases() ||((((unsigned)Index < SI->getNumCases() || (unsigned)Index
== DefaultPseudoIndex) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3235, __PRETTY_FUNCTION__))
3234 (unsigned)Index == DefaultPseudoIndex) &&((((unsigned)Index < SI->getNumCases() || (unsigned)Index
== DefaultPseudoIndex) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3235, __PRETTY_FUNCTION__))
3235 "Index out the number of cases.")((((unsigned)Index < SI->getNumCases() || (unsigned)Index
== DefaultPseudoIndex) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3235, __PRETTY_FUNCTION__))
;
3236 return SI->getSuccessor(getSuccessorIndex());
3237 }
3238
3239 /// Returns number of current case.
3240 unsigned getCaseIndex() const { return Index; }
3241
3242 /// Returns successor index for current case successor.
3243 unsigned getSuccessorIndex() const {
3244 assert(((unsigned)Index == DefaultPseudoIndex ||((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index <
SI->getNumCases()) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3246, __PRETTY_FUNCTION__))
3245 (unsigned)Index < SI->getNumCases()) &&((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index <
SI->getNumCases()) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3246, __PRETTY_FUNCTION__))
3246 "Index out the number of cases.")((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index <
SI->getNumCases()) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3246, __PRETTY_FUNCTION__))
;
3247 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3248 }
3249
3250 bool operator==(const CaseHandleImpl &RHS) const {
3251 assert(SI == RHS.SI && "Incompatible operators.")((SI == RHS.SI && "Incompatible operators.") ? static_cast
<void> (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3251, __PRETTY_FUNCTION__))
;
3252 return Index == RHS.Index;
3253 }
3254 };
3255
3256 using ConstCaseHandle =
3257 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3258
3259 class CaseHandle
3260 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3261 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3262
3263 public:
3264 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3265
3266 /// Sets the new value for current case.
3267 void setValue(ConstantInt *V) {
3268 assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3269, __PRETTY_FUNCTION__))
3269 "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3269, __PRETTY_FUNCTION__))
;
3270 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3271 }
3272
3273 /// Sets the new successor for current case.
3274 void setSuccessor(BasicBlock *S) {
3275 SI->setSuccessor(getSuccessorIndex(), S);
3276 }
3277 };
3278
3279 template <typename CaseHandleT>
3280 class CaseIteratorImpl
3281 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3282 std::random_access_iterator_tag,
3283 CaseHandleT> {
3284 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3285
3286 CaseHandleT Case;
3287
3288 public:
3289 /// Default constructed iterator is in an invalid state until assigned to
3290 /// a case for a particular switch.
3291 CaseIteratorImpl() = default;
3292
3293 /// Initializes case iterator for given SwitchInst and for given
3294 /// case number.
3295 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3296
3297 /// Initializes case iterator for given SwitchInst and for given
3298 /// successor index.
3299 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3300 unsigned SuccessorIndex) {
3301 assert(SuccessorIndex < SI->getNumSuccessors() &&((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!"
) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3302, __PRETTY_FUNCTION__))
3302 "Successor index # out of range!")((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!"
) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3302, __PRETTY_FUNCTION__))
;
3303 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3304 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3305 }
3306
3307 /// Support converting to the const variant. This will be a no-op for const
3308 /// variant.
3309 operator CaseIteratorImpl<ConstCaseHandle>() const {
3310 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3311 }
3312
3313 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3314 // Check index correctness after addition.
3315 // Note: Index == getNumCases() means end().
3316 assert(Case.Index + N >= 0 &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3318, __PRETTY_FUNCTION__))
3317 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3318, __PRETTY_FUNCTION__))
3318 "Case.Index out the number of cases.")((Case.Index + N >= 0 && (unsigned)(Case.Index + N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3318, __PRETTY_FUNCTION__))
;
3319 Case.Index += N;
3320 return *this;
3321 }
3322 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3323 // Check index correctness after subtraction.
3324 // Note: Case.Index == getNumCases() means end().
3325 assert(Case.Index - N >= 0 &&((Case.Index - N >= 0 && (unsigned)(Case.Index - N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3327, __PRETTY_FUNCTION__))
3326 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&((Case.Index - N >= 0 && (unsigned)(Case.Index - N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3327, __PRETTY_FUNCTION__))
3327 "Case.Index out the number of cases.")((Case.Index - N >= 0 && (unsigned)(Case.Index - N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3327, __PRETTY_FUNCTION__))
;
3328 Case.Index -= N;
3329 return *this;
3330 }
3331 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3332 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((Case.SI == RHS.Case.SI && "Incompatible operators."
) ? static_cast<void> (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3332, __PRETTY_FUNCTION__))
;
3333 return Case.Index - RHS.Case.Index;
3334 }
3335 bool operator==(const CaseIteratorImpl &RHS) const {
3336 return Case == RHS.Case;
3337 }
3338 bool operator<(const CaseIteratorImpl &RHS) const {
3339 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((Case.SI == RHS.Case.SI && "Incompatible operators."
) ? static_cast<void> (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3339, __PRETTY_FUNCTION__))
;
3340 return Case.Index < RHS.Case.Index;
3341 }
3342 CaseHandleT &operator*() { return Case; }
3343 const CaseHandleT &operator*() const { return Case; }
3344 };
3345
3346 using CaseIt = CaseIteratorImpl<CaseHandle>;
3347 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3348
3349 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3350 unsigned NumCases,
3351 Instruction *InsertBefore = nullptr) {
3352 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3353 }
3354
3355 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3356 unsigned NumCases, BasicBlock *InsertAtEnd) {
3357 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3358 }
3359
3360 /// Provide fast operand accessors
3361 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3362
3363 // Accessor Methods for Switch stmt
3364 Value *getCondition() const { return getOperand(0); }
3365 void setCondition(Value *V) { setOperand(0, V); }
3366
3367 BasicBlock *getDefaultDest() const {
3368 return cast<BasicBlock>(getOperand(1));
3369 }
3370
3371 void setDefaultDest(BasicBlock *DefaultCase) {
3372 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3373 }
3374
3375 /// Return the number of 'cases' in this switch instruction, excluding the
3376 /// default case.
3377 unsigned getNumCases() const {
3378 return getNumOperands()/2 - 1;
3379 }
3380
3381 /// Returns a read/write iterator that points to the first case in the
3382 /// SwitchInst.
3383 CaseIt case_begin() {
3384 return CaseIt(this, 0);
3385 }
3386
3387 /// Returns a read-only iterator that points to the first case in the
3388 /// SwitchInst.
3389 ConstCaseIt case_begin() const {
3390 return ConstCaseIt(this, 0);
3391 }
3392
3393 /// Returns a read/write iterator that points one past the last in the
3394 /// SwitchInst.
3395 CaseIt case_end() {
3396 return CaseIt(this, getNumCases());
3397 }
3398
3399 /// Returns a read-only iterator that points one past the last in the
3400 /// SwitchInst.
3401 ConstCaseIt case_end() const {
3402 return ConstCaseIt(this, getNumCases());
3403 }
3404
3405 /// Iteration adapter for range-for loops.
3406 iterator_range<CaseIt> cases() {
3407 return make_range(case_begin(), case_end());
3408 }
3409
3410 /// Constant iteration adapter for range-for loops.
3411 iterator_range<ConstCaseIt> cases() const {
3412 return make_range(case_begin(), case_end());
3413 }
3414
3415 /// Returns an iterator that points to the default case.
3416 /// Note: this iterator allows to resolve successor only. Attempt
3417 /// to resolve case value causes an assertion.
3418 /// Also note, that increment and decrement also causes an assertion and
3419 /// makes iterator invalid.
3420 CaseIt case_default() {
3421 return CaseIt(this, DefaultPseudoIndex);
3422 }
3423 ConstCaseIt case_default() const {
3424 return ConstCaseIt(this, DefaultPseudoIndex);
3425 }
3426
3427 /// Search all of the case values for the specified constant. If it is
3428 /// explicitly handled, return the case iterator of it, otherwise return
3429 /// default case iterator to indicate that it is handled by the default
3430 /// handler.
3431 CaseIt findCaseValue(const ConstantInt *C) {
3432 CaseIt I = llvm::find_if(
3433 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3434 if (I != case_end())
3435 return I;
3436
3437 return case_default();
3438 }
3439 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3440 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3441 return Case.getCaseValue() == C;
3442 });
3443 if (I != case_end())
3444 return I;
3445
3446 return case_default();
3447 }
3448
3449 /// Finds the unique case value for a given successor. Returns null if the
3450 /// successor is not found, not unique, or is the default case.
3451 ConstantInt *findCaseDest(BasicBlock *BB) {
3452 if (BB == getDefaultDest())
3453 return nullptr;
3454
3455 ConstantInt *CI = nullptr;
3456 for (auto Case : cases()) {
3457 if (Case.getCaseSuccessor() != BB)
3458 continue;
3459
3460 if (CI)
3461 return nullptr; // Multiple cases lead to BB.
3462
3463 CI = Case.getCaseValue();
3464 }
3465
3466 return CI;
3467 }
3468
3469 /// Add an entry to the switch instruction.
3470 /// Note:
3471 /// This action invalidates case_end(). Old case_end() iterator will
3472 /// point to the added case.
3473 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3474
3475 /// This method removes the specified case and its successor from the switch
3476 /// instruction. Note that this operation may reorder the remaining cases at
3477 /// index idx and above.
3478 /// Note:
3479 /// This action invalidates iterators for all cases following the one removed,
3480 /// including the case_end() iterator. It returns an iterator for the next
3481 /// case.
3482 CaseIt removeCase(CaseIt I);
3483
3484 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3485 BasicBlock *getSuccessor(unsigned idx) const {
3486 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")((idx < getNumSuccessors() &&"Successor idx out of range for switch!"
) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3486, __PRETTY_FUNCTION__))
;
3487 return cast<BasicBlock>(getOperand(idx*2+1));
3488 }
3489 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3490 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")((idx < getNumSuccessors() && "Successor # out of range for switch!"
) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for switch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3490, __PRETTY_FUNCTION__))
;
3491 setOperand(idx * 2 + 1, NewSucc);
3492 }
3493
3494 // Methods for support type inquiry through isa, cast, and dyn_cast:
3495 static bool classof(const Instruction *I) {
3496 return I->getOpcode() == Instruction::Switch;
3497 }
3498 static bool classof(const Value *V) {
3499 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3500 }
3501};
3502
3503/// A wrapper class to simplify modification of SwitchInst cases along with
3504/// their prof branch_weights metadata.
3505class SwitchInstProfUpdateWrapper {
3506 SwitchInst &SI;
3507 Optional<SmallVector<uint32_t, 8> > Weights = None;
3508 bool Changed = false;
3509
3510protected:
3511 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3512
3513 MDNode *buildProfBranchWeightsMD();
3514
3515 void init();
3516
3517public:
3518 using CaseWeightOpt = Optional<uint32_t>;
3519 SwitchInst *operator->() { return &SI; }
3520 SwitchInst &operator*() { return SI; }
3521 operator SwitchInst *() { return &SI; }
3522
3523 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3524
3525 ~SwitchInstProfUpdateWrapper() {
3526 if (Changed)
3527 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3528 }
3529
3530 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3531 /// correspondent branch weight.
3532 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3533
3534 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3535 /// specified branch weight for the added case.
3536 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3537
3538 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3539 /// this object to not touch the underlying SwitchInst in destructor.
3540 SymbolTableList<Instruction>::iterator eraseFromParent();
3541
3542 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3543 CaseWeightOpt getSuccessorWeight(unsigned idx);
3544
3545 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3546};
3547
3548template <>
3549struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3550};
3551
3552DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits
<SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator
SwitchInst::op_begin() const { return OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst
::op_iterator SwitchInst::op_end() { return OperandTraits<
SwitchInst>::op_end(this); } SwitchInst::const_op_iterator
SwitchInst::op_end() const { return OperandTraits<SwitchInst
>::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<SwitchInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3552, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<SwitchInst>::op_begin(const_cast<SwitchInst
*>(this))[i_nocapture].get()); } void SwitchInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<SwitchInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3552, __PRETTY_FUNCTION__)); OperandTraits<SwitchInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned SwitchInst
::getNumOperands() const { return OperandTraits<SwitchInst
>::operands(this); } template <int Idx_nocapture> Use
&SwitchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SwitchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3553
3554//===----------------------------------------------------------------------===//
3555// IndirectBrInst Class
3556//===----------------------------------------------------------------------===//
3557
3558//===---------------------------------------------------------------------------
3559/// Indirect Branch Instruction.
3560///
3561class IndirectBrInst : public Instruction {
3562 unsigned ReservedSpace;
3563
3564 // Operand[0] = Address to jump to
3565 // Operand[n+1] = n-th destination
3566 IndirectBrInst(const IndirectBrInst &IBI);
3567
3568 /// Create a new indirectbr instruction, specifying an
3569 /// Address to jump to. The number of expected destinations can be specified
3570 /// here to make memory allocation more efficient. This constructor can also
3571 /// autoinsert before another instruction.
3572 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3573
3574 /// Create a new indirectbr instruction, specifying an
3575 /// Address to jump to. The number of expected destinations can be specified
3576 /// here to make memory allocation more efficient. This constructor also
3577 /// autoinserts at the end of the specified BasicBlock.
3578 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3579
3580 // allocate space for exactly zero operands
3581 void *operator new(size_t s) {
3582 return User::operator new(s);
3583 }
3584
3585 void init(Value *Address, unsigned NumDests);
3586 void growOperands();
3587
3588protected:
3589 // Note: Instruction needs to be a friend here to call cloneImpl.
3590 friend class Instruction;
3591
3592 IndirectBrInst *cloneImpl() const;
3593
3594public:
3595 /// Iterator type that casts an operand to a basic block.
3596 ///
3597 /// This only makes sense because the successors are stored as adjacent
3598 /// operands for indirectbr instructions.
3599 struct succ_op_iterator
3600 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3601 std::random_access_iterator_tag, BasicBlock *,
3602 ptrdiff_t, BasicBlock *, BasicBlock *> {
3603 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3604
3605 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3606 BasicBlock *operator->() const { return operator*(); }
3607 };
3608
3609 /// The const version of `succ_op_iterator`.
3610 struct const_succ_op_iterator
3611 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3612 std::random_access_iterator_tag,
3613 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3614 const BasicBlock *> {
3615 explicit const_succ_op_iterator(const_value_op_iterator I)
3616 : iterator_adaptor_base(I) {}
3617
3618 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3619 const BasicBlock *operator->() const { return operator*(); }
3620 };
3621
3622 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3623 Instruction *InsertBefore = nullptr) {
3624 return new IndirectBrInst(Address, NumDests, InsertBefore);
3625 }
3626
3627 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3628 BasicBlock *InsertAtEnd) {
3629 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3630 }
3631
3632 /// Provide fast operand accessors.
3633 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3634
3635 // Accessor Methods for IndirectBrInst instruction.
3636 Value *getAddress() { return getOperand(0); }
3637 const Value *getAddress() const { return getOperand(0); }
3638 void setAddress(Value *V) { setOperand(0, V); }
3639
3640 /// return the number of possible destinations in this
3641 /// indirectbr instruction.
3642 unsigned getNumDestinations() const { return getNumOperands()-1; }
3643
3644 /// Return the specified destination.
3645 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3646 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3647
3648 /// Add a destination.
3649 ///
3650 void addDestination(BasicBlock *Dest);
3651
3652 /// This method removes the specified successor from the
3653 /// indirectbr instruction.
3654 void removeDestination(unsigned i);
3655
3656 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3657 BasicBlock *getSuccessor(unsigned i) const {
3658 return cast<BasicBlock>(getOperand(i+1));
3659 }
3660 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3661 setOperand(i + 1, NewSucc);
3662 }
3663
3664 iterator_range<succ_op_iterator> successors() {
3665 return make_range(succ_op_iterator(std::next(value_op_begin())),
3666 succ_op_iterator(value_op_end()));
3667 }
3668
3669 iterator_range<const_succ_op_iterator> successors() const {
3670 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3671 const_succ_op_iterator(value_op_end()));
3672 }
3673
3674 // Methods for support type inquiry through isa, cast, and dyn_cast:
3675 static bool classof(const Instruction *I) {
3676 return I->getOpcode() == Instruction::IndirectBr;
3677 }
3678 static bool classof(const Value *V) {
3679 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3680 }
3681};
3682
// IndirectBrInst keeps its operands "hung off" the instruction (the successor
// list can grow via addDestination/growOperands above), reserving space for at
// least 1 operand (the jump address).
3683template <>
3684struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3685};
3686
// Out-of-line definitions for the accessors declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside IndirectBrInst. The text after
// the macro name is the analyzer's inlined preprocessor expansion: op_begin/
// op_end, range-checked getOperand/setOperand, getNumOperands, and Op<Idx>().
3687DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return
OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst
::const_op_iterator IndirectBrInst::op_begin() const { return
OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst
::op_end() { return OperandTraits<IndirectBrInst>::op_end
(this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end
() const { return OperandTraits<IndirectBrInst>::op_end
(const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<IndirectBrInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3687, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this))[i_nocapture].get()); } void IndirectBrInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<IndirectBrInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3687, __PRETTY_FUNCTION__)); OperandTraits<IndirectBrInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
IndirectBrInst::getNumOperands() const { return OperandTraits
<IndirectBrInst>::operands(this); } template <int Idx_nocapture
> Use &IndirectBrInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &IndirectBrInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
3688
3689//===----------------------------------------------------------------------===//
3690// InvokeInst Class
3691//===----------------------------------------------------------------------===//
3692
3693/// Invoke instruction. The SubclassData field is used to hold the
3694/// calling convention of the call.
3695///
// Invoke terminator: a call with two successors (normal / unwind). Operands
// live in a single array managed by CallBase; the destination blocks are
// addressed by negative indices from the end of that array (see the
// *OpEndIdx constants below; the callee is presumably at Op<-1> per CallBase
// convention — confirm in CallBase).
3696class InvokeInst : public CallBase {
3697 /// The number of operands for this call beyond the called function,
3698 /// arguments, and operand bundles.
3699 static constexpr int NumExtraOperands = 2;
3700
3701 /// The index from the end of the operand array to the normal destination.
3702 static constexpr int NormalDestOpEndIdx = -3;
3703
3704 /// The index from the end of the operand array to the unwind destination.
3705 static constexpr int UnwindDestOpEndIdx = -2;
3706
3707 InvokeInst(const InvokeInst &BI);
3708
3709 /// Construct an InvokeInst given a range of arguments.
3710 ///
3711 /// Construct an InvokeInst from a range of arguments
3712 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3713 BasicBlock *IfException, ArrayRef<Value *> Args,
3714 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3715 const Twine &NameStr, Instruction *InsertBefore);
3716
3717 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3718 BasicBlock *IfException, ArrayRef<Value *> Args,
3719 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3720 const Twine &NameStr, BasicBlock *InsertAtEnd);
3721
3722 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3723 BasicBlock *IfException, ArrayRef<Value *> Args,
3724 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3725
3726 /// Compute the number of operands to allocate.
3727 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3728 // We need one operand for the called function, plus our extra operands and
3729 // the input operand counts provided.
3730 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3731 }
3732
3733protected:
3734 // Note: Instruction needs to be a friend here to call cloneImpl.
3735 friend class Instruction;
3736
3737 InvokeInst *cloneImpl() const;
3738
3739public:
// Factory overloads: {explicit FunctionType, FunctionCallee} x {with/without
// bundles} x {InsertBefore, InsertAtEnd}. The bundle-taking overloads also
// reserve DescriptorBytes for the per-bundle BundleOpInfo descriptors.
3740 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3741 BasicBlock *IfException, ArrayRef<Value *> Args,
3742 const Twine &NameStr,
3743 Instruction *InsertBefore = nullptr) {
3744 int NumOperands = ComputeNumOperands(Args.size());
3745 return new (NumOperands)
3746 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3747 NameStr, InsertBefore);
3748 }
3749
3750 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3751 BasicBlock *IfException, ArrayRef<Value *> Args,
3752 ArrayRef<OperandBundleDef> Bundles = None,
3753 const Twine &NameStr = "",
3754 Instruction *InsertBefore = nullptr) {
3755 int NumOperands =
3756 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3757 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3758
3759 return new (NumOperands, DescriptorBytes)
3760 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3761 NameStr, InsertBefore);
3762 }
3763
3764 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3765 BasicBlock *IfException, ArrayRef<Value *> Args,
3766 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3767 int NumOperands = ComputeNumOperands(Args.size());
3768 return new (NumOperands)
3769 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3770 NameStr, InsertAtEnd);
3771 }
3772
3773 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3774 BasicBlock *IfException, ArrayRef<Value *> Args,
3775 ArrayRef<OperandBundleDef> Bundles,
3776 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3777 int NumOperands =
3778 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3779 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3780
3781 return new (NumOperands, DescriptorBytes)
3782 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3783 NameStr, InsertAtEnd);
3784 }
3785
3786 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3787 BasicBlock *IfException, ArrayRef<Value *> Args,
3788 const Twine &NameStr,
3789 Instruction *InsertBefore = nullptr) {
3790 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3791 IfException, Args, None, NameStr, InsertBefore);
3792 }
3793
3794 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3795 BasicBlock *IfException, ArrayRef<Value *> Args,
3796 ArrayRef<OperandBundleDef> Bundles = None,
3797 const Twine &NameStr = "",
3798 Instruction *InsertBefore = nullptr) {
3799 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3800 IfException, Args, Bundles, NameStr, InsertBefore);
3801 }
3802
3803 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3804 BasicBlock *IfException, ArrayRef<Value *> Args,
3805 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3806 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3807 IfException, Args, NameStr, InsertAtEnd);
3808 }
3809
3810 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3811 BasicBlock *IfException, ArrayRef<Value *> Args,
3812 ArrayRef<OperandBundleDef> Bundles,
3813 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3814 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3815 IfException, Args, Bundles, NameStr, InsertAtEnd);
3816 }
3817
3818 /// Create a clone of \p II with a different set of operand bundles and
3819 /// insert it before \p InsertPt.
3820 ///
3821 /// The returned invoke instruction is identical to \p II in every way except
3822 /// that the operand bundles for the new instruction are set to the operand
3823 /// bundles in \p Bundles.
3824 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3825 Instruction *InsertPt = nullptr);
3826
3827 /// Create a clone of \p II with a different set of operand bundles and
3828 /// insert it before \p InsertPt.
3829 ///
3830 /// The returned invoke instruction is identical to \p II in every way except
3831 /// that the operand bundle for the new instruction is set to the operand
3832 /// bundle in \p Bundle.
3833 static InvokeInst *CreateWithReplacedBundle(InvokeInst *II,
3834 OperandBundleDef Bundles,
3835 Instruction *InsertPt = nullptr);
3836
3837 // get*Dest - Return the destination basic blocks...
// The destinations are stored as Value operands (BasicBlock derives from
// Value), addressed from the end of the operand array via Op<-3>/Op<-2>.
3838 BasicBlock *getNormalDest() const {
3839 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3840 }
3841 BasicBlock *getUnwindDest() const {
3842 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3843 }
3844 void setNormalDest(BasicBlock *B) {
3845 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3846 }
3847 void setUnwindDest(BasicBlock *B) {
3848 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3849 }
3850
3851 /// Get the landingpad instruction from the landing pad
3852 /// block (the unwind destination).
3853 LandingPadInst *getLandingPadInst() const;
3854
// Successor 0 is the normal destination, successor 1 the unwind destination.
// (The assert text below includes the analyzer's inlined macro expansion.)
3855 BasicBlock *getSuccessor(unsigned i) const {
3856 assert(i < 2 && "Successor # out of range for invoke!")((i < 2 && "Successor # out of range for invoke!")
? static_cast<void> (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3856, __PRETTY_FUNCTION__))
;
3857 return i == 0 ? getNormalDest() : getUnwindDest();
3858 }
3859
3860 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3861 assert(i < 2 && "Successor # out of range for invoke!")((i < 2 && "Successor # out of range for invoke!")
? static_cast<void> (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 3861, __PRETTY_FUNCTION__))
;
3862 if (i == 0)
3863 setNormalDest(NewSucc);
3864 else
3865 setUnwindDest(NewSucc);
3866 }
3867
3868 unsigned getNumSuccessors() const { return 2; }
3869
3870 // Methods for support type inquiry through isa, cast, and dyn_cast:
3871 static bool classof(const Instruction *I) {
3872 return (I->getOpcode() == Instruction::Invoke);
3873 }
3874 static bool classof(const Value *V) {
3875 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3876 }
3877
3878private:
3879 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3880 // method so that subclasses cannot accidentally use it.
3881 template <typename Bitfield>
3882 void setSubclassData(typename Bitfield::Type Value) {
3883 Instruction::setSubclassData<Bitfield>(Value);
3884 }
3885};
3886
// Inline ctor (insert-before-instruction form). The co-allocated operand
// array precedes the instruction, so the first Use is op_end() - NumOperands;
// init() then fills in callee, destinations, args, and bundles.
3887InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3888 BasicBlock *IfException, ArrayRef<Value *> Args,
3889 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3890 const Twine &NameStr, Instruction *InsertBefore)
3891 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3892 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3893 InsertBefore) {
3894 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3895}
3896
// Inline ctor (append-to-basic-block form); identical to the insert-before
// overload except for the insertion point.
3897InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3898 BasicBlock *IfException, ArrayRef<Value *> Args,
3899 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3900 const Twine &NameStr, BasicBlock *InsertAtEnd)
3901 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3902 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3903 InsertAtEnd) {
3904 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3905}
3906
3907//===----------------------------------------------------------------------===//
3908// CallBrInst Class
3909//===----------------------------------------------------------------------===//
3910
3911/// CallBr instruction, tracking function calls that may not return control but
3912/// instead transfer it to a third location. The SubclassData field is used to
3913/// hold the calling convention of the call.
3914///
// CallBr terminator: a call with one default destination plus
// NumIndirectDests indirect destinations. Destinations are addressed from the
// end of the operand array relative to Op<-1> (see getDefaultDest /
// getIndirectDest below).
3915class CallBrInst : public CallBase {
3916
3917 unsigned NumIndirectDests;
3918
3919 CallBrInst(const CallBrInst &BI);
3920
3921 /// Construct a CallBrInst given a range of arguments.
3922 ///
3923 /// Construct a CallBrInst from a range of arguments
3924 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3925 ArrayRef<BasicBlock *> IndirectDests,
3926 ArrayRef<Value *> Args,
3927 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3928 const Twine &NameStr, Instruction *InsertBefore);
3929
3930 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3931 ArrayRef<BasicBlock *> IndirectDests,
3932 ArrayRef<Value *> Args,
3933 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3934 const Twine &NameStr, BasicBlock *InsertAtEnd);
3935
3936 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3937 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3938 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3939
3940 /// Should the Indirect Destinations change, scan + update the Arg list.
3941 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3942
3943 /// Compute the number of operands to allocate.
3944 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3945 int NumBundleInputs = 0) {
3946 // We need one operand for the called function, plus our extra operands and
3947 // the input operand counts provided.
3948 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3949 }
3950
3951protected:
3952 // Note: Instruction needs to be a friend here to call cloneImpl.
3953 friend class Instruction;
3954
3955 CallBrInst *cloneImpl() const;
3956
3957public:
// Factory overloads mirror InvokeInst::Create: {explicit FunctionType,
// FunctionCallee} x {with/without bundles} x {InsertBefore, InsertAtEnd}.
3958 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3959 BasicBlock *DefaultDest,
3960 ArrayRef<BasicBlock *> IndirectDests,
3961 ArrayRef<Value *> Args, const Twine &NameStr,
3962 Instruction *InsertBefore = nullptr) {
3963 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3964 return new (NumOperands)
3965 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3966 NumOperands, NameStr, InsertBefore);
3967 }
3968
3969 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3970 BasicBlock *DefaultDest,
3971 ArrayRef<BasicBlock *> IndirectDests,
3972 ArrayRef<Value *> Args,
3973 ArrayRef<OperandBundleDef> Bundles = None,
3974 const Twine &NameStr = "",
3975 Instruction *InsertBefore = nullptr) {
3976 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3977 CountBundleInputs(Bundles));
3978 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3979
3980 return new (NumOperands, DescriptorBytes)
3981 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3982 NumOperands, NameStr, InsertBefore);
3983 }
3984
3985 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3986 BasicBlock *DefaultDest,
3987 ArrayRef<BasicBlock *> IndirectDests,
3988 ArrayRef<Value *> Args, const Twine &NameStr,
3989 BasicBlock *InsertAtEnd) {
3990 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3991 return new (NumOperands)
3992 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3993 NumOperands, NameStr, InsertAtEnd);
3994 }
3995
3996 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3997 BasicBlock *DefaultDest,
3998 ArrayRef<BasicBlock *> IndirectDests,
3999 ArrayRef<Value *> Args,
4000 ArrayRef<OperandBundleDef> Bundles,
4001 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4002 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4003 CountBundleInputs(Bundles));
4004 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4005
4006 return new (NumOperands, DescriptorBytes)
4007 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4008 NumOperands, NameStr, InsertAtEnd);
4009 }
4010
4011 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4012 ArrayRef<BasicBlock *> IndirectDests,
4013 ArrayRef<Value *> Args, const Twine &NameStr,
4014 Instruction *InsertBefore = nullptr) {
4015 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4016 IndirectDests, Args, NameStr, InsertBefore);
4017 }
4018
4019 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4020 ArrayRef<BasicBlock *> IndirectDests,
4021 ArrayRef<Value *> Args,
4022 ArrayRef<OperandBundleDef> Bundles = None,
4023 const Twine &NameStr = "",
4024 Instruction *InsertBefore = nullptr) {
4025 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4026 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4027 }
4028
4029 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4030 ArrayRef<BasicBlock *> IndirectDests,
4031 ArrayRef<Value *> Args, const Twine &NameStr,
4032 BasicBlock *InsertAtEnd) {
4033 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4034 IndirectDests, Args, NameStr, InsertAtEnd);
4035 }
4036
4037 static CallBrInst *Create(FunctionCallee Func,
4038 BasicBlock *DefaultDest,
4039 ArrayRef<BasicBlock *> IndirectDests,
4040 ArrayRef<Value *> Args,
4041 ArrayRef<OperandBundleDef> Bundles,
4042 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4043 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4044 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4045 }
4046
4047 /// Create a clone of \p CBI with a different set of operand bundles and
4048 /// insert it before \p InsertPt.
4049 ///
4050 /// The returned callbr instruction is identical to \p CBI in every way
4051 /// except that the operand bundles for the new instruction are set to the
4052 /// operand bundles in \p Bundles.
4053 static CallBrInst *Create(CallBrInst *CBI,
4054 ArrayRef<OperandBundleDef> Bundles,
4055 Instruction *InsertPt = nullptr);
4056
4057 /// Return the number of callbr indirect dest labels.
4058 ///
4059 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4060
4061 /// getIndirectDestLabel - Return the i-th indirect dest label.
4062 ///
4063 Value *getIndirectDestLabel(unsigned i) const {
4064 assert(i < getNumIndirectDests() && "Out of bounds!")((i < getNumIndirectDests() && "Out of bounds!") ?
static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4064, __PRETTY_FUNCTION__))
;
4065 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() +
4066 1);
4067 }
4068
4069 Value *getIndirectDestLabelUse(unsigned i) const {
4070 assert(i < getNumIndirectDests() && "Out of bounds!")((i < getNumIndirectDests() && "Out of bounds!") ?
static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4070, __PRETTY_FUNCTION__))
;
4071 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() +
4072 1);
4073 }
4074
4075 // Return the destination basic blocks...
// Pointer arithmetic relative to &Op<-1>(): the indirect dests occupy the
// getNumIndirectDests() slots before Op<-1>, preceded by the default dest.
4076 BasicBlock *getDefaultDest() const {
4077 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4078 }
4079 BasicBlock *getIndirectDest(unsigned i) const {
4080 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4081 }
4082 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4083 SmallVector<BasicBlock *, 16> IndirectDests;
4084 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4085 IndirectDests.push_back(getIndirectDest(i));
4086 return IndirectDests;
4087 }
4088 void setDefaultDest(BasicBlock *B) {
4089 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4090 }
4091 void setIndirectDest(unsigned i, BasicBlock *B) {
4092 updateArgBlockAddresses(i, B);
4093 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4094 }
4095
// NOTE(review): this assert's bound looks one too permissive — since
// getNumSuccessors() is already getNumIndirectDests() + 1 (see line 4108),
// "i < getNumSuccessors() + 1" admits i == getNumSuccessors(), for which
// getIndirectDest(i - 1) would read one slot past the indirect-dest operands.
// The bound should presumably be "i < getNumSuccessors()", matching
// setSuccessor below — confirm against current upstream LLVM.
4096 BasicBlock *getSuccessor(unsigned i) const {
4097 assert(i < getNumSuccessors() + 1 &&((i < getNumSuccessors() + 1 && "Successor # out of range for callbr!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4098, __PRETTY_FUNCTION__))
4098 "Successor # out of range for callbr!")((i < getNumSuccessors() + 1 && "Successor # out of range for callbr!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4098, __PRETTY_FUNCTION__))
;
4099 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4100 }
4101
4102 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4103 assert(i < getNumIndirectDests() + 1 &&((i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4104, __PRETTY_FUNCTION__))
4104 "Successor # out of range for callbr!")((i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4104, __PRETTY_FUNCTION__))
;
4105 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4106 }
4107
4108 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4109
4110 // Methods for support type inquiry through isa, cast, and dyn_cast:
4111 static bool classof(const Instruction *I) {
4112 return (I->getOpcode() == Instruction::CallBr);
4113 }
4114 static bool classof(const Value *V) {
4115 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4116 }
4117
4118private:
4119 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4120 // method so that subclasses cannot accidentally use it.
4121 template <typename Bitfield>
4122 void setSubclassData(typename Bitfield::Type Value) {
4123 Instruction::setSubclassData<Bitfield>(Value);
4124 }
4125};
4126
// Inline ctor (insert-before-instruction form); operand array is co-allocated
// before the instruction, first Use at op_end() - NumOperands. init() sets
// NumIndirectDests and fills in the operands.
4127CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4128 ArrayRef<BasicBlock *> IndirectDests,
4129 ArrayRef<Value *> Args,
4130 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4131 const Twine &NameStr, Instruction *InsertBefore)
4132 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4133 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4134 InsertBefore) {
4135 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4136}
4137
// Inline ctor (append-to-basic-block form); identical to the insert-before
// overload except for the insertion point.
4138CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4139 ArrayRef<BasicBlock *> IndirectDests,
4140 ArrayRef<Value *> Args,
4141 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4142 const Twine &NameStr, BasicBlock *InsertAtEnd)
4143 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4144 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4145 InsertAtEnd) {
4146 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4147}
4148
4149//===----------------------------------------------------------------------===//
4150// ResumeInst Class
4151//===----------------------------------------------------------------------===//
4152
4153//===---------------------------------------------------------------------------
4154/// Resume the propagation of an exception.
4155///
// Resume terminator: re-raises an in-flight exception. Exactly one operand
// (the exception value, see getValue) and no successors; unwinding continues
// out of the function, so getSuccessor/setSuccessor are private and
// unreachable.
4156class ResumeInst : public Instruction {
4157 ResumeInst(const ResumeInst &RI);
4158
4159 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4160 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4161
4162protected:
4163 // Note: Instruction needs to be a friend here to call cloneImpl.
4164 friend class Instruction;
4165
4166 ResumeInst *cloneImpl() const;
4167
4168public:
// new(1): allocate space for the single exception-value operand.
4169 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4170 return new(1) ResumeInst(Exn, InsertBefore);
4171 }
4172
4173 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4174 return new(1) ResumeInst(Exn, InsertAtEnd);
4175 }
4176
4177 /// Provide fast operand accessors
4178 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4179
4180 /// Convenience accessor.
4181 Value *getValue() const { return Op<0>(); }
4182
4183 unsigned getNumSuccessors() const { return 0; }
4184
4185 // Methods for support type inquiry through isa, cast, and dyn_cast:
4186 static bool classof(const Instruction *I) {
4187 return I->getOpcode() == Instruction::Resume;
4188 }
4189 static bool classof(const Value *V) {
4190 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4191 }
4192
4193private:
4194 BasicBlock *getSuccessor(unsigned idx) const {
4195 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4195)
;
4196 }
4197
4198 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4199 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4199)
;
4200 }
4201};
4202
// ResumeInst always has exactly 1 operand (the exception value), so it uses
// fixed-count operand storage rather than the hung-off variant.
4203template <>
4204struct OperandTraits<ResumeInst> :
4205 public FixedNumOperandTraits<ResumeInst, 1> {
4206};
4207
// Out-of-line definitions for ResumeInst's operand accessors (declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS in the class). The trailing text is
// the analyzer's inlined preprocessor expansion of the macro.
4208DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits
<ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator
ResumeInst::op_begin() const { return OperandTraits<ResumeInst
>::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst
::op_iterator ResumeInst::op_end() { return OperandTraits<
ResumeInst>::op_end(this); } ResumeInst::const_op_iterator
ResumeInst::op_end() const { return OperandTraits<ResumeInst
>::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<ResumeInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4208, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ResumeInst>::op_begin(const_cast<ResumeInst
*>(this))[i_nocapture].get()); } void ResumeInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<ResumeInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4208, __PRETTY_FUNCTION__)); OperandTraits<ResumeInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ResumeInst
::getNumOperands() const { return OperandTraits<ResumeInst
>::operands(this); } template <int Idx_nocapture> Use
&ResumeInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ResumeInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
4209
4210//===----------------------------------------------------------------------===//
4211// CatchSwitchInst Class
4212//===----------------------------------------------------------------------===//
4213class CatchSwitchInst : public Instruction {
4214 using UnwindDestField = BoolBitfieldElementT<0>;
4215
4216 /// The number of operands actually allocated. NumOperands is
4217 /// the number actually in use.
4218 unsigned ReservedSpace;
4219
4220 // Operand[0] = Outer scope
4221 // Operand[1] = Unwind block destination
4222 // Operand[n] = BasicBlock to go to on match
4223 CatchSwitchInst(const CatchSwitchInst &CSI);
4224
4225 /// Create a new switch instruction, specifying a
4226 /// default destination. The number of additional handlers can be specified
4227 /// here to make memory allocation more efficient.
4228 /// This constructor can also autoinsert before another instruction.
4229 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4230 unsigned NumHandlers, const Twine &NameStr,
4231 Instruction *InsertBefore);
4232
4233 /// Create a new switch instruction, specifying a
4234 /// default destination. The number of additional handlers can be specified
4235 /// here to make memory allocation more efficient.
4236 /// This constructor also autoinserts at the end of the specified BasicBlock.
4237 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4238 unsigned NumHandlers, const Twine &NameStr,
4239 BasicBlock *InsertAtEnd);
4240
4241 // allocate space for exactly zero operands
4242 void *operator new(size_t s) { return User::operator new(s); }
4243
4244 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4245 void growOperands(unsigned Size);
4246
4247protected:
4248 // Note: Instruction needs to be a friend here to call cloneImpl.
4249 friend class Instruction;
4250
4251 CatchSwitchInst *cloneImpl() const;
4252
4253public:
4254 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4255 unsigned NumHandlers,
4256 const Twine &NameStr = "",
4257 Instruction *InsertBefore = nullptr) {
4258 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4259 InsertBefore);
4260 }
4261
4262 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4263 unsigned NumHandlers, const Twine &NameStr,
4264 BasicBlock *InsertAtEnd) {
4265 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4266 InsertAtEnd);
4267 }
4268
4269 /// Provide fast operand accessors
4270 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4271
4272 // Accessor Methods for CatchSwitch stmt
4273 Value *getParentPad() const { return getOperand(0); }
4274 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4275
4276 // Accessor Methods for CatchSwitch stmt
4277 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4278 bool unwindsToCaller() const { return !hasUnwindDest(); }
4279 BasicBlock *getUnwindDest() const {
4280 if (hasUnwindDest())
4281 return cast<BasicBlock>(getOperand(1));
4282 return nullptr;
4283 }
4284 void setUnwindDest(BasicBlock *UnwindDest) {
4285 assert(UnwindDest)((UnwindDest) ? static_cast<void> (0) : __assert_fail (
"UnwindDest", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4285, __PRETTY_FUNCTION__))
;
4286 assert(hasUnwindDest())((hasUnwindDest()) ? static_cast<void> (0) : __assert_fail
("hasUnwindDest()", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4286, __PRETTY_FUNCTION__))
;
4287 setOperand(1, UnwindDest);
4288 }
4289
4290 /// return the number of 'handlers' in this catchswitch
4291 /// instruction, except the default handler
4292 unsigned getNumHandlers() const {
4293 if (hasUnwindDest())
4294 return getNumOperands() - 2;
4295 return getNumOperands() - 1;
4296 }
4297
4298private:
4299 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4300 static const BasicBlock *handler_helper(const Value *V) {
4301 return cast<BasicBlock>(V);
4302 }
4303
4304public:
4305 using DerefFnTy = BasicBlock *(*)(Value *);
4306 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4307 using handler_range = iterator_range<handler_iterator>;
4308 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4309 using const_handler_iterator =
4310 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4311 using const_handler_range = iterator_range<const_handler_iterator>;
4312
4313 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4314 handler_iterator handler_begin() {
4315 op_iterator It = op_begin() + 1;
4316 if (hasUnwindDest())
4317 ++It;
4318 return handler_iterator(It, DerefFnTy(handler_helper));
4319 }
4320
4321 /// Returns an iterator that points to the first handler in the
4322 /// CatchSwitchInst.
4323 const_handler_iterator handler_begin() const {
4324 const_op_iterator It = op_begin() + 1;
4325 if (hasUnwindDest())
4326 ++It;
4327 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4328 }
4329
4330 /// Returns a read-only iterator that points one past the last
4331 /// handler in the CatchSwitchInst.
4332 handler_iterator handler_end() {
4333 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4334 }
4335
4336 /// Returns an iterator that points one past the last handler in the
4337 /// CatchSwitchInst.
4338 const_handler_iterator handler_end() const {
4339 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4340 }
4341
4342 /// iteration adapter for range-for loops.
4343 handler_range handlers() {
4344 return make_range(handler_begin(), handler_end());
4345 }
4346
4347 /// iteration adapter for range-for loops.
4348 const_handler_range handlers() const {
4349 return make_range(handler_begin(), handler_end());
4350 }
4351
4352 /// Add an entry to the switch instruction...
4353 /// Note:
4354 /// This action invalidates handler_end(). Old handler_end() iterator will
4355 /// point to the added handler.
4356 void addHandler(BasicBlock *Dest);
4357
4358 void removeHandler(handler_iterator HI);
4359
4360 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4361 BasicBlock *getSuccessor(unsigned Idx) const {
4362 assert(Idx < getNumSuccessors() &&((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4363, __PRETTY_FUNCTION__))
4363 "Successor # out of range for catchswitch!")((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4363, __PRETTY_FUNCTION__))
;
4364 return cast<BasicBlock>(getOperand(Idx + 1));
4365 }
4366 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4367 assert(Idx < getNumSuccessors() &&((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4368, __PRETTY_FUNCTION__))
4368 "Successor # out of range for catchswitch!")((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4368, __PRETTY_FUNCTION__))
;
4369 setOperand(Idx + 1, NewSucc);
4370 }
4371
4372 // Methods for support type inquiry through isa, cast, and dyn_cast:
4373 static bool classof(const Instruction *I) {
4374 return I->getOpcode() == Instruction::CatchSwitch;
4375 }
4376 static bool classof(const Value *V) {
4377 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4378 }
4379};
4380
4381template <>
4382struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4383
4384DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return
OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst
::const_op_iterator CatchSwitchInst::op_begin() const { return
OperandTraits<CatchSwitchInst>::op_begin(const_cast<
CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst
::op_end() { return OperandTraits<CatchSwitchInst>::op_end
(this); } CatchSwitchInst::const_op_iterator CatchSwitchInst::
op_end() const { return OperandTraits<CatchSwitchInst>::
op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<CatchSwitchInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4384, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<CatchSwitchInst>::op_begin(const_cast<
CatchSwitchInst*>(this))[i_nocapture].get()); } void CatchSwitchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<CatchSwitchInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4384, __PRETTY_FUNCTION__)); OperandTraits<CatchSwitchInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CatchSwitchInst::getNumOperands() const { return OperandTraits
<CatchSwitchInst>::operands(this); } template <int Idx_nocapture
> Use &CatchSwitchInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &CatchSwitchInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
4385
4386//===----------------------------------------------------------------------===//
4387// CleanupPadInst Class
4388//===----------------------------------------------------------------------===//
4389class CleanupPadInst : public FuncletPadInst {
4390private:
4391 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4392 unsigned Values, const Twine &NameStr,
4393 Instruction *InsertBefore)
4394 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4395 NameStr, InsertBefore) {}
4396 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4397 unsigned Values, const Twine &NameStr,
4398 BasicBlock *InsertAtEnd)
4399 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4400 NameStr, InsertAtEnd) {}
4401
4402public:
4403 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4404 const Twine &NameStr = "",
4405 Instruction *InsertBefore = nullptr) {
4406 unsigned Values = 1 + Args.size();
4407 return new (Values)
4408 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4409 }
4410
4411 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4412 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4413 unsigned Values = 1 + Args.size();
4414 return new (Values)
4415 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4416 }
4417
4418 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4419 static bool classof(const Instruction *I) {
4420 return I->getOpcode() == Instruction::CleanupPad;
4421 }
4422 static bool classof(const Value *V) {
4423 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4424 }
4425};
4426
4427//===----------------------------------------------------------------------===//
4428// CatchPadInst Class
4429//===----------------------------------------------------------------------===//
4430class CatchPadInst : public FuncletPadInst {
4431private:
4432 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4433 unsigned Values, const Twine &NameStr,
4434 Instruction *InsertBefore)
4435 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4436 NameStr, InsertBefore) {}
4437 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4438 unsigned Values, const Twine &NameStr,
4439 BasicBlock *InsertAtEnd)
4440 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4441 NameStr, InsertAtEnd) {}
4442
4443public:
4444 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4445 const Twine &NameStr = "",
4446 Instruction *InsertBefore = nullptr) {
4447 unsigned Values = 1 + Args.size();
4448 return new (Values)
4449 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4450 }
4451
4452 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4453 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4454 unsigned Values = 1 + Args.size();
4455 return new (Values)
4456 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4457 }
4458
4459 /// Convenience accessors
4460 CatchSwitchInst *getCatchSwitch() const {
4461 return cast<CatchSwitchInst>(Op<-1>());
4462 }
4463 void setCatchSwitch(Value *CatchSwitch) {
4464 assert(CatchSwitch)((CatchSwitch) ? static_cast<void> (0) : __assert_fail (
"CatchSwitch", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4464, __PRETTY_FUNCTION__))
;
4465 Op<-1>() = CatchSwitch;
4466 }
4467
4468 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4469 static bool classof(const Instruction *I) {
4470 return I->getOpcode() == Instruction::CatchPad;
4471 }
4472 static bool classof(const Value *V) {
4473 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4474 }
4475};
4476
4477//===----------------------------------------------------------------------===//
4478// CatchReturnInst Class
4479//===----------------------------------------------------------------------===//
4480
4481class CatchReturnInst : public Instruction {
4482 CatchReturnInst(const CatchReturnInst &RI);
4483 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4484 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4485
4486 void init(Value *CatchPad, BasicBlock *BB);
4487
4488protected:
4489 // Note: Instruction needs to be a friend here to call cloneImpl.
4490 friend class Instruction;
4491
4492 CatchReturnInst *cloneImpl() const;
4493
4494public:
4495 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4496 Instruction *InsertBefore = nullptr) {
4497 assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4497, __PRETTY_FUNCTION__))
;
4498 assert(BB)((BB) ? static_cast<void> (0) : __assert_fail ("BB", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4498, __PRETTY_FUNCTION__))
;
4499 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4500 }
4501
4502 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4503 BasicBlock *InsertAtEnd) {
4504 assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4504, __PRETTY_FUNCTION__))
;
4505 assert(BB)((BB) ? static_cast<void> (0) : __assert_fail ("BB", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4505, __PRETTY_FUNCTION__))
;
4506 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4507 }
4508
4509 /// Provide fast operand accessors
4510 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4511
4512 /// Convenience accessors.
4513 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4514 void setCatchPad(CatchPadInst *CatchPad) {
4515 assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4515, __PRETTY_FUNCTION__))
;
4516 Op<0>() = CatchPad;
4517 }
4518
4519 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4520 void setSuccessor(BasicBlock *NewSucc) {
4521 assert(NewSucc)((NewSucc) ? static_cast<void> (0) : __assert_fail ("NewSucc"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4521, __PRETTY_FUNCTION__))
;
4522 Op<1>() = NewSucc;
4523 }
4524 unsigned getNumSuccessors() const { return 1; }
4525
4526 /// Get the parentPad of this catchret's catchpad's catchswitch.
4527 /// The successor block is implicitly a member of this funclet.
4528 Value *getCatchSwitchParentPad() const {
4529 return getCatchPad()->getCatchSwitch()->getParentPad();
4530 }
4531
4532 // Methods for support type inquiry through isa, cast, and dyn_cast:
4533 static bool classof(const Instruction *I) {
4534 return (I->getOpcode() == Instruction::CatchRet);
4535 }
4536 static bool classof(const Value *V) {
4537 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4538 }
4539
4540private:
4541 BasicBlock *getSuccessor(unsigned Idx) const {
4542 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((Idx < getNumSuccessors() && "Successor # out of range for catchret!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4542, __PRETTY_FUNCTION__))
;
4543 return getSuccessor();
4544 }
4545
4546 void setSuccessor(unsigned Idx, BasicBlock *B) {
4547 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((Idx < getNumSuccessors() && "Successor # out of range for catchret!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4547, __PRETTY_FUNCTION__))
;
4548 setSuccessor(B);
4549 }
4550};
4551
4552template <>
4553struct OperandTraits<CatchReturnInst>
4554 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4555
4556DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)CatchReturnInst::op_iterator CatchReturnInst::op_begin() { return
OperandTraits<CatchReturnInst>::op_begin(this); } CatchReturnInst
::const_op_iterator CatchReturnInst::op_begin() const { return
OperandTraits<CatchReturnInst>::op_begin(const_cast<
CatchReturnInst*>(this)); } CatchReturnInst::op_iterator CatchReturnInst
::op_end() { return OperandTraits<CatchReturnInst>::op_end
(this); } CatchReturnInst::const_op_iterator CatchReturnInst::
op_end() const { return OperandTraits<CatchReturnInst>::
op_end(const_cast<CatchReturnInst*>(this)); } Value *CatchReturnInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<CatchReturnInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4556, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<CatchReturnInst>::op_begin(const_cast<
CatchReturnInst*>(this))[i_nocapture].get()); } void CatchReturnInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<CatchReturnInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4556, __PRETTY_FUNCTION__)); OperandTraits<CatchReturnInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CatchReturnInst::getNumOperands() const { return OperandTraits
<CatchReturnInst>::operands(this); } template <int Idx_nocapture
> Use &CatchReturnInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &CatchReturnInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
4557
4558//===----------------------------------------------------------------------===//
4559// CleanupReturnInst Class
4560//===----------------------------------------------------------------------===//
4561
4562class CleanupReturnInst : public Instruction {
4563 using UnwindDestField = BoolBitfieldElementT<0>;
4564
4565private:
4566 CleanupReturnInst(const CleanupReturnInst &RI);
4567 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4568 Instruction *InsertBefore = nullptr);
4569 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4570 BasicBlock *InsertAtEnd);
4571
4572 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4573
4574protected:
4575 // Note: Instruction needs to be a friend here to call cloneImpl.
4576 friend class Instruction;
4577
4578 CleanupReturnInst *cloneImpl() const;
4579
4580public:
4581 static CleanupReturnInst *Create(Value *CleanupPad,
4582 BasicBlock *UnwindBB = nullptr,
4583 Instruction *InsertBefore = nullptr) {
4584 assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail (
"CleanupPad", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4584, __PRETTY_FUNCTION__))
;
4585 unsigned Values = 1;
4586 if (UnwindBB)
4587 ++Values;
4588 return new (Values)
4589 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4590 }
4591
4592 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4593 BasicBlock *InsertAtEnd) {
4594 assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail (
"CleanupPad", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4594, __PRETTY_FUNCTION__))
;
4595 unsigned Values = 1;
4596 if (UnwindBB)
4597 ++Values;
4598 return new (Values)
4599 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4600 }
4601
4602 /// Provide fast operand accessors
4603 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4604
4605 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4606 bool unwindsToCaller() const { return !hasUnwindDest(); }
4607
4608 /// Convenience accessor.
4609 CleanupPadInst *getCleanupPad() const {
4610 return cast<CleanupPadInst>(Op<0>());
4611 }
4612 void setCleanupPad(CleanupPadInst *CleanupPad) {
4613 assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail (
"CleanupPad", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4613, __PRETTY_FUNCTION__))
;
4614 Op<0>() = CleanupPad;
4615 }
4616
4617 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4618
4619 BasicBlock *getUnwindDest() const {
4620 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4621 }
4622 void setUnwindDest(BasicBlock *NewDest) {
4623 assert(NewDest)((NewDest) ? static_cast<void> (0) : __assert_fail ("NewDest"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4623, __PRETTY_FUNCTION__))
;
4624 assert(hasUnwindDest())((hasUnwindDest()) ? static_cast<void> (0) : __assert_fail
("hasUnwindDest()", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4624, __PRETTY_FUNCTION__))
;
4625 Op<1>() = NewDest;
4626 }
4627
4628 // Methods for support type inquiry through isa, cast, and dyn_cast:
4629 static bool classof(const Instruction *I) {
4630 return (I->getOpcode() == Instruction::CleanupRet);
4631 }
4632 static bool classof(const Value *V) {
4633 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4634 }
4635
4636private:
4637 BasicBlock *getSuccessor(unsigned Idx) const {
4638 assert(Idx == 0)((Idx == 0) ? static_cast<void> (0) : __assert_fail ("Idx == 0"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4638, __PRETTY_FUNCTION__))
;
4639 return getUnwindDest();
4640 }
4641
4642 void setSuccessor(unsigned Idx, BasicBlock *B) {
4643 assert(Idx == 0)((Idx == 0) ? static_cast<void> (0) : __assert_fail ("Idx == 0"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4643, __PRETTY_FUNCTION__))
;
4644 setUnwindDest(B);
4645 }
4646
4647 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4648 // method so that subclasses cannot accidentally use it.
4649 template <typename Bitfield>
4650 void setSubclassData(typename Bitfield::Type Value) {
4651 Instruction::setSubclassData<Bitfield>(Value);
4652 }
4653};
4654
4655template <>
4656struct OperandTraits<CleanupReturnInst>
4657 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4658
4659DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() {
return OperandTraits<CleanupReturnInst>::op_begin(this
); } CleanupReturnInst::const_op_iterator CleanupReturnInst::
op_begin() const { return OperandTraits<CleanupReturnInst>
::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst
::op_iterator CleanupReturnInst::op_end() { return OperandTraits
<CleanupReturnInst>::op_end(this); } CleanupReturnInst::
const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits
<CleanupReturnInst>::op_end(const_cast<CleanupReturnInst
*>(this)); } Value *CleanupReturnInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<CleanupReturnInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4659, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<CleanupReturnInst>::op_begin(const_cast
<CleanupReturnInst*>(this))[i_nocapture].get()); } void
CleanupReturnInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<CleanupReturnInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4659, __PRETTY_FUNCTION__)); OperandTraits<CleanupReturnInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CleanupReturnInst::getNumOperands() const { return OperandTraits
<CleanupReturnInst>::operands(this); } template <int
Idx_nocapture> Use &CleanupReturnInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &CleanupReturnInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
4660
4661//===----------------------------------------------------------------------===//
4662// UnreachableInst Class
4663//===----------------------------------------------------------------------===//
4664
4665//===---------------------------------------------------------------------------
4666/// This function has undefined behavior. In particular, the
4667/// presence of this instruction indicates some higher level knowledge that the
4668/// end of the block cannot be reached.
4669///
4670class UnreachableInst : public Instruction {
4671protected:
4672 // Note: Instruction needs to be a friend here to call cloneImpl.
4673 friend class Instruction;
4674
4675 UnreachableInst *cloneImpl() const;
4676
4677public:
4678 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4679 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4680
4681 // allocate space for exactly zero operands
4682 void *operator new(size_t s) {
4683 return User::operator new(s, 0);
4684 }
4685
4686 unsigned getNumSuccessors() const { return 0; }
4687
4688 // Methods for support type inquiry through isa, cast, and dyn_cast:
4689 static bool classof(const Instruction *I) {
4690 return I->getOpcode() == Instruction::Unreachable;
4691 }
4692 static bool classof(const Value *V) {
4693 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4694 }
4695
4696private:
4697 BasicBlock *getSuccessor(unsigned idx) const {
4698 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4698)
;
4699 }
4700
4701 void setSuccessor(unsigned idx, BasicBlock *B) {
4702 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 4702)
;
4703 }
4704};
4705
4706//===----------------------------------------------------------------------===//
4707// TruncInst Class
4708//===----------------------------------------------------------------------===//
4709
4710/// This class represents a truncation of integer types.
4711class TruncInst : public CastInst {
4712protected:
4713 // Note: Instruction needs to be a friend here to call cloneImpl.
4714 friend class Instruction;
4715
4716 /// Clone an identical TruncInst
4717 TruncInst *cloneImpl() const;
4718
4719public:
4720 /// Constructor with insert-before-instruction semantics
4721 TruncInst(
4722 Value *S, ///< The value to be truncated
4723 Type *Ty, ///< The (smaller) type to truncate to
4724 const Twine &NameStr = "", ///< A name for the new instruction
4725 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4726 );
4727
4728 /// Constructor with insert-at-end-of-block semantics
4729 TruncInst(
4730 Value *S, ///< The value to be truncated
4731 Type *Ty, ///< The (smaller) type to truncate to
4732 const Twine &NameStr, ///< A name for the new instruction
4733 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4734 );
4735
4736 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4737 static bool classof(const Instruction *I) {
4738 return I->getOpcode() == Trunc;
4739 }
4740 static bool classof(const Value *V) {
4741 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4742 }
4743};
4744
4745//===----------------------------------------------------------------------===//
4746// ZExtInst Class
4747//===----------------------------------------------------------------------===//
4748
/// This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ZExtInst
  ZExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  ZExtInst(
    Value *S,                           ///< The value to be zero extended
    Type *Ty,                           ///< The type to zero extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  ZExtInst(
    Value *S,             ///< The value to be zero extended
    Type *Ty,             ///< The type to zero extend to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == ZExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4783
4784//===----------------------------------------------------------------------===//
4785// SExtInst Class
4786//===----------------------------------------------------------------------===//
4787
/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SExtInst
  SExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SExtInst(
    Value *S,                           ///< The value to be sign extended
    Type *Ty,                           ///< The type to sign extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SExtInst(
    Value *S,             ///< The value to be sign extended
    Type *Ty,             ///< The type to sign extend to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4822
4823//===----------------------------------------------------------------------===//
4824// FPTruncInst Class
4825//===----------------------------------------------------------------------===//
4826
/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPTruncInst
  FPTruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPTruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  /// (comment previously mislabeled this overload as insert-before)
  FPTruncInst(
    Value *S,             ///< The value to be truncated
    Type *Ty,             ///< The type to truncate to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPTrunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4861
4862//===----------------------------------------------------------------------===//
4863// FPExtInst Class
4864//===----------------------------------------------------------------------===//
4865
/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst
  FPExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPExtInst(
    Value *S,                           ///< The value to be extended
    Type *Ty,                           ///< The type to extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPExtInst(
    Value *S,             ///< The value to be extended
    Type *Ty,             ///< The type to extend to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4900
4901//===----------------------------------------------------------------------===//
4902// UIToFPInst Class
4903//===----------------------------------------------------------------------===//
4904
/// This class represents a cast unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  UIToFPInst(
    Value *S,             ///< The value to be converted
    Type *Ty,             ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4939
4940//===----------------------------------------------------------------------===//
4941// SIToFPInst Class
4942//===----------------------------------------------------------------------===//
4943
/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SIToFPInst(
    Value *S,             ///< The value to be converted
    Type *Ty,             ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4978
4979//===----------------------------------------------------------------------===//
4980// FPToUIInst Class
4981//===----------------------------------------------------------------------===//
4982
/// This class represents a cast from floating point to unsigned integer
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToUIInst(
    Value *S,             ///< The value to be converted
    Type *Ty,             ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5017
5018//===----------------------------------------------------------------------===//
5019// FPToSIInst Class
5020//===----------------------------------------------------------------------===//
5021
/// This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToSIInst(
    Value *S,             ///< The value to be converted
    Type *Ty,             ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5056
5057//===----------------------------------------------------------------------===//
5058// IntToPtrInst Class
5059//===----------------------------------------------------------------------===//
5060
/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  IntToPtrInst(
    Value *S,             ///< The value to be converted
    Type *Ty,             ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type
  /// (i.e. the address space of the result, which is a pointer).
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5099
5100//===----------------------------------------------------------------------===//
5101// PtrToIntInst Class
5102//===----------------------------------------------------------------------===//
5103
/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  PtrToIntInst(
    Value *S,             ///< The value to be converted
    Type *Ty,             ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand (the source pointer).
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5150
5151//===----------------------------------------------------------------------===//
5152// BitCastInst Class
5153//===----------------------------------------------------------------------===//
5154
/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S,             ///< The value to be casted
    Type *Ty,             ///< The type to cast to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5189
5190//===----------------------------------------------------------------------===//
5191// AddrSpaceCastInst Class
5192//===----------------------------------------------------------------------===//
5193
/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S,             ///< The value to be casted
    Type *Ty,             ///< The type to cast to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand (the source).
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result (the destination).
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
5254
5255/// A helper function that returns the pointer operand of a load or store
5256/// instruction. Returns nullptr if not load or store.
5257inline const Value *getLoadStorePointerOperand(const Value *V) {
5258 if (auto *Load = dyn_cast<LoadInst>(V))
5259 return Load->getPointerOperand();
5260 if (auto *Store = dyn_cast<StoreInst>(V))
5261 return Store->getPointerOperand();
5262 return nullptr;
5263}
5264inline Value *getLoadStorePointerOperand(Value *V) {
5265 return const_cast<Value *>(
5266 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5267}
5268
5269/// A helper function that returns the pointer operand of a load, store
5270/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5271inline const Value *getPointerOperand(const Value *V) {
5272 if (auto *Ptr = getLoadStorePointerOperand(V))
5273 return Ptr;
5274 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5275 return Gep->getPointerOperand();
5276 return nullptr;
5277}
5278inline Value *getPointerOperand(Value *V) {
5279 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5280}
5281
5282/// A helper function that returns the alignment of load or store instruction.
5283inline Align getLoadStoreAlignment(Value *I) {
5284 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction") ? static_cast<void>
(0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 5285, __PRETTY_FUNCTION__))
5285 "Expected Load or Store instruction")(((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction") ? static_cast<void>
(0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 5285, __PRETTY_FUNCTION__))
;
5286 if (auto *LI = dyn_cast<LoadInst>(I))
5287 return LI->getAlign();
5288 return cast<StoreInst>(I)->getAlign();
5289}
5290
5291/// A helper function that returns the address space of the pointer operand of
5292/// load or store instruction.
5293inline unsigned getLoadStoreAddressSpace(Value *I) {
5294 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction") ? static_cast<void>
(0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 5295, __PRETTY_FUNCTION__))
5295 "Expected Load or Store instruction")(((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction") ? static_cast<void>
(0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Instructions.h"
, 5295, __PRETTY_FUNCTION__))
;
5296 if (auto *LI = dyn_cast<LoadInst>(I))
5297 return LI->getPointerAddressSpace();
5298 return cast<StoreInst>(I)->getPointerAddressSpace();
5299}
5300
5301//===----------------------------------------------------------------------===//
5302// FreezeInst Class
5303//===----------------------------------------------------------------------===//
5304
/// This class represents a freeze function that returns random concrete
/// value if an operand is either a poison value or an undef value
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst
  FreezeInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  explicit FreezeInst(Value *S,
                      const Twine &NameStr = "",
                      Instruction *InsertBefore = nullptr);
  /// Constructor with insert-at-end-of-block semantics.
  FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5329
5330} // end namespace llvm
5331
5332#endif // LLVM_IR_INSTRUCTIONS_H

/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Value.h

1//===- llvm/Value.h - Definition of the Value class -------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the Value class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_IR_VALUE_H
14#define LLVM_IR_VALUE_H
15
16#include "llvm-c/Types.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/ADT/iterator_range.h"
20#include "llvm/IR/Use.h"
21#include "llvm/Support/Alignment.h"
22#include "llvm/Support/CBindingWrapping.h"
23#include "llvm/Support/Casting.h"
24#include <cassert>
25#include <iterator>
26#include <memory>
27
28namespace llvm {
29
30class APInt;
31class Argument;
32class BasicBlock;
33class Constant;
34class ConstantData;
35class ConstantAggregate;
36class DataLayout;
37class Function;
38class GlobalAlias;
39class GlobalIFunc;
40class GlobalIndirectSymbol;
41class GlobalObject;
42class GlobalValue;
43class GlobalVariable;
44class InlineAsm;
45class Instruction;
46class LLVMContext;
47class MDNode;
48class Module;
49class ModuleSlotTracker;
50class raw_ostream;
51template<typename ValueTy> class StringMapEntry;
52class Twine;
53class Type;
54class User;
55
56using ValueName = StringMapEntry<Value *>;
57
58//===----------------------------------------------------------------------===//
59// Value Class
60//===----------------------------------------------------------------------===//
61
62/// LLVM Value Representation
63///
64/// This is a very important LLVM class. It is the base class of all values
65/// computed by a program that may be used as operands to other values. Value is
66/// the super class of other important classes such as Instruction and Function.
67/// All Values have a Type. Type is not a subclass of Value. Some values can
68/// have a name and they belong to some Module. Setting the name on the Value
69/// automatically updates the module's symbol table.
70///
71/// Every value has a "use list" that keeps track of which other Values are
72/// using this Value. A Value can also have an arbitrary number of ValueHandle
73/// objects that watch it and listen to RAUW and Destroy events. See
74/// llvm/IR/ValueHandle.h for details.
75class Value {
76 Type *VTy;
77 Use *UseList;
78
79 friend class ValueAsMetadata; // Allow access to IsUsedByMD.
80 friend class ValueHandleBase;
81
82 const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast)
83 unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this?
84
85protected:
86 /// Hold subclass data that can be dropped.
87 ///
88 /// This member is similar to SubclassData, however it is for holding
89 /// information which may be used to aid optimization, but which may be
90 /// cleared to zero without affecting conservative interpretation.
91 unsigned char SubclassOptionalData : 7;
92
93private:
94 /// Hold arbitrary subclass data.
95 ///
96 /// This member is defined by this class, but is not used for anything.
97 /// Subclasses can use it to hold whatever state they find useful. This
98 /// field is initialized to zero by the ctor.
99 unsigned short SubclassData;
100
101protected:
102 /// The number of operands in the subclass.
103 ///
104 /// This member is defined by this class, but not used for anything.
105 /// Subclasses can use it to store their number of operands, if they have
106 /// any.
107 ///
108 /// This is stored here to save space in User on 64-bit hosts. Since most
109 /// instances of Value have operands, 32-bit hosts aren't significantly
110 /// affected.
111 ///
112 /// Note, this should *NOT* be used directly by any class other than User.
113 /// User uses this value to find the Use list.
114 enum : unsigned { NumUserOperandsBits = 27 };
115 unsigned NumUserOperands : NumUserOperandsBits;
116
117 // Use the same type as the bitfield above so that MSVC will pack them.
118 unsigned IsUsedByMD : 1;
119 unsigned HasName : 1;
120 unsigned HasMetadata : 1; // Has metadata attached to this?
121 unsigned HasHungOffUses : 1;
122 unsigned HasDescriptor : 1;
123
124private:
125 template <typename UseT> // UseT == 'Use' or 'const Use'
126 class use_iterator_impl
127 : public std::iterator<std::forward_iterator_tag, UseT *> {
128 friend class Value;
129
130 UseT *U;
131
132 explicit use_iterator_impl(UseT *u) : U(u) {}
133
134 public:
135 use_iterator_impl() : U() {}
136
137 bool operator==(const use_iterator_impl &x) const { return U == x.U; }
138 bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }
139
140 use_iterator_impl &operator++() { // Preincrement
141 assert(U && "Cannot increment end iterator!")((U && "Cannot increment end iterator!") ? static_cast
<void> (0) : __assert_fail ("U && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Value.h"
, 141, __PRETTY_FUNCTION__))
;
142 U = U->getNext();
143 return *this;
144 }
145
146 use_iterator_impl operator++(int) { // Postincrement
147 auto tmp = *this;
148 ++*this;
149 return tmp;
150 }
151
152 UseT &operator*() const {
153 assert(U && "Cannot dereference end iterator!")((U && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("U && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/IR/Value.h"
, 153, __PRETTY_FUNCTION__))
;
154 return *U;
155 }
156
157 UseT *operator->() const { return &operator*(); }
158
159 operator use_iterator_impl<const UseT>() const {
160 return use_iterator_impl<const UseT>(U);
161 }
162 };
163
164 template <typename UserTy> // UserTy == 'User' or 'const User'
165 class user_iterator_impl
166 : public std::iterator<std::forward_iterator_tag, UserTy *> {
167 use_iterator_impl<Use> UI;
168 explicit user_iterator_impl(Use *U) : UI(U) {}
169 friend class Value;
170
171 public:
172 user_iterator_impl() = default;
173
174 bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
175 bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }
176
177 /// Returns true if this iterator is equal to user_end() on the value.
178 bool atEnd() const { return *this == user_iterator_impl(); }
179
180 user_iterator_impl &operator++() { // Preincrement
181 ++UI;
182 return *this;
183 }
184
185 user_iterator_impl operator++(int) { // Postincrement
186 auto tmp = *this;
187 ++*this;
188 return tmp;
189 }
190
191 // Retrieve a pointer to the current User.
192 UserTy *operator*() const {
193 return UI->getUser();
194 }
195
196 UserTy *operator->() const { return operator*(); }
197
198 operator user_iterator_impl<const UserTy>() const {
199 return user_iterator_impl<const UserTy>(*UI);
200 }
201
202 Use &getUse() const { return *UI; }
203 };
204
205protected:
206 Value(Type *Ty, unsigned scid);
207
208 /// Value's destructor should be virtual by design, but that would require
209 /// that Value and all of its subclasses have a vtable that effectively
210 /// duplicates the information in the value ID. As a size optimization, the
211 /// destructor has been protected, and the caller should manually call
212 /// deleteValue.
213 ~Value(); // Use deleteValue() to delete a generic Value.
214
215public:
216 Value(const Value &) = delete;
217 Value &operator=(const Value &) = delete;
218
219 /// Delete a pointer to a generic Value.
220 void deleteValue();
221
222 /// Support for debugging, callable in GDB: V->dump()
223 void dump() const;
224
225 /// Implement operator<< on Value.
226 /// @{
227 void print(raw_ostream &O, bool IsForDebug = false) const;
228 void print(raw_ostream &O, ModuleSlotTracker &MST,
229 bool IsForDebug = false) const;
230 /// @}
231
232 /// Print the name of this Value out to the specified raw_ostream.
233 ///
234 /// This is useful when you just want to print 'int %reg126', not the
235 /// instruction that generated it. If you specify a Module for context, then
/// even constants get pretty-printed; for example, the type of a null
237 /// pointer is printed symbolically.
238 /// @{
239 void printAsOperand(raw_ostream &O, bool PrintType = true,
240 const Module *M = nullptr) const;
241 void printAsOperand(raw_ostream &O, bool PrintType,
242 ModuleSlotTracker &MST) const;
243 /// @}
244
245 /// All values are typed, get the type of this value.
246 Type *getType() const { return VTy; }
247
248 /// All values hold a context through their type.
249 LLVMContext &getContext() const;
250
251 // All values can potentially be named.
252 bool hasName() const { return HasName; }
253 ValueName *getValueName() const;
254 void setValueName(ValueName *VN);
255
256private:
257 void destroyValueName();
258 enum class ReplaceMetadataUses { No, Yes };
259 void doRAUW(Value *New, ReplaceMetadataUses);
260 void setNameImpl(const Twine &Name);
261
262public:
263 /// Return a constant reference to the value's name.
264 ///
265 /// This guaranteed to return the same reference as long as the value is not
266 /// modified. If the value has a name, this does a hashtable lookup, so it's
267 /// not free.
268 StringRef getName() const;
269
270 /// Change the name of the value.
271 ///
272 /// Choose a new unique name if the provided name is taken.
273 ///
274 /// \param Name The new name; or "" if the value's name should be removed.
275 void setName(const Twine &Name);
276
277 /// Transfer the name from V to this value.
278 ///
279 /// After taking V's name, sets V's name to empty.
280 ///
281 /// \note It is an error to call V->takeName(V).
282 void takeName(Value *V);
283
284#ifndef NDEBUG
285 std::string getNameOrAsOperand() const;
286#endif
287
288 /// Change all uses of this to point to a new Value.
289 ///
290 /// Go through the uses list for this definition and make each use point to
291 /// "V" instead of "this". After this completes, 'this's use list is
292 /// guaranteed to be empty.
293 void replaceAllUsesWith(Value *V);
294
295 /// Change non-metadata uses of this to point to a new Value.
296 ///
297 /// Go through the uses list for this definition and make each use point to
298 /// "V" instead of "this". This function skips metadata entries in the list.
299 void replaceNonMetadataUsesWith(Value *V);
300
301 /// Go through the uses list for this definition and make each use point
302 /// to "V" if the callback ShouldReplace returns true for the given Use.
303 /// Unlike replaceAllUsesWith() this function does not support basic block
304 /// values or constant users.
  void replaceUsesWithIf(Value *New,
                         llvm::function_ref<bool(Use &U)> ShouldReplace) {
    assert(New && "Value::replaceUsesWithIf(<null>) is invalid!");
    assert(New->getType() == getType() &&
           "replaceUses of value with new value of different type!");

    // Walk the use list manually and advance the iterator *before* mutating
    // the current Use: U.set(New) unlinks U from this value's use list, which
    // would invalidate an iterator still pointing at it.
    for (use_iterator UI = use_begin(), E = use_end(); UI != E;) {
      Use &U = *UI;
      ++UI;
      if (!ShouldReplace(U))
        continue;
      U.set(New);
    }
  }
319
320 /// replaceUsesOutsideBlock - Go through the uses list for this definition and
321 /// make each use point to "V" instead of "this" when the use is outside the
322 /// block. 'This's use list is expected to have at least one element.
323 /// Unlike replaceAllUsesWith() this function does not support basic block
324 /// values or constant users.
325 void replaceUsesOutsideBlock(Value *V, BasicBlock *BB);
326
327 //----------------------------------------------------------------------
328 // Methods for handling the chain of uses of this Value.
329 //
330 // Materializing a function can introduce new uses, so these methods come in
331 // two variants:
332 // The methods that start with materialized_ check the uses that are
333 // currently known given which functions are materialized. Be very careful
334 // when using them since you might not get all uses.
335 // The methods that don't start with materialized_ assert that modules is
336 // fully materialized.
337 void assertModuleIsMaterializedImpl() const;
338 // This indirection exists so we can keep assertModuleIsMaterializedImpl()
339 // around in release builds of Value.cpp to be linked with other code built
340 // in debug mode. But this avoids calling it in any of the release built code.
  void assertModuleIsMaterialized() const {
#ifndef NDEBUG
    // Debug builds check materialization; release builds compile this call
    // away entirely (see the comment above about keeping the impl linked).
    assertModuleIsMaterializedImpl();
#endif
  }
346
  bool use_empty() const {
    assertModuleIsMaterialized();
    // An empty use list is represented by a null head pointer.
    return UseList == nullptr;
  }
351
352 bool materialized_use_empty() const {
353 return UseList == nullptr;
354 }
355
356 using use_iterator = use_iterator_impl<Use>;
357 using const_use_iterator = use_iterator_impl<const Use>;
358
359 use_iterator materialized_use_begin() { return use_iterator(UseList); }
360 const_use_iterator materialized_use_begin() const {
361 return const_use_iterator(UseList);
362 }
363 use_iterator use_begin() {
364 assertModuleIsMaterialized();
365 return materialized_use_begin();
366 }
367 const_use_iterator use_begin() const {
368 assertModuleIsMaterialized();
369 return materialized_use_begin();
370 }
371 use_iterator use_end() { return use_iterator(); }
372 const_use_iterator use_end() const { return const_use_iterator(); }
373 iterator_range<use_iterator> materialized_uses() {
374 return make_range(materialized_use_begin(), use_end());
375 }
376 iterator_range<const_use_iterator> materialized_uses() const {
377 return make_range(materialized_use_begin(), use_end());
378 }
379 iterator_range<use_iterator> uses() {
380 assertModuleIsMaterialized();
381 return materialized_uses();
382 }
383 iterator_range<const_use_iterator> uses() const {
384 assertModuleIsMaterialized();
385 return materialized_uses();
386 }
387
388 bool user_empty() const {
389 assertModuleIsMaterialized();
390 return UseList == nullptr;
391 }
392
393 using user_iterator = user_iterator_impl<User>;
394 using const_user_iterator = user_iterator_impl<const User>;
395
396 user_iterator materialized_user_begin() { return user_iterator(UseList); }
397 const_user_iterator materialized_user_begin() const {
398 return const_user_iterator(UseList);
399 }
400 user_iterator user_begin() {
401 assertModuleIsMaterialized();
402 return materialized_user_begin();
403 }
404 const_user_iterator user_begin() const {
405 assertModuleIsMaterialized();
406 return materialized_user_begin();
407 }
408 user_iterator user_end() { return user_iterator(); }
409 const_user_iterator user_end() const { return const_user_iterator(); }
410 User *user_back() {
411 assertModuleIsMaterialized();
412 return *materialized_user_begin();
413 }
414 const User *user_back() const {
415 assertModuleIsMaterialized();
416 return *materialized_user_begin();
417 }
418 iterator_range<user_iterator> materialized_users() {
419 return make_range(materialized_user_begin(), user_end());
420 }
421 iterator_range<const_user_iterator> materialized_users() const {
422 return make_range(materialized_user_begin(), user_end());
423 }
424 iterator_range<user_iterator> users() {
425 assertModuleIsMaterialized();
426 return materialized_users();
427 }
428 iterator_range<const_user_iterator> users() const {
429 assertModuleIsMaterialized();
430 return materialized_users();
431 }
432
433 /// Return true if there is exactly one use of this value.
434 ///
435 /// This is specialized because it is a common request and does not require
436 /// traversing the whole use list.
437 bool hasOneUse() const { return hasSingleElement(uses()); }
13
Calling 'hasSingleElement<llvm::iterator_range<llvm::Value::use_iterator_impl<const llvm::Use>>>'
16
Returning from 'hasSingleElement<llvm::iterator_range<llvm::Value::use_iterator_impl<const llvm::Use>>>'
17
Returning value, which participates in a condition later
438
439 /// Return true if this Value has exactly N uses.
440 bool hasNUses(unsigned N) const;
441
442 /// Return true if this value has N uses or more.
443 ///
444 /// This is logically equivalent to getNumUses() >= N.
445 bool hasNUsesOrMore(unsigned N) const;
446
447 /// Return true if there is exactly one user of this value.
448 ///
449 /// Note that this is not the same as "has one use". If a value has one use,
450 /// then there certainly is a single user. But if value has several uses,
451 /// it is possible that all uses are in a single user, or not.
452 ///
453 /// This check is potentially costly, since it requires traversing,
454 /// in the worst case, the whole use list of a value.
455 bool hasOneUser() const;
456
457 /// Return the single use of this value that cannot be dropped, when there
458 /// is exactly one such use; otherwise return null.
459 ///
460 /// This is specialized because it is a common request and does not require
461 /// traversing the whole use list.
462 Use *getSingleUndroppableUse();
463
464 /// Return true if this value has exactly N undroppable uses.
465 ///
466 /// This is specialized because it is a common request and does not require
467 /// traversing the whole use list.
468 bool hasNUndroppableUses(unsigned N) const;
469
470 /// Return true if this value has N undroppable uses or more.
471 ///
472 /// This is logically equivalent to the number of undroppable uses being >= N.
473 bool hasNUndroppableUsesOrMore(unsigned N) const;
474
475 /// Remove every uses that can safely be removed.
476 ///
477 /// This will remove for example uses in llvm.assume.
478 /// This should be used when you want to perform a transformation but
479 /// some droppable uses prevent it.
480 /// This function optionally takes a filter to only remove some droppable
481 /// uses.
482 void dropDroppableUses(llvm::function_ref<bool(const Use *)> ShouldDrop =
483 [](const Use *) { return true; });
484
485 /// Remove every use of this value in \p User that can safely be removed.
486 void dropDroppableUsesIn(User &Usr);
487
488 /// Remove the droppable use \p U.
489 static void dropDroppableUse(Use &U);
490
491 /// Check if this value is used in the specified basic block.
492 bool isUsedInBasicBlock(const BasicBlock *BB) const;
493
494 /// This method computes the number of uses of this Value.
495 ///
496 /// This is a linear time operation. Use hasOneUse, hasNUses, or
497 /// hasNUsesOrMore to check for specific values.
498 unsigned getNumUses() const;
499
500 /// This method should only be used by the Use class.
501 void addUse(Use &U) { U.addToList(&UseList); }
502
503 /// Concrete subclass of this.
504 ///
505 /// An enumeration for keeping track of the concrete subclass of Value that
506 /// is actually instantiated. Values of this enumeration are kept in the
507 /// Value classes SubclassID field. They are used for concrete type
508 /// identification.
509 enum ValueTy {
510#define HANDLE_VALUE(Name) Name##Val,
511#include "llvm/IR/Value.def"
512
513 // Markers:
514#define HANDLE_CONSTANT_MARKER(Marker, Constant) Marker = Constant##Val,
515#include "llvm/IR/Value.def"
516 };
517
518 /// Return an ID for the concrete type of this object.
519 ///
520 /// This is used to implement the classof checks. This should not be used
521 /// for any other purpose, as the values may change as LLVM evolves. Also,
522 /// note that for instructions, the Instruction's opcode is added to
523 /// InstructionVal. So this means three things:
524 /// # there is no value with code InstructionVal (no opcode==0).
525 /// # there are more possible values for the value type than in ValueTy enum.
526 /// # the InstructionVal enumerator must be the highest valued enumerator in
527 /// the ValueTy enum.
528 unsigned getValueID() const {
529 return SubclassID;
530 }
531
532 /// Return the raw optional flags value contained in this value.
533 ///
534 /// This should only be used when testing two Values for equivalence.
535 unsigned getRawSubclassOptionalData() const {
536 return SubclassOptionalData;
537 }
538
539 /// Clear the optional flags contained in this value.
540 void clearSubclassOptionalData() {
541 SubclassOptionalData = 0;
542 }
543
544 /// Check the optional flags for equality.
545 bool hasSameSubclassOptionalData(const Value *V) const {
546 return SubclassOptionalData == V->SubclassOptionalData;
547 }
548
549 /// Return true if there is a value handle associated with this value.
550 bool hasValueHandle() const { return HasValueHandle; }
551
552 /// Return true if there is metadata referencing this value.
553 bool isUsedByMetadata() const { return IsUsedByMD; }
554
555protected:
556 /// Get the current metadata attachments for the given kind, if any.
557 ///
558 /// These functions require that the value have at most a single attachment
559 /// of the given kind, and return \c nullptr if such an attachment is missing.
560 /// @{
561 MDNode *getMetadata(unsigned KindID) const;
562 MDNode *getMetadata(StringRef Kind) const;
563 /// @}
564
565 /// Appends all attachments with the given ID to \c MDs in insertion order.
566 /// If the Value has no attachments with the given ID, or if ID is invalid,
567 /// leaves MDs unchanged.
568 /// @{
569 void getMetadata(unsigned KindID, SmallVectorImpl<MDNode *> &MDs) const;
570 void getMetadata(StringRef Kind, SmallVectorImpl<MDNode *> &MDs) const;
571 /// @}
572
573 /// Appends all metadata attached to this value to \c MDs, sorting by
574 /// KindID. The first element of each pair returned is the KindID, the second
575 /// element is the metadata value. Attachments with the same ID appear in
576 /// insertion order.
577 void
578 getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const;
579
580 /// Return true if this value has any metadata attached to it.
581 bool hasMetadata() const { return (bool)HasMetadata; }
582
583 /// Return true if this value has the given type of metadata attached.
584 /// @{
585 bool hasMetadata(unsigned KindID) const {
586 return getMetadata(KindID) != nullptr;
587 }
588 bool hasMetadata(StringRef Kind) const {
589 return getMetadata(Kind) != nullptr;
590 }
591 /// @}
592
593 /// Set a particular kind of metadata attachment.
594 ///
595 /// Sets the given attachment to \c MD, erasing it if \c MD is \c nullptr or
596 /// replacing it if it already exists.
597 /// @{
598 void setMetadata(unsigned KindID, MDNode *Node);
599 void setMetadata(StringRef Kind, MDNode *Node);
600 /// @}
601
602 /// Add a metadata attachment.
603 /// @{
604 void addMetadata(unsigned KindID, MDNode &MD);
605 void addMetadata(StringRef Kind, MDNode &MD);
606 /// @}
607
608 /// Erase all metadata attachments with the given kind.
609 ///
610 /// \returns true if any metadata was removed.
611 bool eraseMetadata(unsigned KindID);
612
613 /// Erase all metadata attached to this Value.
614 void clearMetadata();
615
616public:
617 /// Return true if this value is a swifterror value.
618 ///
619 /// swifterror values can be either a function argument or an alloca with a
620 /// swifterror attribute.
621 bool isSwiftError() const;
622
623 /// Strip off pointer casts, all-zero GEPs and address space casts.
624 ///
625 /// Returns the original uncasted value. If this is called on a non-pointer
626 /// value, it returns 'this'.
627 const Value *stripPointerCasts() const;
628 Value *stripPointerCasts() {
629 return const_cast<Value *>(
630 static_cast<const Value *>(this)->stripPointerCasts());
631 }
632
633 /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
634 ///
635 /// Returns the original uncasted value. If this is called on a non-pointer
636 /// value, it returns 'this'.
637 const Value *stripPointerCastsAndAliases() const;
638 Value *stripPointerCastsAndAliases() {
639 return const_cast<Value *>(
640 static_cast<const Value *>(this)->stripPointerCastsAndAliases());
641 }
642
643 /// Strip off pointer casts, all-zero GEPs and address space casts
644 /// but ensures the representation of the result stays the same.
645 ///
646 /// Returns the original uncasted value with the same representation. If this
647 /// is called on a non-pointer value, it returns 'this'.
648 const Value *stripPointerCastsSameRepresentation() const;
649 Value *stripPointerCastsSameRepresentation() {
650 return const_cast<Value *>(static_cast<const Value *>(this)
651 ->stripPointerCastsSameRepresentation());
652 }
653
654 /// Strip off pointer casts, all-zero GEPs and invariant group info.
655 ///
656 /// Returns the original uncasted value. If this is called on a non-pointer
657 /// value, it returns 'this'. This function should be used only in
658 /// Alias analysis.
659 const Value *stripPointerCastsAndInvariantGroups() const;
660 Value *stripPointerCastsAndInvariantGroups() {
661 return const_cast<Value *>(static_cast<const Value *>(this)
662 ->stripPointerCastsAndInvariantGroups());
663 }
664
665 /// Strip off pointer casts and all-constant inbounds GEPs.
666 ///
667 /// Returns the original pointer value. If this is called on a non-pointer
668 /// value, it returns 'this'.
669 const Value *stripInBoundsConstantOffsets() const;
670 Value *stripInBoundsConstantOffsets() {
671 return const_cast<Value *>(
672 static_cast<const Value *>(this)->stripInBoundsConstantOffsets());
673 }
674
675 /// Accumulate the constant offset this value has compared to a base pointer.
676 /// Only 'getelementptr' instructions (GEPs) are accumulated but other
677 /// instructions, e.g., casts, are stripped away as well.
678 /// The accumulated constant offset is added to \p Offset and the base
679 /// pointer is returned.
680 ///
681 /// The APInt \p Offset has to have a bit-width equal to the IntPtr type for
682 /// the address space of 'this' pointer value, e.g., use
683 /// DataLayout::getIndexTypeSizeInBits(Ty).
684 ///
685 /// If \p AllowNonInbounds is true, offsets in GEPs are stripped and
686 /// accumulated even if the GEP is not "inbounds".
687 ///
688 /// If \p ExternalAnalysis is provided it will be used to calculate a offset
689 /// when a operand of GEP is not constant.
690 /// For example, for a value \p ExternalAnalysis might try to calculate a
691 /// lower bound. If \p ExternalAnalysis is successful, it should return true.
692 ///
693 /// If this is called on a non-pointer value, it returns 'this' and the
694 /// \p Offset is not modified.
695 ///
696 /// Note that this function will never return a nullptr. It will also never
697 /// manipulate the \p Offset in a way that would not match the difference
698 /// between the underlying value and the returned one. Thus, if no constant
699 /// offset was found, the returned value is the underlying one and \p Offset
700 /// is unchanged.
701 const Value *stripAndAccumulateConstantOffsets(
702 const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
703 function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
704 nullptr) const;
705 Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
706 bool AllowNonInbounds) {
707 return const_cast<Value *>(
708 static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
709 DL, Offset, AllowNonInbounds));
710 }
711
712 /// This is a wrapper around stripAndAccumulateConstantOffsets with the
713 /// in-bounds requirement set to false.
714 const Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
715 APInt &Offset) const {
716 return stripAndAccumulateConstantOffsets(DL, Offset,
717 /* AllowNonInbounds */ false);
718 }
719 Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
720 APInt &Offset) {
721 return stripAndAccumulateConstantOffsets(DL, Offset,
722 /* AllowNonInbounds */ false);
723 }
724
725 /// Strip off pointer casts and inbounds GEPs.
726 ///
727 /// Returns the original pointer value. If this is called on a non-pointer
728 /// value, it returns 'this'.
729 const Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
730 [](const Value *) {}) const;
731 inline Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
732 [](const Value *) {}) {
733 return const_cast<Value *>(
734 static_cast<const Value *>(this)->stripInBoundsOffsets(Func));
735 }
736
737 /// Returns the number of bytes known to be dereferenceable for the
738 /// pointer value.
739 ///
740 /// If CanBeNull is set by this function the pointer can either be null or be
741 /// dereferenceable up to the returned number of bytes.
742 uint64_t getPointerDereferenceableBytes(const DataLayout &DL,
743 bool &CanBeNull) const;
744
745 /// Returns an alignment of the pointer value.
746 ///
747 /// Returns an alignment which is either specified explicitly, e.g. via
748 /// align attribute of a function argument, or guaranteed by DataLayout.
749 Align getPointerAlignment(const DataLayout &DL) const;
750
751 /// Translate PHI node to its predecessor from the given basic block.
752 ///
753 /// If this value is a PHI node with CurBB as its parent, return the value in
754 /// the PHI node corresponding to PredBB. If not, return ourself. This is
755 /// useful if you want to know the value something has in a predecessor
756 /// block.
757 const Value *DoPHITranslation(const BasicBlock *CurBB,
758 const BasicBlock *PredBB) const;
759 Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) {
760 return const_cast<Value *>(
761 static_cast<const Value *>(this)->DoPHITranslation(CurBB, PredBB));
762 }
763
764 /// The maximum alignment for instructions.
765 ///
766 /// This is the greatest alignment value supported by load, store, and alloca
767 /// instructions, and global values.
768 static const unsigned MaxAlignmentExponent = 29;
769 static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
770
771 /// Mutate the type of this Value to be of the specified type.
772 ///
773 /// Note that this is an extremely dangerous operation which can create
774 /// completely invalid IR very easily. It is strongly recommended that you
775 /// recreate IR objects with the right types instead of mutating them in
776 /// place.
777 void mutateType(Type *Ty) {
778 VTy = Ty;
779 }
780
781 /// Sort the use-list.
782 ///
783 /// Sorts the Value's use-list by Cmp using a stable mergesort. Cmp is
784 /// expected to compare two \a Use references.
785 template <class Compare> void sortUseList(Compare Cmp);
786
787 /// Reverse the use-list.
788 void reverseUseList();
789
790private:
791 /// Merge two lists together.
792 ///
793 /// Merges \c L and \c R using \c Cmp. To enable stable sorts, always pushes
794 /// "equal" items from L before items from R.
795 ///
796 /// \return the first element in the list.
797 ///
798 /// \note Completely ignores \a Use::Prev (doesn't read, doesn't update).
799 template <class Compare>
800 static Use *mergeUseLists(Use *L, Use *R, Compare Cmp) {
801 Use *Merged;
802 Use **Next = &Merged;
803
804 while (true) {
805 if (!L) {
806 *Next = R;
807 break;
808 }
809 if (!R) {
810 *Next = L;
811 break;
812 }
813 if (Cmp(*R, *L)) {
814 *Next = R;
815 Next = &R->Next;
816 R = R->Next;
817 } else {
818 *Next = L;
819 Next = &L->Next;
820 L = L->Next;
821 }
822 }
823
824 return Merged;
825 }
826
827protected:
828 unsigned short getSubclassDataFromValue() const { return SubclassData; }
829 void setValueSubclassData(unsigned short D) { SubclassData = D; }
830};
831
832struct ValueDeleter { void operator()(Value *V) { V->deleteValue(); } };
833
834/// Use this instead of std::unique_ptr<Value> or std::unique_ptr<Instruction>.
835/// Those don't work because Value and Instruction's destructors are protected,
836/// aren't virtual, and won't destroy the complete object.
837using unique_value = std::unique_ptr<Value, ValueDeleter>;
838
/// Stream a Value by delegating to Value::print().
inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
  V.print(OS);
  return OS;
}
843
844void Use::set(Value *V) {
845 if (Val) removeFromList();
846 Val = V;
847 if (V) V->addUse(*this);
848}
849
/// Assigning a Value* to a Use repoints the use and yields the pointer back,
/// so assignments can be chained.
Value *Use::operator=(Value *RHS) {
  set(RHS);
  return RHS;
}
854
/// Copy-assignment repoints this use at the same value \p RHS refers to;
/// it does not copy RHS's list linkage.
const Use &Use::operator=(const Use &RHS) {
  set(RHS.Val);
  return *this;
}
859
// Stable bottom-up mergesort of the use list. Maintains a "binomial vector"
// of sorted sublists (Slots[I] holds either null or a sorted list of 2^I
// uses), merging on collision like binary addition with carries.
template <class Compare> void Value::sortUseList(Compare Cmp) {
  if (!UseList || !UseList->Next)
    // No need to sort 0 or 1 uses.
    return;

  // Note: this function completely ignores Prev pointers until the end when
  // they're fixed en masse.

  // Create a binomial vector of sorted lists, visiting uses one at a time and
  // merging lists as necessary.
  const unsigned MaxSlots = 32;
  Use *Slots[MaxSlots];

  // Collect the first use, turning it into a single-item list.
  Use *Next = UseList->Next;
  UseList->Next = nullptr;
  unsigned NumSlots = 1;
  Slots[0] = UseList;

  // Collect all but the last use.
  while (Next->Next) {
    Use *Current = Next;
    Next = Current->Next;

    // Turn Current into a single-item list.
    Current->Next = nullptr;

    // Save Current in the first available slot, merging on collisions.
    unsigned I;
    for (I = 0; I < NumSlots; ++I) {
      if (!Slots[I])
        break;

      // Merge two lists, doubling the size of Current and emptying slot I.
      //
      // Since the uses in Slots[I] originally preceded those in Current, send
      // Slots[I] in as the left parameter to maintain a stable sort.
      Current = mergeUseLists(Slots[I], Current, Cmp);
      Slots[I] = nullptr;
    }
    // Check if this is a new slot.
    if (I == NumSlots) {
      ++NumSlots;
      assert(NumSlots <= MaxSlots && "Use list bigger than 2^32");
    }

    // Found an open slot.
    Slots[I] = Current;
  }

  // The loop above deliberately stopped one short: Next is the final,
  // still-unlinked use. Seed the result list with it, then fold in the slots.
  assert(Next && "Expected one more Use");
  assert(!Next->Next && "Expected only one Use");
  UseList = Next;
  for (unsigned I = 0; I < NumSlots; ++I)
    if (Slots[I])
      // Since the uses in Slots[I] originally preceded those in UseList, send
      // Slots[I] in as the left parameter to maintain a stable sort.
      UseList = mergeUseLists(Slots[I], UseList, Cmp);

  // Fix the Prev pointers (ignored by mergeUseLists) in one final pass.
  for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) {
    I->Prev = Prev;
    Prev = &I->Next;
  }
}
926
927// isa - Provide some specializations of isa so that we don't have to include
928// the subtype header files to test to see if the value is a subclass...
929//
template <> struct isa_impl<Constant, Value> {
  static inline bool doit(const Value &Val) {
    // ConstantFirstVal is 0, so a ">= ConstantFirstVal" check on the unsigned
    // ID would be tautological; assert the assumption instead of comparing.
    static_assert(Value::ConstantFirstVal == 0, "Val.getValueID() >= Value::ConstantFirstVal");
    return Val.getValueID() <= Value::ConstantLastVal;
  }
};
936
// ConstantData subclasses occupy a contiguous range of value IDs.
template <> struct isa_impl<ConstantData, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() >= Value::ConstantDataFirstVal &&
           Val.getValueID() <= Value::ConstantDataLastVal;
  }
};
943
// ConstantAggregate subclasses occupy a contiguous range of value IDs.
template <> struct isa_impl<ConstantAggregate, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() >= Value::ConstantAggregateFirstVal &&
           Val.getValueID() <= Value::ConstantAggregateLastVal;
  }
};
950
// Exact-ID check: Argument has a single value ID.
template <> struct isa_impl<Argument, Value> {
  static inline bool doit (const Value &Val) {
    return Val.getValueID() == Value::ArgumentVal;
  }
};
956
// Exact-ID check: InlineAsm has a single value ID.
template <> struct isa_impl<InlineAsm, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::InlineAsmVal;
  }
};
962
// Instructions are encoded as InstructionVal + opcode, so every ID at or
// above InstructionVal denotes some Instruction (see the ValueTy comment).
template <> struct isa_impl<Instruction, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() >= Value::InstructionVal;
  }
};
968
// Exact-ID check: BasicBlock has a single value ID.
template <> struct isa_impl<BasicBlock, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::BasicBlockVal;
  }
};
974
// Exact-ID check: Function has a single value ID.
template <> struct isa_impl<Function, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::FunctionVal;
  }
};
980
// Exact-ID check: GlobalVariable has a single value ID.
template <> struct isa_impl<GlobalVariable, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::GlobalVariableVal;
  }
};
986
// Exact-ID check: GlobalAlias has a single value ID.
template <> struct isa_impl<GlobalAlias, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::GlobalAliasVal;
  }
};
992
// Exact-ID check: GlobalIFunc has a single value ID.
template <> struct isa_impl<GlobalIFunc, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::GlobalIFuncVal;
  }
};
998
// Composite check: an indirect symbol is either an alias or an ifunc.
template <> struct isa_impl<GlobalIndirectSymbol, Value> {
  static inline bool doit(const Value &Val) {
    return isa<GlobalAlias>(Val) || isa<GlobalIFunc>(Val);
  }
};
1004
// Composite check: a global value is either a global object (variable or
// function) or an indirect symbol (alias or ifunc).
template <> struct isa_impl<GlobalValue, Value> {
  static inline bool doit(const Value &Val) {
    return isa<GlobalObject>(Val) || isa<GlobalIndirectSymbol>(Val);
  }
};
1010
// Composite check: a global object is either a global variable or a function.
template <> struct isa_impl<GlobalObject, Value> {
  static inline bool doit(const Value &Val) {
    return isa<GlobalVariable>(Val) || isa<Function>(Val);
  }
};
1016
1017// Create wrappers for C Binding types (see CBindingWrapping.h).
1018DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)inline Value *unwrap(LLVMValueRef P) { return reinterpret_cast
<Value*>(P); } inline LLVMValueRef wrap(const Value *P)
{ return reinterpret_cast<LLVMValueRef>(const_cast<
Value*>(P)); } template<typename T> inline T *unwrap
(LLVMValueRef P) { return cast<T>(unwrap(P)); }
1019
1020// Specialized opaque value conversions.
// Reinterpret an array of C-API value handles as an array of Value*.
// Valid because LLVMValueRef is an opaque pointer to Value.
inline Value **unwrap(LLVMValueRef *Vals) {
  return reinterpret_cast<Value**>(Vals);
}
1024
// Reinterpret an array of C-API value handles as an array of T*, where T is a
// Value subclass. In debug builds, cast<T> inside unwrap<T> asserts that every
// element really is a T.
template<typename T>
inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
#ifndef NDEBUG
  for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
    unwrap<T>(*I); // For side effect of calling assert on invalid usage.
#endif
  // Length is only consumed by the debug-mode loop above.
  (void)Length;
  return reinterpret_cast<T**>(Vals);
}
1034
// Wrap an array of Value pointers as C-API handles; the C interface has no
// const-ness, hence the const_cast.
inline LLVMValueRef *wrap(const Value **Vals) {
  return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals));
}
1038
1039} // end namespace llvm
1040
1041#endif // LLVM_IR_VALUE_H

/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h

1//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some templates that are useful if you are working with the
10// STL at all.
11//
12// No library is required when using these functions.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_ADT_STLEXTRAS_H
17#define LLVM_ADT_STLEXTRAS_H
18
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/iterator.h"
21#include "llvm/ADT/iterator_range.h"
22#include "llvm/Config/abi-breaking.h"
23#include "llvm/Support/ErrorHandling.h"
24#include <algorithm>
25#include <cassert>
26#include <cstddef>
27#include <cstdint>
28#include <cstdlib>
29#include <functional>
30#include <initializer_list>
31#include <iterator>
32#include <limits>
33#include <memory>
34#include <tuple>
35#include <type_traits>
36#include <utility>
37
38#ifdef EXPENSIVE_CHECKS
39#include <random> // for std::mt19937
40#endif
41
42namespace llvm {
43
44// Only used by compiler if both template types are the same. Useful when
45// using SFINAE to test for the existence of member functions.
46template <typename T, T> struct SameType;
47
namespace detail {

// Iterator type produced by std::begin() on an lvalue of RangeT.
template <typename RangeT>
using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));

// Element type of RangeT: the dereferenced iterator type with any
// reference qualification stripped.
template <typename RangeT>
using ValueOfRange = typename std::remove_reference<decltype(
    *std::begin(std::declval<RangeT &>()))>::type;

} // end namespace detail
58
59//===----------------------------------------------------------------------===//
60// Extra additions to <type_traits>
61//===----------------------------------------------------------------------===//
62
// Pre-C++17 backport of std::negation: logical NOT of a trait's ::value.
63template <typename T>
64struct negation : std::integral_constant<bool, !bool(T::value)> {};
65
// Pre-C++17 backport of std::conjunction: short-circuiting logical AND of
// traits. An empty pack is true; otherwise the first false trait (or the
// last trait) is inherited from, so its ::value is exposed.
66template <typename...> struct conjunction : std::true_type {};
67template <typename B1> struct conjunction<B1> : B1 {};
68template <typename B1, typename... Bn>
69struct conjunction<B1, Bn...>
70 : std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {};
71
// Metafunction mapping T to `const T *`.
72template <typename T> struct make_const_ptr {
73 using type =
74 typename std::add_pointer<typename std::add_const<T>::type>::type;
75};
76
// Metafunction mapping T to `const T &`.
77template <typename T> struct make_const_ref {
78 using type = typename std::add_lvalue_reference<
79 typename std::add_const<T>::type>::type;
80};
81
82/// Utilities for detecting if a given trait holds for some set of arguments
83/// 'Args'. For example, the given trait could be used to detect if a given type
84/// has a copy assignment operator:
85/// template<class T>
86/// using has_copy_assign_t = decltype(std::declval<T&>()
87/// = std::declval<const T&>());
88/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
89namespace detail {
90template <typename...> using void_t = void;
// Primary template: chosen when Op<Args...> is ill-formed; reports false.
91template <class, template <class...> class Op, class... Args> struct detector {
92 using value_t = std::false_type;
93};
// Partial specialization: chosen when Op<Args...> is well-formed (this is the
// standard "detection idiom", cf. std::experimental::is_detected).
94template <template <class...> class Op, class... Args>
95struct detector<void_t<Op<Args...>>, Op, Args...> {
96 using value_t = std::true_type;
97};
98} // end namespace detail
99
100template <template <class...> class Op, class... Args>
101using is_detected = typename detail::detector<void, Op, Args...>::value_t;
102
103/// Check if a Callable type can be invoked with the given set of arg types.
104namespace detail {
105template <typename Callable, typename... Args>
106using is_invocable =
107 decltype(std::declval<Callable &>()(std::declval<Args>()...));
108} // namespace detail
109
110template <typename Callable, typename... Args>
111using is_invocable = is_detected<detail::is_invocable, Callable, Args...>;
112
113/// This class provides various trait information about a callable object.
114/// * To access the number of arguments: Traits::num_args
115/// * To access the type of an argument: Traits::arg_t<Index>
116/// * To access the type of the result: Traits::result_t
// For class types (lambdas, functors): recurse on the type of operator().
117template <typename T, bool isClass = std::is_class<T>::value>
118struct function_traits : public function_traits<decltype(&T::operator())> {};
119
120/// Overload for class function types.
121template <typename ClassType, typename ReturnType, typename... Args>
122struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
123 /// The number of arguments to this function.
124 enum { num_args = sizeof...(Args) };
125
126 /// The result type of this function.
127 using result_t = ReturnType;
128
129 /// The type of an argument to this function.
130 template <size_t Index>
131 using arg_t = typename std::tuple_element<Index, std::tuple<Args...>>::type;
132};
/// Overload for non-const member function pointers: forwards to the const one.
133/// Overload for class function types.
134template <typename ClassType, typename ReturnType, typename... Args>
135struct function_traits<ReturnType (ClassType::*)(Args...), false>
136 : function_traits<ReturnType (ClassType::*)(Args...) const> {};
137/// Overload for non-class function types.
138template <typename ReturnType, typename... Args>
139struct function_traits<ReturnType (*)(Args...), false> {
140 /// The number of arguments to this function.
141 enum { num_args = sizeof...(Args) };
142
143 /// The result type of this function.
144 using result_t = ReturnType;
145
146 /// The type of an argument to this function.
147 template <size_t i>
148 using arg_t = typename std::tuple_element<i, std::tuple<Args...>>::type;
149};
150/// Overload for non-class function type references.
151template <typename ReturnType, typename... Args>
152struct function_traits<ReturnType (&)(Args...), false>
153 : public function_traits<ReturnType (*)(Args...)> {};
154
155//===----------------------------------------------------------------------===//
156// Extra additions to <functional>
157//===----------------------------------------------------------------------===//
158
/// A functor that returns its argument unchanged, like C++20's std::identity.
///
/// Exposes \c argument_type so it can be used wherever an adaptable unary
/// functor is expected.
template <class Ty> struct identity {
  using argument_type = Ty;

  /// Return the argument itself (mutable overload).
  Ty &operator()(Ty &Val) const { return Val; }

  /// Return the argument itself (const overload).
  const Ty &operator()(const Ty &Val) const { return Val; }
};
169
/// An efficient, type-erasing, non-owning reference to a callable. This is
/// intended for use as the type of a function parameter that is not used
/// after the function in question returns.
///
/// This class does not own the callable, so it is not in general safe to store
/// a function_ref.
template<typename Fn> class function_ref;

template<typename Ret, typename ...Params>
class function_ref<Ret(Params...)> {
  /// Trampoline that casts \p callable back to the erased callable type and
  /// invokes it. Null when this function_ref is empty.
  Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
  /// Type-erased address of the referenced callable. Initialized so that a
  /// default- or nullptr-constructed function_ref holds no indeterminate
  /// value: copying or passing an uninitialized intptr_t is UB and is flagged
  /// by the static analyzer ("function call argument is an uninitialized
  /// value").
  intptr_t callable = 0;

  template<typename Callable>
  static Ret callback_fn(intptr_t callable, Params ...params) {
    return (*reinterpret_cast<Callable*>(callable))(
        std::forward<Params>(params)...);
  }

public:
  function_ref() = default;
  function_ref(std::nullptr_t) {}

  template <typename Callable>
  function_ref(
      Callable &&callable,
      // This is not the copy-constructor.
      std::enable_if_t<
          !std::is_same<std::remove_cv_t<std::remove_reference_t<Callable>>,
                        function_ref>::value> * = nullptr,
      // Functor must be callable and return a suitable type.
      std::enable_if_t<std::is_void<Ret>::value ||
                       std::is_convertible<decltype(std::declval<Callable>()(
                                               std::declval<Params>()...)),
                                           Ret>::value> * = nullptr)
      : callback(callback_fn<typename std::remove_reference<Callable>::type>),
        callable(reinterpret_cast<intptr_t>(&callable)) {}

  /// Invoke the referenced callable. Undefined behavior if this function_ref
  /// is empty (callback is null).
  Ret operator()(Params ...params) const {
    return callback(callable, std::forward<Params>(params)...);
  }

  /// Returns true if this refers to a callable.
  explicit operator bool() const { return callback; }
};
214
215//===----------------------------------------------------------------------===//
216// Extra additions to <iterator>
217//===----------------------------------------------------------------------===//
218
//===----------------------------------------------------------------------===//
// Extra additions to <iterator>
//===----------------------------------------------------------------------===//

namespace adl_detail {

using std::begin;

// Unqualified call so argument-dependent lookup can find a user-provided
// begin(); std::begin is visible here as the fallback.
template <typename RangeT> decltype(auto) adl_begin(RangeT &&range) {
  return begin(std::forward<RangeT>(range));
}

using std::end;

// Same ADL dispatch trick for end().
template <typename RangeT> decltype(auto) adl_end(RangeT &&range) {
  return end(std::forward<RangeT>(range));
}

using std::swap;

// Same ADL dispatch trick for swap().
template <typename T>
void adl_swap(T &&a, T &&b) noexcept(noexcept(swap(std::declval<T>(),
                                                   std::declval<T>()))) {
  swap(std::forward<T>(a), std::forward<T>(b));
}

} // end namespace adl_detail

/// Version of std::begin that also finds begin() via argument-dependent
/// lookup, so ranges declared in their own namespaces work.
template <typename RangeT> decltype(auto) adl_begin(RangeT &&range) {
  return adl_detail::adl_begin(std::forward<RangeT>(range));
}

/// Version of std::end that also finds end() via argument-dependent lookup.
template <typename RangeT> decltype(auto) adl_end(RangeT &&range) {
  return adl_detail::adl_end(std::forward<RangeT>(range));
}

/// Version of std::swap that also finds swap() via argument-dependent lookup.
template <typename T>
void adl_swap(T &&lhs, T &&rhs) noexcept(
    noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
  adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
}

/// Test whether \p RangeOrContainer is empty. Similar to C++17 std::empty.
template <typename T> constexpr bool empty(const T &RangeOrContainer) {
  return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer);
}
266
/// Returns true if the given container only contains a single element.
template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
  auto Begin = std::begin(C);
  auto End = std::end(C);
  // Non-empty, and advancing past the first element reaches the end.
  if (Begin == End)
    return false;
  return std::next(Begin) == End;
}
272
/// Return a range covering \p RangeOrContainer with the first N elements
/// excluded.
template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N = 1) {
  auto NewBegin = std::next(adl_begin(RangeOrContainer), N);
  auto End = adl_end(RangeOrContainer);
  return make_range(NewBegin, End);
}
279
280// mapped_iterator - This is a simple iterator adapter that causes a function to
281// be applied whenever operator* is invoked on the iterator.
282
// FuncReturnTy defaults to the type F yields when applied to a dereferenced
// ItTy; the adaptor's value_type is that with references stripped.
283template <typename ItTy, typename FuncTy,
284 typename FuncReturnTy =
285 decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
286class mapped_iterator
287 : public iterator_adaptor_base<
288 mapped_iterator<ItTy, FuncTy>, ItTy,
289 typename std::iterator_traits<ItTy>::iterator_category,
290 typename std::remove_reference<FuncReturnTy>::type> {
291public:
292 mapped_iterator(ItTy U, FuncTy F)
293 : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
294
// Access the wrapped (unmapped) iterator.
295 ItTy getCurrent() { return this->I; }
296
// Apply F lazily on each dereference; the underlying sequence is not changed.
297 FuncReturnTy operator*() const { return F(*this->I); }
298
299private:
300 FuncTy F;
301};
302
303// map_iterator - Provide a convenient way to create mapped_iterators, just like
304// make_pair is useful for creating pairs...
305template <class ItTy, class FuncTy>
306inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
307 return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
308}
309
// map_range - Lazily apply F to every element of C, as an iterator_range.
310template <class ContainerTy, class FuncTy>
311auto map_range(ContainerTy &&C, FuncTy F) {
312 return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F));
313}
314
315/// Helper to determine if type T has a member called rbegin().
316template <typename Ty> class has_rbegin_impl {
317 using yes = char[1];
318 using no = char[2];
319
// Classic sizeof-based SFINAE: this overload participates only when
// I->rbegin() is well-formed for Inner.
320 template <typename Inner>
321 static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
322
// Fallback overload, chosen when the one above is removed by SFINAE.
323 template <typename>
324 static no& test(...);
325
326public:
327 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
328};
329
330/// Metafunction to determine if T& or T has a member called rbegin().
331template <typename Ty>
332struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
333};
334
335// Returns an iterator_range over the given container which iterates in reverse.
336// Note that the container must have rbegin()/rend() methods for this to work.
// This overload is selected (via enable_if) only when C has rbegin().
337template <typename ContainerTy>
338auto reverse(ContainerTy &&C,
339 std::enable_if_t<has_rbegin<ContainerTy>::value> * = nullptr) {
340 return make_range(C.rbegin(), C.rend());
341}
342
/// Wrap the given iterator in a std::reverse_iterator.
template <typename IteratorTy>
std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
  using RevIteratorTy = std::reverse_iterator<IteratorTy>;
  return RevIteratorTy(It);
}
348
349// Returns an iterator_range over the given container which iterates in reverse.
350// Note that the container must have begin()/end() methods which return
351// bidirectional iterators for this to work.
// This overload is selected (via enable_if) only when C has NO rbegin()
// member, complementing the rbegin()-based overload above.
352template <typename ContainerTy>
353auto reverse(ContainerTy &&C,
354 std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) {
355 return make_range(llvm::make_reverse_iterator(std::end(C)),
356 llvm::make_reverse_iterator(std::begin(C)));
357}
358
359/// An iterator adaptor that filters the elements of given inner iterators.
360///
361/// The predicate parameter should be a callable object that accepts the wrapped
362/// iterator's reference type and returns a bool. When incrementing or
363/// decrementing the iterator, it will call the predicate on each element and
364/// skip any where it returns false.
365///
366/// \code
367/// int A[] = { 1, 2, 3, 4 };
368/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
369/// // R contains { 1, 3 }.
370/// \endcode
371///
372/// Note: filter_iterator_base implements support for forward iteration.
373/// filter_iterator_impl exists to provide support for bidirectional iteration,
374/// conditional on whether the wrapped iterator supports it.
375template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
376class filter_iterator_base
377 : public iterator_adaptor_base<
378 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
379 WrappedIteratorT,
380 typename std::common_type<
381 IterTag, typename std::iterator_traits<
382 WrappedIteratorT>::iterator_category>::type> {
383 using BaseT = iterator_adaptor_base<
384 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
385 WrappedIteratorT,
386 typename std::common_type<
387 IterTag, typename std::iterator_traits<
388 WrappedIteratorT>::iterator_category>::type>;
389
390protected:
391 WrappedIteratorT End;
392 PredicateT Pred;
393
// Advance the wrapped iterator I until it reaches End or an element that
// satisfies Pred.
394 void findNextValid() {
395 while (this->I != End && !Pred(*this->I))
396 BaseT::operator++();
397 }
398
399 // Construct the iterator. The begin iterator needs to know where the end
400 // is, so that it can properly stop when it gets there. The end iterator only
401 // needs the predicate to support bidirectional iteration.
402 filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
403 PredicateT Pred)
404 : BaseT(Begin), End(End), Pred(Pred) {
405 findNextValid();
406 }
407
408public:
409 using BaseT::operator++;
410
411 filter_iterator_base &operator++() {
412 BaseT::operator++();
413 findNextValid();
414 return *this;
415 }
416};
417
418/// Specialization of filter_iterator_base for forward iteration only.
419template <typename WrappedIteratorT, typename PredicateT,
420 typename IterTag = std::forward_iterator_tag>
421class filter_iterator_impl
422 : public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
423 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>;
424
425public:
426 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
427 PredicateT Pred)
428 : BaseT(Begin, End, Pred) {}
429};
430
431/// Specialization of filter_iterator_base for bidirectional iteration.
432template <typename WrappedIteratorT, typename PredicateT>
433class filter_iterator_impl<WrappedIteratorT, PredicateT,
434 std::bidirectional_iterator_tag>
435 : public filter_iterator_base<WrappedIteratorT, PredicateT,
436 std::bidirectional_iterator_tag> {
437 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT,
438 std::bidirectional_iterator_tag>;
// Step backwards until an element satisfying Pred is found. Assumes one
// exists before the current position (the begin element always satisfies it
// by construction), so no begin-bound check is needed here.
439 void findPrevValid() {
440 while (!this->Pred(*this->I))
441 BaseT::operator--();
442 }
443
444public:
445 using BaseT::operator--;
446
447 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
448 PredicateT Pred)
449 : BaseT(Begin, End, Pred) {}
450
451 filter_iterator_impl &operator--() {
452 BaseT::operator--();
453 findPrevValid();
454 return *this;
455 }
456};
457
458namespace detail {
459
// Maps the bool "is bidirectional" to the corresponding iterator tag type.
460template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
461 using type = std::forward_iterator_tag;
462};
463
464template <> struct fwd_or_bidi_tag_impl<true> {
465 using type = std::bidirectional_iterator_tag;
466};
467
468/// Helper which sets its type member to forward_iterator_tag if the category
469/// of \p IterT does not derive from bidirectional_iterator_tag, and to
470/// bidirectional_iterator_tag otherwise.
471template <typename IterT> struct fwd_or_bidi_tag {
472 using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
473 std::bidirectional_iterator_tag,
474 typename std::iterator_traits<IterT>::iterator_category>::value>::type;
475};
476
477} // namespace detail
478
479/// Defines filter_iterator to a suitable specialization of
480/// filter_iterator_impl, based on the underlying iterator's category.
481template <typename WrappedIteratorT, typename PredicateT>
482using filter_iterator = filter_iterator_impl<
483 WrappedIteratorT, PredicateT,
484 typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;
485
486/// Convenience function that takes a range of elements and a predicate,
487/// and return a new filter_iterator range.
488///
489/// FIXME: Currently if RangeT && is a rvalue reference to a temporary, the
490/// lifetime of that temporary is not kept by the returned range object, and the
491/// temporary is going to be dropped on the floor after the make_iterator_range
492/// full expression that contains this function call.
493template <typename RangeT, typename PredicateT>
494iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
495make_filter_range(RangeT &&Range, PredicateT Pred) {
496 using FilterIteratorT =
497 filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
498 return make_range(
499 FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
500 std::end(std::forward<RangeT>(Range)), Pred),
501 FilterIteratorT(std::end(std::forward<RangeT>(Range)),
502 std::end(std::forward<RangeT>(Range)), Pred));
503}
504
505/// A pseudo-iterator adaptor that is designed to implement "early increment"
506/// style loops.
507///
508/// This is *not a normal iterator* and should almost never be used directly. It
509/// is intended primarily to be used with range based for loops and some range
510/// algorithms.
511///
512/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
513/// somewhere between them. The constraints of these iterators are:
514///
515/// - On construction or after being incremented, it is comparable and
516/// dereferencable. It is *not* incrementable.
517/// - After being dereferenced, it is neither comparable nor dereferencable, it
518/// is only incrementable.
519///
520/// This means you can only dereference the iterator once, and you can only
521/// increment it once between dereferences.
522template <typename WrappedIteratorT>
523class early_inc_iterator_impl
524 : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
525 WrappedIteratorT, std::input_iterator_tag> {
526 using BaseT =
527 iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
528 WrappedIteratorT, std::input_iterator_tag>;
529
530 using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;
531
532protected:
// Debug-only flag tracking the dereference/increment protocol described
// above; compiled in only with ABI-breaking checks enabled.
533#if LLVM_ENABLE_ABI_BREAKING_CHECKS1
534 bool IsEarlyIncremented = false;
535#endif
536
537public:
538 early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
539
540 using BaseT::operator*;
541 decltype(*std::declval<WrappedIteratorT>()) operator*() {
542#if LLVM_ENABLE_ABI_BREAKING_CHECKS1
543 assert(!IsEarlyIncremented && "Cannot dereference twice!")((!IsEarlyIncremented && "Cannot dereference twice!")
? static_cast<void> (0) : __assert_fail ("!IsEarlyIncremented && \"Cannot dereference twice!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 543, __PRETTY_FUNCTION__))
;
544 IsEarlyIncremented = true;
545#endif
// The "early increment": the wrapped iterator advances as a side effect of
// dereferencing, so erasing the element just yielded is safe.
546 return *(this->I)++;
547 }
548
549 using BaseT::operator++;
550 early_inc_iterator_impl &operator++() {
551#if LLVM_ENABLE_ABI_BREAKING_CHECKS1
552 assert(IsEarlyIncremented && "Cannot increment before dereferencing!")((IsEarlyIncremented && "Cannot increment before dereferencing!"
) ? static_cast<void> (0) : __assert_fail ("IsEarlyIncremented && \"Cannot increment before dereferencing!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 552, __PRETTY_FUNCTION__))
;
553 IsEarlyIncremented = false;
554#endif
// Intentionally a no-op: the real advance already happened in operator*.
555 return *this;
556 }
557
558 friend bool operator==(const early_inc_iterator_impl &LHS,
559 const early_inc_iterator_impl &RHS) {
560#if LLVM_ENABLE_ABI_BREAKING_CHECKS1
561 assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!")((!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!"
) ? static_cast<void> (0) : __assert_fail ("!LHS.IsEarlyIncremented && \"Cannot compare after dereferencing!\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 561, __PRETTY_FUNCTION__))
;
562#endif
563 return (const BaseT &)LHS == (const BaseT &)RHS;
564 }
565};
566
567/// Make a range that does early increment to allow mutation of the underlying
568/// range without disrupting iteration.
569///
570/// The underlying iterator will be incremented immediately after it is
571/// dereferenced, allowing deletion of the current node or insertion of nodes to
572/// not disrupt iteration provided they do not invalidate the *next* iterator --
573/// the current iterator can be invalidated.
574///
575/// This requires a very exact pattern of use that is only really suitable to
576/// range based for loops and other range algorithms that explicitly guarantee
577/// to dereference exactly once each element, and to increment exactly once each
578/// element.
579template <typename RangeT>
580iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
581make_early_inc_range(RangeT &&Range) {
582 using EarlyIncIteratorT =
583 early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
584 return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
585 EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
586}
587
588// forward declarations required by zip_shortest/zip_first/zip_longest
// (definitions appear later in this header, after the zip machinery).
589template <typename R, typename UnaryPredicate>
590bool all_of(R &&range, UnaryPredicate P);
591template <typename R, typename UnaryPredicate>
592bool any_of(R &&range, UnaryPredicate P);
593
594namespace detail {
595
596using std::declval;
597
598// We have to alias this since inlining the actual type at the usage site
599// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
600template<typename... Iters> struct ZipTupleType {
601 using type = std::tuple<decltype(*declval<Iters>())...>;
602};
603
604template <typename ZipType, typename... Iters>
605using zip_traits = iterator_facade_base<
606 ZipType, typename std::common_type<std::bidirectional_iterator_tag,
607 typename std::iterator_traits<
608 Iters>::iterator_category...>::type,
609 // ^ TODO: Implement random access methods.
610 typename ZipTupleType<Iters...>::type,
611 typename std::iterator_traits<typename std::tuple_element<
612 0, std::tuple<Iters...>>::type>::difference_type,
613 // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
614 // inner iterators have the same difference_type. It would fail if, for
615 // instance, the second field's difference_type were non-numeric while the
616 // first is.
617 typename ZipTupleType<Iters...>::type *,
618 typename ZipTupleType<Iters...>::type>;
619
// Shared plumbing for zip_first/zip_shortest: stores the tuple of inner
// iterators and applies deref/increment/decrement across all of them.
620template <typename ZipType, typename... Iters>
621struct zip_common : public zip_traits<ZipType, Iters...> {
622 using Base = zip_traits<ZipType, Iters...>;
623 using value_type = typename Base::value_type;
624
625 std::tuple<Iters...> iterators;
626
627protected:
628 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
629 return value_type(*std::get<Ns>(iterators)...);
630 }
631
632 template <size_t... Ns>
633 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
634 return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
635 }
636
637 template <size_t... Ns>
638 decltype(iterators) tup_dec(std::index_sequence<Ns...>) const {
639 return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
640 }
641
642public:
643 zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
644
645 value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
646
647 const value_type operator*() const {
648 return deref(std::index_sequence_for<Iters...>{});
649 }
650
// CRTP-style cast back to the derived zip iterator type.
651 ZipType &operator++() {
652 iterators = tup_inc(std::index_sequence_for<Iters...>{});
653 return *reinterpret_cast<ZipType *>(this);
654 }
655
656 ZipType &operator--() {
657 static_assert(Base::IsBidirectional,
658 "All inner iterators must be at least bidirectional.");
659 iterators = tup_dec(std::index_sequence_for<Iters...>{});
660 return *reinterpret_cast<ZipType *>(this);
661 }
662};
663
// Equality compares only the FIRST inner iterator; correct only if the first
// sequence is the shortest (see zip_first() below).
664template <typename... Iters>
665struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
666 using Base = zip_common<zip_first<Iters...>, Iters...>;
667
668 bool operator==(const zip_first<Iters...> &other) const {
669 return std::get<0>(this->iterators) == std::get<0>(other.iterators);
670 }
671
672 zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
673};
674
675template <typename... Iters>
676class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
// Returns true only while EVERY inner iterator differs from the matching one
// in 'other'; operator== negates this, so comparison against the end sentinel
// reports equality as soon as ANY sequence is exhausted.
677 template <size_t... Ns>
678 bool test(const zip_shortest<Iters...> &other,
679 std::index_sequence<Ns...>) const {
680 return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
681 std::get<Ns>(other.iterators)...},
682 identity<bool>{});
683 }
684
685public:
686 using Base = zip_common<zip_shortest<Iters...>, Iters...>;
687
688 zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
689
690 bool operator==(const zip_shortest<Iters...> &other) const {
691 return !test(other, std::index_sequence_for<Iters...>{});
692 }
693};
694
// Range object returned by zip()/zip_first(): owns/refers to the zipped
// ranges in a tuple and builds ItType iterators over them on demand.
695template <template <typename...> class ItType, typename... Args> class zippy {
696public:
697 using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
698 using iterator_category = typename iterator::iterator_category;
699 using value_type = typename iterator::value_type;
700 using difference_type = typename iterator::difference_type;
701 using pointer = typename iterator::pointer;
702 using reference = typename iterator::reference;
703
704private:
705 std::tuple<Args...> ts;
706
707 template <size_t... Ns>
708 iterator begin_impl(std::index_sequence<Ns...>) const {
709 return iterator(std::begin(std::get<Ns>(ts))...);
710 }
711 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
712 return iterator(std::end(std::get<Ns>(ts))...);
713 }
714
715public:
716 zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
717
718 iterator begin() const {
719 return begin_impl(std::index_sequence_for<Args...>{});
720 }
721 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
722};
723
724} // end namespace detail
725
726/// zip iterator for two or more iteratable types.
727template <typename T, typename U, typename... Args>
728detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
729 Args &&... args) {
730 return detail::zippy<detail::zip_shortest, T, U, Args...>(
731 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
732}
733
734/// zip iterator that, for the sake of efficiency, assumes the first iteratee to
735/// be the shortest.
736template <typename T, typename U, typename... Args>
737detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
738 Args &&... args) {
739 return detail::zippy<detail::zip_first, T, U, Args...>(
740 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
741}
742
743namespace detail {
// Saturating increment: advance I unless it has already reached End.
744template <typename Iter>
745Iter next_or_end(const Iter &I, const Iter &End) {
746 if (I == End)
747 return End;
748 return std::next(I);
749}
750
// *I wrapped in an Optional, or None once I has reached End.
751template <typename Iter>
752auto deref_or_none(const Iter &I, const Iter &End) -> llvm::Optional<
753 std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
754 if (I == End)
755 return None;
756 return *I;
757}
758
759template <typename Iter> struct ZipLongestItemType {
760 using type =
761 llvm::Optional<typename std::remove_const<typename std::remove_reference<
762 decltype(*std::declval<Iter>())>::type>::type>;
763};
764
765template <typename... Iters> struct ZipLongestTupleType {
766 using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
767};
768
769template <typename... Iters>
770class zip_longest_iterator
771 : public iterator_facade_base<
772 zip_longest_iterator<Iters...>,
773 typename std::common_type<
774 std::forward_iterator_tag,
775 typename std::iterator_traits<Iters>::iterator_category...>::type,
776 typename ZipLongestTupleType<Iters...>::type,
777 typename std::iterator_traits<typename std::tuple_element<
778 0, std::tuple<Iters...>>::type>::difference_type,
779 typename ZipLongestTupleType<Iters...>::type *,
780 typename ZipLongestTupleType<Iters...>::type> {
781public:
782 using value_type = typename ZipLongestTupleType<Iters...>::type;
783
784private:
785 std::tuple<Iters...> iterators;
786 std::tuple<Iters...> end_iterators;
787
// True while ANY inner iterator differs from the matching one in 'other';
// operator== negates this, so equality with the end sentinel requires ALL
// sequences to be exhausted (the "longest" semantics).
788 template <size_t... Ns>
789 bool test(const zip_longest_iterator<Iters...> &other,
790 std::index_sequence<Ns...>) const {
791 return llvm::any_of(
792 std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
793 std::get<Ns>(other.iterators)...},
794 identity<bool>{});
795 }
796
797 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
798 return value_type(
799 deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
800 }
801
802 template <size_t... Ns>
803 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
804 return std::tuple<Iters...>(
805 next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
806 }
807
808public:
809 zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
810 : iterators(std::forward<Iters>(ts.first)...),
811 end_iterators(std::forward<Iters>(ts.second)...) {}
812
813 value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
814
815 value_type operator*() const {
816 return deref(std::index_sequence_for<Iters...>{});
817 }
818
819 zip_longest_iterator<Iters...> &operator++() {
820 iterators = tup_inc(std::index_sequence_for<Iters...>{});
821 return *this;
822 }
823
824 bool operator==(const zip_longest_iterator<Iters...> &other) const {
825 return !test(other, std::index_sequence_for<Iters...>{});
826 }
827};
828
// Range object returned by zip_longest(): holds the ranges and builds
// (begin, end) iterator pairs for each when iteration starts.
829template <typename... Args> class zip_longest_range {
830public:
831 using iterator =
832 zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
833 using iterator_category = typename iterator::iterator_category;
834 using value_type = typename iterator::value_type;
835 using difference_type = typename iterator::difference_type;
836 using pointer = typename iterator::pointer;
837 using reference = typename iterator::reference;
838
839private:
840 std::tuple<Args...> ts;
841
842 template <size_t... Ns>
843 iterator begin_impl(std::index_sequence<Ns...>) const {
844 return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
845 adl_end(std::get<Ns>(ts)))...);
846 }
847
848 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
849 return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
850 adl_end(std::get<Ns>(ts)))...);
851 }
852
853public:
854 zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
855
856 iterator begin() const {
857 return begin_impl(std::index_sequence_for<Args...>{});
858 }
859 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
860};
861} // namespace detail
862
863/// Iterate over two or more iterators at the same time. Iteration continues
864/// until all iterators reach the end. The llvm::Optional only contains a value
865/// if the iterator has not reached the end.
866template <typename T, typename U, typename... Args>
867detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
868 Args &&... args) {
869 return detail::zip_longest_range<T, U, Args...>(
870 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
871}
872
873/// Iterator wrapper that concatenates sequences together.
874///
875/// This can concatenate different iterators, even with different types, into
876/// a single iterator provided the value types of all the concatenated
877/// iterators expose `reference` and `pointer` types that can be converted to
878/// `ValueT &` and `ValueT *` respectively. It doesn't support more
879/// interesting/customized pointer or reference types.
880///
881/// Currently this only supports forward or higher iterator categories as
882/// inputs and always exposes a forward iterator interface.
883template <typename ValueT, typename... IterTs>
884class concat_iterator
885 : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
886 std::forward_iterator_tag, ValueT> {
887 using BaseT = typename concat_iterator::iterator_facade_base;
888
889 /// We store both the current and end iterators for each concatenated
890 /// sequence in a tuple of pairs.
891 ///
892 /// Note that something like iterator_range seems nice at first here, but the
893 /// range properties are of little benefit and end up getting in the way
894 /// because we need to do mutation on the current iterators.
895 std::tuple<IterTs...> Begins;
896 std::tuple<IterTs...> Ends;
897
898 /// Attempts to increment a specific iterator.
899 ///
900 /// Returns true if it was able to increment the iterator. Returns false if
901 /// the iterator is already at the end iterator.
902 template <size_t Index> bool incrementHelper() {
903 auto &Begin = std::get<Index>(Begins);
904 auto &End = std::get<Index>(Ends);
905 if (Begin == End)
906 return false;
907
908 ++Begin;
909 return true;
910 }
911
912 /// Increments the first non-end iterator.
913 ///
914 /// It is an error to call this with all iterators at the end.
915 template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
916 // Build a sequence of functions to increment each iterator if possible.
917 bool (concat_iterator::*IncrementHelperFns[])() = {
918 &concat_iterator::incrementHelper<Ns>...};
919
920 // Loop over them, and stop as soon as we succeed at incrementing one.
921 for (auto &IncrementHelperFn : IncrementHelperFns)
922 if ((this->*IncrementHelperFn)())
923 return;
924
925 llvm_unreachable("Attempted to increment an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to increment an end concat iterator!"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 925)
;
926 }
927
928 /// Returns null if the specified iterator is at the end. Otherwise,
929 /// dereferences the iterator and returns the address of the resulting
930 /// reference.
931 template <size_t Index> ValueT *getHelper() const {
932 auto &Begin = std::get<Index>(Begins);
933 auto &End = std::get<Index>(Ends);
934 if (Begin == End)
935 return nullptr;
936
937 return &*Begin;
938 }
939
940 /// Finds the first non-end iterator, dereferences, and returns the resulting
941 /// reference.
942 ///
943 /// It is an error to call this with all iterators at the end.
944 template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
945 // Build a sequence of functions to get from iterator if possible.
946 ValueT *(concat_iterator::*GetHelperFns[])() const = {
947 &concat_iterator::getHelper<Ns>...};
948
949 // Loop over them, and return the first result we find.
950 for (auto &GetHelperFn : GetHelperFns)
951 if (ValueT *P = (this->*GetHelperFn)())
952 return *P;
953
954 llvm_unreachable("Attempted to get a pointer from an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to get a pointer from an end concat iterator!"
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 954)
;
955 }
956
957public:
958 /// Constructs an iterator from a sequence of ranges.
959 ///
960 /// We need the full range to know how to switch between each of the
961 /// iterators.
962 template <typename... RangeTs>
963 explicit concat_iterator(RangeTs &&... Ranges)
964 : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}
965
966 using BaseT::operator++;
967
968 concat_iterator &operator++() {
969 increment(std::index_sequence_for<IterTs...>());
970 return *this;
971 }
972
973 ValueT &operator*() const {
974 return get(std::index_sequence_for<IterTs...>());
975 }
976
977 bool operator==(const concat_iterator &RHS) const {
978 return Begins == RHS.Begins && Ends == RHS.Ends;
979 }
980};
981
982namespace detail {
983
984/// Helper to store a sequence of ranges being concatenated and access them.
985///
986/// This is designed to facilitate providing actual storage when temporaries
987/// are passed into the constructor such that we can use it as part of range
988/// based for loops.
989template <typename ValueT, typename... RangeTs> class concat_range {
990public:
991 using iterator =
992 concat_iterator<ValueT,
993 decltype(std::begin(std::declval<RangeTs &>()))...>;
994
995private:
996 std::tuple<RangeTs...> Ranges;
997
998 template <size_t... Ns> iterator begin_impl(std::index_sequence<Ns...>) {
999 return iterator(std::get<Ns>(Ranges)...);
1000 }
1001 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
1002 return iterator(make_range(std::end(std::get<Ns>(Ranges)),
1003 std::end(std::get<Ns>(Ranges)))...);
1004 }
1005
1006public:
1007 concat_range(RangeTs &&... Ranges)
1008 : Ranges(std::forward<RangeTs>(Ranges)...) {}
1009
1010 iterator begin() { return begin_impl(std::index_sequence_for<RangeTs...>{}); }
1011 iterator end() { return end_impl(std::index_sequence_for<RangeTs...>{}); }
1012};
1013
1014} // end namespace detail
1015
1016/// Concatenated range across two or more ranges.
1017///
1018/// The desired value type must be explicitly specified.
1019template <typename ValueT, typename... RangeTs>
1020detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
1021 static_assert(sizeof...(RangeTs) > 1,
1022 "Need more than one range to concatenate!");
1023 return detail::concat_range<ValueT, RangeTs...>(
1024 std::forward<RangeTs>(Ranges)...);
1025}
1026
1027/// A utility class used to implement an iterator that contains some base object
1028/// and an index. The iterator moves the index but keeps the base constant.
1029template <typename DerivedT, typename BaseT, typename T,
1030 typename PointerT = T *, typename ReferenceT = T &>
1031class indexed_accessor_iterator
1032 : public llvm::iterator_facade_base<DerivedT,
1033 std::random_access_iterator_tag, T,
1034 std::ptrdiff_t, PointerT, ReferenceT> {
1035public:
1036 ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
1037 assert(base == rhs.base && "incompatible iterators")((base == rhs.base && "incompatible iterators") ? static_cast
<void> (0) : __assert_fail ("base == rhs.base && \"incompatible iterators\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 1037, __PRETTY_FUNCTION__))
;
1038 return index - rhs.index;
1039 }
1040 bool operator==(const indexed_accessor_iterator &rhs) const {
1041 return base == rhs.base && index == rhs.index;
1042 }
1043 bool operator<(const indexed_accessor_iterator &rhs) const {
1044 assert(base == rhs.base && "incompatible iterators")((base == rhs.base && "incompatible iterators") ? static_cast
<void> (0) : __assert_fail ("base == rhs.base && \"incompatible iterators\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 1044, __PRETTY_FUNCTION__))
;
1045 return index < rhs.index;
1046 }
1047
1048 DerivedT &operator+=(ptrdiff_t offset) {
1049 this->index += offset;
1050 return static_cast<DerivedT &>(*this);
1051 }
1052 DerivedT &operator-=(ptrdiff_t offset) {
1053 this->index -= offset;
1054 return static_cast<DerivedT &>(*this);
1055 }
1056
1057 /// Returns the current index of the iterator.
1058 ptrdiff_t getIndex() const { return index; }
1059
1060 /// Returns the current base of the iterator.
1061 const BaseT &getBase() const { return base; }
1062
1063protected:
1064 indexed_accessor_iterator(BaseT base, ptrdiff_t index)
1065 : base(base), index(index) {}
1066 BaseT base;
1067 ptrdiff_t index;
1068};
1069
1070namespace detail {
1071/// The class represents the base of a range of indexed_accessor_iterators. It
1072/// provides support for many different range functionalities, e.g.
1073/// drop_front/slice/etc.. Derived range classes must implement the following
1074/// static methods:
1075/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
1076/// - Dereference an iterator pointing to the base object at the given
1077/// index.
1078/// * BaseT offset_base(const BaseT &base, ptrdiff_t index)
1079/// - Return a new base that is offset from the provide base by 'index'
1080/// elements.
1081template <typename DerivedT, typename BaseT, typename T,
1082 typename PointerT = T *, typename ReferenceT = T &>
1083class indexed_accessor_range_base {
1084public:
1085 using RangeBaseT =
1086 indexed_accessor_range_base<DerivedT, BaseT, T, PointerT, ReferenceT>;
1087
1088 /// An iterator element of this range.
1089 class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
1090 PointerT, ReferenceT> {
1091 public:
1092 // Index into this iterator, invoking a static method on the derived type.
1093 ReferenceT operator*() const {
1094 return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
1095 }
1096
1097 private:
1098 iterator(BaseT owner, ptrdiff_t curIndex)
1099 : indexed_accessor_iterator<iterator, BaseT, T, PointerT, ReferenceT>(
1100 owner, curIndex) {}
1101
1102 /// Allow access to the constructor.
1103 friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
1104 ReferenceT>;
1105 };
1106
1107 indexed_accessor_range_base(iterator begin, iterator end)
1108 : base(offset_base(begin.getBase(), begin.getIndex())),
1109 count(end.getIndex() - begin.getIndex()) {}
1110 indexed_accessor_range_base(const iterator_range<iterator> &range)
1111 : indexed_accessor_range_base(range.begin(), range.end()) {}
1112 indexed_accessor_range_base(BaseT base, ptrdiff_t count)
1113 : base(base), count(count) {}
1114
1115 iterator begin() const { return iterator(base, 0); }
1116 iterator end() const { return iterator(base, count); }
1117 ReferenceT operator[](unsigned index) const {
1118 assert(index < size() && "invalid index for value range")((index < size() && "invalid index for value range"
) ? static_cast<void> (0) : __assert_fail ("index < size() && \"invalid index for value range\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 1118, __PRETTY_FUNCTION__))
;
1119 return DerivedT::dereference_iterator(base, index);
1120 }
1121 ReferenceT front() const {
1122 assert(!empty() && "expected non-empty range")((!empty() && "expected non-empty range") ? static_cast
<void> (0) : __assert_fail ("!empty() && \"expected non-empty range\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 1122, __PRETTY_FUNCTION__))
;
1123 return (*this)[0];
1124 }
1125 ReferenceT back() const {
1126 assert(!empty() && "expected non-empty range")((!empty() && "expected non-empty range") ? static_cast
<void> (0) : __assert_fail ("!empty() && \"expected non-empty range\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 1126, __PRETTY_FUNCTION__))
;
1127 return (*this)[size() - 1];
1128 }
1129
1130 /// Compare this range with another.
1131 template <typename OtherT> bool operator==(const OtherT &other) const {
1132 return size() ==
1133 static_cast<size_t>(std::distance(other.begin(), other.end())) &&
1134 std::equal(begin(), end(), other.begin());
1135 }
1136 template <typename OtherT> bool operator!=(const OtherT &other) const {
1137 return !(*this == other);
1138 }
1139
1140 /// Return the size of this range.
1141 size_t size() const { return count; }
1142
1143 /// Return if the range is empty.
1144 bool empty() const { return size() == 0; }
1145
1146 /// Drop the first N elements, and keep M elements.
1147 DerivedT slice(size_t n, size_t m) const {
1148 assert(n + m <= size() && "invalid size specifiers")((n + m <= size() && "invalid size specifiers") ? static_cast
<void> (0) : __assert_fail ("n + m <= size() && \"invalid size specifiers\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 1148, __PRETTY_FUNCTION__))
;
1149 return DerivedT(offset_base(base, n), m);
1150 }
1151
1152 /// Drop the first n elements.
1153 DerivedT drop_front(size_t n = 1) const {
1154 assert(size() >= n && "Dropping more elements than exist")((size() >= n && "Dropping more elements than exist"
) ? static_cast<void> (0) : __assert_fail ("size() >= n && \"Dropping more elements than exist\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 1154, __PRETTY_FUNCTION__))
;
1155 return slice(n, size() - n);
1156 }
1157 /// Drop the last n elements.
1158 DerivedT drop_back(size_t n = 1) const {
1159 assert(size() >= n && "Dropping more elements than exist")((size() >= n && "Dropping more elements than exist"
) ? static_cast<void> (0) : __assert_fail ("size() >= n && \"Dropping more elements than exist\""
, "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/STLExtras.h"
, 1159, __PRETTY_FUNCTION__))
;
1160 return DerivedT(base, size() - n);
1161 }
1162
1163 /// Take the first n elements.
1164 DerivedT take_front(size_t n = 1) const {
1165 return n < size() ? drop_back(size() - n)
1166 : static_cast<const DerivedT &>(*this);
1167 }
1168
1169 /// Take the last n elements.
1170 DerivedT take_back(size_t n = 1) const {
1171 return n < size() ? drop_front(size() - n)
1172 : static_cast<const DerivedT &>(*this);
1173 }
1174
1175 /// Allow conversion to any type accepting an iterator_range.
1176 template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
1177 RangeT, iterator_range<iterator>>::value>>
1178 operator RangeT() const {
1179 return RangeT(iterator_range<iterator>(*this));
1180 }
1181
1182 /// Returns the base of this range.
1183 const BaseT &getBase() const { return base; }
1184
1185private:
1186 /// Offset the given base by the given amount.
1187 static BaseT offset_base(const BaseT &base, size_t n) {
1188 return n == 0 ? base : DerivedT::offset_base(base, n);
1189 }
1190
1191protected:
1192 indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
1193 indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
1194 indexed_accessor_range_base &
1195 operator=(const indexed_accessor_range_base &) = default;
1196
1197 /// The base that owns the provided range of values.
1198 BaseT base;
1199 /// The size from the owning range.
1200 ptrdiff_t count;
1201};
1202} // end namespace detail
1203
1204/// This class provides an implementation of a range of
1205/// indexed_accessor_iterators where the base is not indexable. Ranges with
1206/// bases that are offsetable should derive from indexed_accessor_range_base
1207/// instead. Derived range classes are expected to implement the following
1208/// static method:
1209/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
1210/// - Dereference an iterator pointing to a parent base at the given index.
1211template <typename DerivedT, typename BaseT, typename T,
1212 typename PointerT = T *, typename ReferenceT = T &>
1213class indexed_accessor_range
1214 : public detail::indexed_accessor_range_base<
1215 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
1216public:
1217 indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
1218 : detail::indexed_accessor_range_base<
1219 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
1220 std::make_pair(base, startIndex), count) {}
1221 using detail::indexed_accessor_range_base<
1222 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
1223 ReferenceT>::indexed_accessor_range_base;
1224
1225 /// Returns the current base of the range.
1226 const BaseT &getBase() const { return this->base.first; }
1227
1228 /// Returns the current start index of the range.
1229 ptrdiff_t getStartIndex() const { return this->base.second; }
1230
1231 /// See `detail::indexed_accessor_range_base` for details.
1232 static std::pair<BaseT, ptrdiff_t>
1233 offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
1234 // We encode the internal base as a pair of the derived base and a start
1235 // index into the derived base.
1236 return std::make_pair(base.first, base.second + index);
1237 }
1238 /// See `detail::indexed_accessor_range_base` for details.
1239 static ReferenceT
1240 dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
1241 ptrdiff_t index) {
1242 return DerivedT::dereference(base.first, base.second + index);
1243 }
1244};
1245
1246/// Given a container of pairs, return a range over the first elements.
1247template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
1248 return llvm::map_range(
1249 std::forward<ContainerTy>(c),
1250 [](decltype((*std::begin(c))) elt) -> decltype((elt.first)) {
1251 return elt.first;
1252 });
1253}
1254
1255/// Given a container of pairs, return a range over the second elements.
1256template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
1257 return llvm::map_range(
1258 std::forward<ContainerTy>(c),
1259 [](decltype((*std::begin(c))) elt) -> decltype((elt.second)) {
1260 return elt.second;
1261 });
1262}
1263
1264//===----------------------------------------------------------------------===//
1265// Extra additions to <utility>
1266//===----------------------------------------------------------------------===//
1267
/// Function object to check whether the first component of a std::pair
/// compares less than the first component of another std::pair.
struct less_first {
  template <typename T>
  bool operator()(const T &lhs, const T &rhs) const {
    return lhs.first < rhs.first;
  }
};
1275
/// Function object to check whether the second component of a std::pair
/// compares less than the second component of another std::pair.
struct less_second {
  template <typename T>
  bool operator()(const T &lhs, const T &rhs) const {
    return lhs.second < rhs.second;
  }
};
1283
/// \brief Function object to apply a binary function to the first component of
/// a std::pair.
template <typename FuncTy>
struct on_first {
  FuncTy func;

  /// Forward `(lhs.first, rhs.first)` to the wrapped binary function.
  template <typename T>
  decltype(auto) operator()(const T &lhs, const T &rhs) const {
    return func(lhs.first, rhs.first);
  }
};
1295
/// Utility type to build an inheritance chain that makes it easy to rank
/// overload candidates: a rank<N> argument binds to a rank<M> parameter for
/// any M <= N, preferring the largest M.
template <int N> struct rank : rank<N - 1> {};
template <> struct rank<0> {};
1300
/// traits class for checking whether type T is one of any of the given
/// types in the variadic list.
template <typename T, typename... Ts> struct is_one_of {
  // Base case: the list has been exhausted without a match.
  static const bool value = false;
};

template <typename T, typename U, typename... Ts>
struct is_one_of<T, U, Ts...> {
  // Match the head of the list, or recurse into the tail.
  static const bool value =
      std::is_same<T, U>::value || is_one_of<T, Ts...>::value;
};
1312
/// traits class for checking whether type T is a base class for all
/// the given types in the variadic list.
template <typename T, typename... Ts> struct are_base_of {
  // Base case: vacuously true for the empty list.
  static const bool value = true;
};

template <typename T, typename U, typename... Ts>
struct are_base_of<T, U, Ts...> {
  // T must be a base of the head, and of everything in the tail.
  static const bool value =
      std::is_base_of<T, U>::value && are_base_of<T, Ts...>::value;
};
1324
1325//===----------------------------------------------------------------------===//
1326// Extra additions for arrays
1327//===----------------------------------------------------------------------===//
1328
// We have a copy here so that LLVM behaves the same when using different
// standard libraries.
template <class Iterator, class RNG>
void shuffle(Iterator first, Iterator last, RNG &&g) {
  // It would be better to use a std::uniform_int_distribution,
  // but that would be stdlib dependent.
  auto Remaining = last - first;
  while (Remaining > 1) {
    std::iter_swap(first, first + g() % Remaining);
    ++first;
    --Remaining;
  }
}
1338
/// Find the length of an array at compile time.
template <class T, std::size_t N>
constexpr inline size_t array_lengthof(T (&)[N]) {
  return N;
}
1344
/// Adapt std::less<T> for array_pod_sort: a qsort-style three-way comparator
/// built from strict-weak ordering.
template <typename T>
inline int array_pod_sort_comparator(const void *P1, const void *P2) {
  const T &A = *reinterpret_cast<const T *>(P1);
  const T &B = *reinterpret_cast<const T *>(P2);
  std::less<T> Less;
  if (Less(A, B))
    return -1;
  if (Less(B, A))
    return 1;
  return 0;
}
1356
1357/// get_array_pod_sort_comparator - This is an internal helper function used to
1358/// get type deduction of T right.
1359template<typename T>
1360inline int (*get_array_pod_sort_comparator(const T &))
1361 (const void*, const void*) {
1362 return array_pod_sort_comparator<T>;
1363}
1364
#ifdef EXPENSIVE_CHECKS
namespace detail {

// One process-wide seed so every presort shuffle in a run is reproducible.
inline unsigned presortShuffleEntropy() {
  static unsigned Result(std::random_device{}());
  return Result;
}

// Randomly permute [Start, End) before sorting, to flush out code that
// depends on the (unspecified) relative order of equal elements.
template <class IteratorTy>
inline void presortShuffle(IteratorTy Start, IteratorTy End) {
  std::mt19937 Generator(presortShuffleEntropy());
  std::shuffle(Start, End, Generator);
}

} // end namespace detail
#endif
1381
/// array_pod_sort - This sorts an array with the specified start and end
/// extent. This is just like std::sort, except that it calls qsort instead of
/// using an inlined template. qsort is slightly slower than std::sort, but
/// most sorts are not performance critical in LLVM and std::sort has to be
/// template instantiated for each type, leading to significant measured code
/// bloat. This function should generally be used instead of std::sort where
/// possible.
///
/// This function assumes that you have simple POD-like types that can be
/// compared with std::less and can be moved with memcpy. If this isn't true,
/// you should use std::sort.
///
/// NOTE: If qsort_r were portable, we could allow a custom comparator and
/// default to std::less.
template <class IteratorTy>
inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
  // qsort with zero elements triggers undefined behavior; with one it is
  // pointless work. Bail out early in both cases.
  auto NElts = End - Start;
  if (NElts <= 1)
    return;
#ifdef EXPENSIVE_CHECKS
  detail::presortShuffle<IteratorTy>(Start, End);
#endif
  qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
}
1407
/// Overload of array_pod_sort that takes a qsort-style three-way comparator
/// over element pointers.
template <class IteratorTy>
inline void array_pod_sort(
    IteratorTy Start, IteratorTy End,
    int (*Compare)(
        const typename std::iterator_traits<IteratorTy>::value_type *,
        const typename std::iterator_traits<IteratorTy>::value_type *)) {
  // qsort with zero elements triggers undefined behavior; with one it is
  // pointless work. Bail out early in both cases.
  auto NElts = End - Start;
  if (NElts <= 1)
    return;
#ifdef EXPENSIVE_CHECKS
  detail::presortShuffle<IteratorTy>(Start, End);
#endif
  qsort(&*Start, NElts, sizeof(*Start),
        reinterpret_cast<int (*)(const void *, const void *)>(Compare));
}
1424
1425namespace detail {
1426template <typename T>
1427// We can use qsort if the iterator type is a pointer and the underlying value
1428// is trivially copyable.
1429using sort_trivially_copyable = conjunction<
1430 std::is_pointer<T>,
1431 std::is_trivially_copyable<typename std::iterator_traits<T>::value_type>>;
1432} // namespace detail
1433
1434// Provide wrappers to std::sort which shuffle the elements before sorting
1435// to help uncover non-deterministic behavior (PR35135).
1436template <typename IteratorTy,
1437 std::enable_if_t<!detail::sort_trivially_copyable<IteratorTy>::value,
1438 int> = 0>
1439inline void sort(IteratorTy Start, IteratorTy End) {
1440#ifdef EXPENSIVE_CHECKS
1441 detail::presortShuffle<IteratorTy>(Start, End);
1442#endif
1443 std::sort(Start, End);
1444}
1445
1446// Forward trivially copyable types to array_pod_sort. This avoids a large
1447// amount of code bloat for a minor performance hit.
1448template <typename IteratorTy,
1449 std::enable_if_t<detail::sort_trivially_copyable<IteratorTy>::value,
1450 int> = 0>
1451inline void sort(IteratorTy Start, IteratorTy End) {
1452 array_pod_sort(Start, End);
1453}
1454
1455template <typename Container> inline void sort(Container &&C) {
1456 llvm::sort(adl_begin(C), adl_end(C));
1457}
1458
/// Sort [Start, End) with a custom comparator, shuffling first under
/// EXPENSIVE_CHECKS to expose ordering nondeterminism.
template <typename IteratorTy, typename Compare>
inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
#ifdef EXPENSIVE_CHECKS
  detail::presortShuffle<IteratorTy>(Start, End);
#endif
  std::sort(Start, End, Comp);
}
1466
1467template <typename Container, typename Compare>
1468inline void sort(Container &&C, Compare Comp) {
1469 llvm::sort(adl_begin(C), adl_end(C), Comp);
1470}
1471
1472//===----------------------------------------------------------------------===//
1473// Extra additions to <algorithm>
1474//===----------------------------------------------------------------------===//
1475
/// Get the size of a range. This is a wrapper function around std::distance
/// which is only enabled when the operation is O(1), i.e. the range's
/// iterators are random access.
template <typename R>
auto size(R &&Range,
          std::enable_if_t<
              std::is_base_of<std::random_access_iterator_tag,
                              typename std::iterator_traits<decltype(
                                  Range.begin())>::iterator_category>::value,
              void> * = nullptr) {
  auto First = Range.begin();
  auto Last = Range.end();
  return std::distance(First, Last);
}
1487
/// Provide wrappers to std::for_each which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryFunction>
UnaryFunction for_each(R &&Range, UnaryFunction F) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::for_each(First, Last, F);
}
1494
/// Provide wrappers to std::all_of which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename UnaryPredicate>
bool all_of(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::all_of(First, Last, P);
}
1501
/// Provide wrappers to std::any_of which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename UnaryPredicate>
bool any_of(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::any_of(First, Last, P);
}
1508
/// Provide wrappers to std::none_of which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename UnaryPredicate>
bool none_of(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::none_of(First, Last, P);
}
1515
/// Provide wrappers to std::find which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename T> auto find(R &&Range, const T &Val) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::find(First, Last, Val);
}
1521
/// Provide wrappers to std::find_if which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename UnaryPredicate>
auto find_if(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::find_if(First, Last, P);
}
1528
/// Range-based wrapper around std::find_if_not.
template <typename R, typename UnaryPredicate>
auto find_if_not(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::find_if_not(First, Last, P);
}
1533
/// Provide wrappers to std::remove_if which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryPredicate>
auto remove_if(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::remove_if(First, Last, P);
}
1540
/// Provide wrappers to std::copy_if which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename OutputIt, typename UnaryPredicate>
OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::copy_if(First, Last, Out, P);
}
1547
/// Range-based wrapper around std::copy.
template <typename R, typename OutputIt>
OutputIt copy(R &&Range, OutputIt Out) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::copy(First, Last, Out);
}
1552
/// Provide wrappers to std::move (the three-argument move algorithm) which
/// take ranges instead of having to pass begin/end explicitly.
template <typename R, typename OutputIt>
OutputIt move(R &&Range, OutputIt Out) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::move(First, Last, Out);
}
1559
/// Wrapper function around std::find to detect if an element exists
/// in a container.
template <typename R, typename E>
bool is_contained(R &&Range, const E &Element) {
  auto Pos = std::find(adl_begin(Range), adl_end(Range), Element);
  return Pos != adl_end(Range);
}
1566
/// Wrapper function around std::is_sorted to check if elements in a range \p R
/// are sorted with respect to a comparator \p C.
template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::is_sorted(First, Last, C);
}
1572
/// Wrapper function around std::is_sorted to check if elements in a range \p R
/// are sorted in non-descending order.
template <typename R> bool is_sorted(R &&Range) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::is_sorted(First, Last);
}
1578
/// Wrapper function around std::count to count the number of times an element
/// \p Element occurs in the given range \p Range.
template <typename R, typename E> auto count(R &&Range, const E &Element) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::count(First, Last, Element);
}
1584
/// Wrapper function around std::count_if to count the number of times an
/// element satisfying a given predicate occurs in a range.
template <typename R, typename UnaryPredicate>
auto count_if(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::count_if(First, Last, P);
}
1591
/// Wrapper function around std::transform to apply a function to a range and
/// store the result elsewhere.
template <typename R, typename OutputIt, typename UnaryFunction>
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::transform(First, Last, d_first, F);
}
1598
/// Provide wrappers to std::partition which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryPredicate>
auto partition(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::partition(First, Last, P);
}
1605
/// Provide wrappers to std::lower_bound which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::lower_bound(First, Last, std::forward<T>(Value));
}
1612
/// Range-based std::lower_bound with a custom comparator.
template <typename R, typename T, typename Compare>
auto lower_bound(R &&Range, T &&Value, Compare C) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::lower_bound(First, Last, std::forward<T>(Value), C);
}
1618
/// Provide wrappers to std::upper_bound which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::upper_bound(First, Last, std::forward<T>(Value));
}
1625
/// Range-based std::upper_bound with a custom comparator.
template <typename R, typename T, typename Compare>
auto upper_bound(R &&Range, T &&Value, Compare C) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::upper_bound(First, Last, std::forward<T>(Value), C);
}
1631
/// Range-based wrapper around std::stable_sort.
template <typename R>
void stable_sort(R &&Range) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  std::stable_sort(First, Last);
}
1636
/// Range-based std::stable_sort with a custom comparator.
template <typename R, typename Compare>
void stable_sort(R &&Range, Compare C) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  std::stable_sort(First, Last, C);
}
1641
/// Binary search for the first iterator in a range where a predicate is false.
/// Requires that P is always true below some limit, and always false above it.
template <typename R, typename Predicate,
          typename Val = decltype(*adl_begin(std::declval<R>()))>
auto partition_point(R &&Range, Predicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::partition_point(First, Last, P);
}
1649
/// Returns true if the range is non-empty and every element equals the first
/// one (i.e. the range is a "splat" of a single value).
template <typename R> bool is_splat(R &&Range) {
  const size_t N = size(Range);
  if (N == 0)
    return false; // An empty range is not a splat.
  if (N == 1)
    return true; // A single element is trivially a splat.
  return std::equal(adl_begin(Range) + 1, adl_end(Range), adl_begin(Range));
}
1658
/// Provide a container algorithm similar to C++ Library Fundamentals v2's
/// `erase_if` which is equivalent to:
///
///   C.erase(remove_if(C, pred), C.end());
///
/// This version works for any container with an erase method call accepting
/// two iterators.
template <typename Container, typename UnaryPredicate>
void erase_if(Container &C, UnaryPredicate P) {
  // Classic erase-remove idiom: compact the kept elements, then trim the tail.
  auto NewEnd = remove_if(C, P);
  C.erase(NewEnd, C.end());
}
1670
/// Remove every element equal to \p V from the container:
///
///   C.erase(remove(C.begin(), C.end(), V), C.end());
template <typename Container, typename ValueType>
void erase_value(Container &C, ValueType V) {
  // Erase-remove idiom for a single value.
  auto NewEnd = std::remove(C.begin(), C.end(), V);
  C.erase(NewEnd, C.end());
}
1678
/// Append all elements of \p R to the end of container \p C:
///
///   C.insert(C.end(), R.begin(), R.end());
template <typename Container, typename Range>
inline void append_range(Container &C, Range &&R) {
  auto First = R.begin();
  auto Last = R.end();
  C.insert(C.end(), First, Last);
}
1686
/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
/// the range [ValIt, ValEnd) (which is not from the same container).
template <typename Container, typename RandomAccessIterator>
void replace(Container &Cont, typename Container::iterator ContIt,
             typename Container::iterator ContEnd, RandomAccessIterator ValIt,
             RandomAccessIterator ValEnd) {
  // Overwrite the overlapping prefix element by element.
  for (; ValIt != ValEnd && ContIt != ContEnd; ++ContIt, ++ValIt)
    *ContIt = *ValIt;
  if (ValIt == ValEnd)
    Cont.erase(ContIt, ContEnd); // Replacement is shorter: drop the tail.
  else
    Cont.insert(ContIt, ValIt, ValEnd); // Replacement is longer: add the rest.
}
1704
/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
/// the elements of range \p R (defaults to an initializer list).
template <typename Container, typename Range = std::initializer_list<
                                  typename Container::value_type>>
void replace(Container &Cont, typename Container::iterator ContIt,
             typename Container::iterator ContEnd, Range R) {
  // Delegate to the iterator-pair overload.
  auto First = R.begin();
  auto Last = R.end();
  replace(Cont, ContIt, ContEnd, First, Last);
}
1713
1714/// An STL-style algorithm similar to std::for_each that applies a second
1715/// functor between every pair of elements.
1716///
1717/// This provides the control flow logic to, for example, print a
1718/// comma-separated list:
1719/// \code
1720/// interleave(names.begin(), names.end(),
1721/// [&](StringRef name) { os << name; },
1722/// [&] { os << ", "; });
1723/// \endcode
1724template <typename ForwardIterator, typename UnaryFunctor,
1725 typename NullaryFunctor,
1726 typename = typename std::enable_if<
1727 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1728 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1729inline void interleave(ForwardIterator begin, ForwardIterator end,
1730 UnaryFunctor each_fn, NullaryFunctor between_fn) {
1731 if (begin == end)
1732 return;
1733 each_fn(*begin);
1734 ++begin;
1735 for (; begin != end; ++begin) {
1736 between_fn();
1737 each_fn(*begin);
1738 }
1739}
1740
1741template <typename Container, typename UnaryFunctor, typename NullaryFunctor,
1742 typename = typename std::enable_if<
1743 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1744 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1745inline void interleave(const Container &c, UnaryFunctor each_fn,
1746 NullaryFunctor between_fn) {
1747 interleave(c.begin(), c.end(), each_fn, between_fn);
1748}
1749
1750/// Overload of interleave for the common case of string separator.
1751template <typename Container, typename UnaryFunctor, typename StreamT,
1752 typename T = detail::ValueOfRange<Container>>
1753inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn,
1754 const StringRef &separator) {
1755 interleave(c.begin(), c.end(), each_fn, [&] { os << separator; });
1756}
1757template <typename Container, typename StreamT,
1758 typename T = detail::ValueOfRange<Container>>
1759inline void interleave(const Container &c, StreamT &os,
1760 const StringRef &separator) {
1761 interleave(
1762 c, os, [&](const T &a) { os << a; }, separator);
1763}
1764
1765template <typename Container, typename UnaryFunctor, typename StreamT,
1766 typename T = detail::ValueOfRange<Container>>
1767inline void interleaveComma(const Container &c, StreamT &os,
1768 UnaryFunctor each_fn) {
1769 interleave(c, os, each_fn, ", ");
1770}
1771template <typename Container, typename StreamT,
1772 typename T = detail::ValueOfRange<Container>>
1773inline void interleaveComma(const Container &c, StreamT &os) {
1774 interleaveComma(c, os, [&](const T &a) { os << a; });
1775}
1776
1777//===----------------------------------------------------------------------===//
1778// Extra additions to <memory>
1779//===----------------------------------------------------------------------===//
1780
/// Deleter for memory obtained from malloc/realloc; suitable as the deleter
/// of a std::unique_ptr that owns C-allocated storage.
struct FreeDeleter {
  void operator()(void *v) { ::free(v); }
};
1786
/// Hash functor for std::pair: combines the std::hash of each member with the
/// classic multiply-by-31 mix.
template <typename First, typename Second> struct pair_hash {
  size_t operator()(const std::pair<First, Second> &P) const {
    const size_t H1 = std::hash<First>()(P.first);
    const size_t H2 = std::hash<Second>()(P.second);
    return H1 * 31 + H2;
  }
};
1793
/// Binary functor that adapts to any other binary functor after dereferencing
/// operands.
template <typename T> struct deref {
  T func; // The wrapped binary functor; applied to the pointees.

  // Could be further improved to cope with non-derivable functors and
  // non-binary functors (should be a variadic template member function
  // operator()).
  template <typename A, typename B> auto operator()(A &lhs, B &rhs) const {
    // Both operands must be dereferenceable and non-null (pointer-like);
    // the wrapped functor only ever sees the pointees.
    assert(lhs);
    assert(rhs);
    return func(*lhs, *rhs);
  }
};
1808
1809namespace detail {
1810
1811template <typename R> class enumerator_iter;
1812
1813template <typename R> struct result_pair {
1814 using value_reference =
1815 typename std::iterator_traits<IterOfRange<R>>::reference;
1816
1817 friend class enumerator_iter<R>;
1818
1819 result_pair() = default;
1820 result_pair(std::size_t Index, IterOfRange<R> Iter)
1821 : Index(Index), Iter(Iter) {}
1822
1823 result_pair<R>(const result_pair<R> &Other)
1824 : Index(Other.Index), Iter(Other.Iter) {}
1825 result_pair<R> &operator=(const result_pair<R> &Other) {
1826 Index = Other.Index;
1827 Iter = Other.Iter;
1828 return *this;
1829 }
1830
1831 std::size_t index() const { return Index; }
1832 const value_reference value() const { return *Iter; }
1833 value_reference value() { return *Iter; }
1834
1835private:
1836 std::size_t Index = std::numeric_limits<std::size_t>::max();
1837 IterOfRange<R> Iter;
1838};
1839
/// Forward iterator produced by enumerate(): advances the wrapped range
/// iterator and its running index in lock step.
template <typename R>
class enumerator_iter
    : public iterator_facade_base<
          enumerator_iter<R>, std::forward_iterator_tag, result_pair<R>,
          typename std::iterator_traits<IterOfRange<R>>::difference_type,
          typename std::iterator_traits<IterOfRange<R>>::pointer,
          typename std::iterator_traits<IterOfRange<R>>::reference> {
  using result_type = result_pair<R>;

public:
  /// Construct the end iterator; its index is the size_t max sentinel.
  explicit enumerator_iter(IterOfRange<R> EndIter)
      : Result(std::numeric_limits<size_t>::max(), EndIter) {}

  enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
      : Result(Index, Iter) {}

  result_type &operator*() { return Result; }
  const result_type &operator*() const { return Result; }

  enumerator_iter<R> &operator++() {
    // Incrementing the end sentinel is a bug: its index is the max marker.
    assert(Result.Index != std::numeric_limits<size_t>::max());
    ++Result.Iter;
    ++Result.Index;
    return *this;
  }

  bool operator==(const enumerator_iter<R> &RHS) const {
    // Don't compare indices here, only iterators. It's possible for an end
    // iterator to have different indices depending on whether it was created
    // by calling std::end() versus incrementing a valid iterator.
    return Result.Iter == RHS.Result.Iter;
  }

  enumerator_iter<R>(const enumerator_iter<R> &Other) : Result(Other.Result) {}
  enumerator_iter<R> &operator=(const enumerator_iter<R> &Other) {
    Result = Other.Result;
    return *this;
  }

private:
  result_type Result; // Current (index, iterator) pair handed out by *().
};
1882
1883template <typename R> class enumerator {
1884public:
1885 explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {}
1886
1887 enumerator_iter<R> begin() {
1888 return enumerator_iter<R>(0, std::begin(TheRange));
1889 }
1890
1891 enumerator_iter<R> end() {
1892 return enumerator_iter<R>(std::end(TheRange));
1893 }
1894
1895private:
1896 R TheRange;
1897};
1898
1899} // end namespace detail
1900
1901/// Given an input range, returns a new range whose values are are pair (A,B)
1902/// such that A is the 0-based index of the item in the sequence, and B is
1903/// the value from the original sequence. Example:
1904///
1905/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
1906/// for (auto X : enumerate(Items)) {
1907/// printf("Item %d - %c\n", X.index(), X.value());
1908/// }
1909///
1910/// Output:
1911/// Item 0 - A
1912/// Item 1 - B
1913/// Item 2 - C
1914/// Item 3 - D
1915///
1916template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
1917 return detail::enumerator<R>(std::forward<R>(TheRange));
1918}
1919
1920namespace detail {
1921
// Expands the tuple elements at indices I... into a plain argument list for f.
template <typename F, typename Tuple, std::size_t... I>
decltype(auto) apply_tuple_impl(F &&Fn, Tuple &&Tup, std::index_sequence<I...>) {
  return std::forward<F>(Fn)(std::get<I>(std::forward<Tuple>(Tup))...);
}
1926
1927} // end namespace detail
1928
1929/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
1930/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
1931/// return the result.
1932template <typename F, typename Tuple>
1933decltype(auto) apply_tuple(F &&f, Tuple &&t) {
1934 using Indices = std::make_index_sequence<
1935 std::tuple_size<typename std::decay<Tuple>::type>::value>;
1936
1937 return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
1938 Indices{});
1939}
1940
/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
/// time. Not meant for use with random-access iterators.
/// Can optionally take a predicate to filter lazily some items.
template <typename IterTy,
          typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
bool hasNItems(
    IterTy &&Begin, IterTy &&End, unsigned N,
    Pred &&ShouldBeCounted =
        [](const decltype(*std::declval<IterTy>()) &) { return true; },
    std::enable_if_t<
        !std::is_base_of<std::random_access_iterator_tag,
                         typename std::iterator_traits<std::remove_reference_t<
                             decltype(Begin)>>::iterator_category>::value,
        void> * = nullptr) {
  // Consume exactly N counted items; fail if the range runs out first.
  while (N != 0) {
    if (Begin == End)
      return false; // Too few.
    if (ShouldBeCounted(*Begin))
      --N;
    ++Begin;
  }
  // Any further counted item means more than N.
  for (; Begin != End; ++Begin)
    if (ShouldBeCounted(*Begin))
      return false; // Too many.
  return true;
}
1965
/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
/// time. Not meant for use with random-access iterators.
/// Can optionally take a predicate to lazily filter some items.
template <typename IterTy,
          typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
bool hasNItemsOrMore(
    IterTy &&Begin, IterTy &&End, unsigned N,
    Pred &&ShouldBeCounted =
        [](const decltype(*std::declval<IterTy>()) &) { return true; },
    std::enable_if_t<
        !std::is_base_of<std::random_access_iterator_tag,
                         typename std::iterator_traits<std::remove_reference_t<
                             decltype(Begin)>>::iterator_category>::value,
        void> * = nullptr) {
  // Succeed as soon as N counted items have been seen; fail if the range
  // is exhausted first.
  while (N != 0) {
    if (Begin == End)
      return false; // Too few.
    if (ShouldBeCounted(*Begin))
      --N;
    ++Begin;
  }
  return true;
}
1987
/// Returns true if the sequence [Begin, End) has N or less items. Can
/// optionally take a predicate to lazily filter some items.
template <typename IterTy,
          typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
bool hasNItemsOrLess(
    IterTy &&Begin, IterTy &&End, unsigned N,
    Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) {
      return true;
    }) {
  // N + 1 below must not wrap around.
  assert(N != std::numeric_limits<unsigned>::max());
  // "At most N" is the negation of "at least N + 1".
  return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted);
}
2000
/// Returns true if the given container has exactly N items
template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) {
  auto First = std::begin(C);
  auto Last = std::end(C);
  return hasNItems(First, Last, N);
}
2005
/// Returns true if the given container has N or more items
template <typename ContainerTy>
bool hasNItemsOrMore(ContainerTy &&C, unsigned N) {
  auto First = std::begin(C);
  auto Last = std::end(C);
  return hasNItemsOrMore(First, Last, N);
}
2011
/// Returns true if the given container has N or less items
template <typename ContainerTy>
bool hasNItemsOrLess(ContainerTy &&C, unsigned N) {
  auto First = std::begin(C);
  auto Last = std::end(C);
  return hasNItemsOrLess(First, Last, N);
}
2017
/// Returns a raw pointer that represents the same address as the argument.
///
/// This implementation can be removed once we move to C++20 where it's defined
/// as std::to_address().
///
/// The std::pointer_traits<>::to_address(p) variations of these overloads has
/// not been implemented.
template <class Ptr> auto to_address(const Ptr &P) {
  // Fancy-pointer case: delegate to the pointer-like type's operator->.
  return P.operator->();
}
template <class T> constexpr T *to_address(T *P) {
  // A raw pointer already is the address.
  return P;
}
2027
2028} // end namespace llvm
2029
2030#endif // LLVM_ADT_STLEXTRAS_H

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/bits/stl_algo.h

1// Algorithm implementation -*- C++ -*-
2
3// Copyright (C) 2001-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/*
26 *
27 * Copyright (c) 1994
28 * Hewlett-Packard Company
29 *
30 * Permission to use, copy, modify, distribute and sell this software
31 * and its documentation for any purpose is hereby granted without fee,
32 * provided that the above copyright notice appear in all copies and
33 * that both that copyright notice and this permission notice appear
34 * in supporting documentation. Hewlett-Packard Company makes no
35 * representations about the suitability of this software for any
36 * purpose. It is provided "as is" without express or implied warranty.
37 *
38 *
39 * Copyright (c) 1996
40 * Silicon Graphics Computer Systems, Inc.
41 *
42 * Permission to use, copy, modify, distribute and sell this software
43 * and its documentation for any purpose is hereby granted without fee,
44 * provided that the above copyright notice appear in all copies and
45 * that both that copyright notice and this permission notice appear
46 * in supporting documentation. Silicon Graphics makes no
47 * representations about the suitability of this software for any
48 * purpose. It is provided "as is" without express or implied warranty.
49 */
50
51/** @file bits/stl_algo.h
52 * This is an internal header file, included by other library headers.
53 * Do not attempt to use it directly. @headername{algorithm}
54 */
55
56#ifndef _STL_ALGO_H1
57#define _STL_ALGO_H1 1
58
59#include <cstdlib> // for rand
60#include <bits/algorithmfwd.h>
61#include <bits/stl_heap.h>
62#include <bits/stl_tempbuf.h> // for _Temporary_buffer
63#include <bits/predefined_ops.h>
64
65#if __cplusplus201402L >= 201103L
66#include <bits/uniform_int_dist.h>
67#endif
68
69// See concept_check.h for the __glibcxx_*_requires macros.
70
71namespace std _GLIBCXX_VISIBILITY(default)__attribute__ ((__visibility__ ("default")))
72{
73_GLIBCXX_BEGIN_NAMESPACE_VERSION
74
75 /// Swaps the median value of *__a, *__b and *__c under __comp to *__result
  /// Swaps the median value of *__a, *__b and *__c under __comp to *__result
  template<typename _Iterator, typename _Compare>
    void
    __move_median_to_first(_Iterator __result,_Iterator __a, _Iterator __b,
			   _Iterator __c, _Compare __comp)
    {
      // Three-way comparison tree: each leaf swaps the middle of the three
      // values into *__result.
      if (__comp(__a, __b))
	{
	  if (__comp(__b, __c))
	    std::iter_swap(__result, __b);   // a < b < c
	  else if (__comp(__a, __c))
	    std::iter_swap(__result, __c);   // a < c <= b
	  else
	    std::iter_swap(__result, __a);   // c <= a < b
	}
      else if (__comp(__a, __c))
	std::iter_swap(__result, __a);       // b <= a < c
      else if (__comp(__b, __c))
	std::iter_swap(__result, __c);       // b < c <= a
      else
	std::iter_swap(__result, __b);       // c <= b <= a
    }
97
98 /// This is an overload used by find algos for the Input Iterator case.
  /// This is an overload used by find algos for the Input Iterator case.
  template<typename _InputIterator, typename _Predicate>
    inline _InputIterator
    __find_if(_InputIterator __first, _InputIterator __last,
	      _Predicate __pred, input_iterator_tag)
    {
      // Note: __pred is applied to the iterator, not the element (see
      // __gnu_cxx::__ops adapters).
      while (__first != __last && !__pred(__first))
	++__first;
      return __first;
    }
108
109 /// This is an overload used by find algos for the RAI case.
  /// This is an overload used by find algos for the RAI case.
  template<typename _RandomAccessIterator, typename _Predicate>
    _RandomAccessIterator
    __find_if(_RandomAccessIterator __first, _RandomAccessIterator __last,
	      _Predicate __pred, random_access_iterator_tag)
    {
      // Unrolled four tests per iteration; random access makes the trip
      // count computable up front.
      typename iterator_traits<_RandomAccessIterator>::difference_type
	__trip_count = (__last - __first) >> 2;

      for (; __trip_count > 0; --__trip_count)
	{
	  if (__pred(__first))
	    return __first;
	  ++__first;

	  if (__pred(__first))
	    return __first;
	  ++__first;

	  if (__pred(__first))
	    return __first;
	  ++__first;

	  if (__pred(__first))
	    return __first;
	  ++__first;
	}

      // Handle the remaining 0-3 elements; the case labels fall through
      // on purpose.
      switch (__last - __first)
	{
	case 3:
	  if (__pred(__first))
	    return __first;
	  ++__first;
	  // fall through
	case 2:
	  if (__pred(__first))
	    return __first;
	  ++__first;
	  // fall through
	case 1:
	  if (__pred(__first))
	    return __first;
	  ++__first;
	  // fall through
	case 0:
	default:
	  return __last;
	}
    }
156
  // Dispatcher: picks the input-iterator or random-access implementation
  // based on the iterator category.
  template<typename _Iterator, typename _Predicate>
    inline _Iterator
    __find_if(_Iterator __first, _Iterator __last, _Predicate __pred)
    {
      return __find_if(__first, __last, __pred,
		       std::__iterator_category(__first));
    }
164
165 /// Provided for stable_partition to use.
  /// Provided for stable_partition to use.
  template<typename _InputIterator, typename _Predicate>
    inline _InputIterator
    __find_if_not(_InputIterator __first, _InputIterator __last,
		  _Predicate __pred)
    {
      // Reuse __find_if with the negated predicate.
      return std::__find_if(__first, __last,
			    __gnu_cxx::__ops::__negate(__pred),
			    std::__iterator_category(__first));
    }
175
176 /// Like find_if_not(), but uses and updates a count of the
177 /// remaining range length instead of comparing against an end
178 /// iterator.
  /// Like find_if_not(), but uses and updates a count of the
  /// remaining range length instead of comparing against an end
  /// iterator.
  template<typename _InputIterator, typename _Predicate, typename _Distance>
    _InputIterator
    __find_if_not_n(_InputIterator __first, _Distance& __len, _Predicate __pred)
    {
      // __len is decremented in place so the caller knows how much of the
      // range remains after the call.
      for (; __len; --__len, ++__first)
	if (!__pred(__first))
	  break;
      return __first;
    }
188
189 // set_difference
190 // set_intersection
191 // set_symmetric_difference
192 // set_union
193 // for_each
194 // find
195 // find_if
196 // find_first_of
197 // adjacent_find
198 // count
199 // count_if
200 // search
201
  // Straightforward O(n*m) subsequence search used by std::search and
  // std::find_end.
  template<typename _ForwardIterator1, typename _ForwardIterator2,
	   typename _BinaryPredicate>
    _ForwardIterator1
    __search(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
	     _ForwardIterator2 __first2, _ForwardIterator2 __last2,
	     _BinaryPredicate  __predicate)
    {
      // Test for empty ranges
      if (__first1 == __last1 || __first2 == __last2)
	return __first1;

      // Test for a pattern of length 1.
      _ForwardIterator2 __p1(__first2);
      if (++__p1 == __last2)
	return std::__find_if(__first1, __last1,
		__gnu_cxx::__ops::__iter_comp_iter(__predicate, __first2));

      // General case.
      _ForwardIterator2 __p;
      _ForwardIterator1 __current = __first1;

      for (;;)
	{
	  // Find the next position where the first pattern element matches.
	  __first1 =
	    std::__find_if(__first1, __last1,
		__gnu_cxx::__ops::__iter_comp_iter(__predicate, __first2));

	  if (__first1 == __last1)
	    return __last1;

	  // Verify the rest of the pattern from that candidate position.
	  __p = __p1;
	  __current = __first1;
	  if (++__current == __last1)
	    return __last1;

	  while (__predicate(__current, __p))
	    {
	      if (++__p == __last2)
		return __first1;
	      if (++__current == __last1)
		return __last1;
	    }
	  ++__first1;
	}
      return __first1; // not reached; loop above always returns
    }
248
249 // search_n
250
251 /**
252 * This is an helper function for search_n overloaded for forward iterators.
253 */
  // search_n helper for plain forward iterators: scan for a first match,
  // then count consecutive matches.
  template<typename _ForwardIterator, typename _Integer,
	   typename _UnaryPredicate>
    _ForwardIterator
    __search_n_aux(_ForwardIterator __first, _ForwardIterator __last,
		   _Integer __count, _UnaryPredicate __unary_pred,
		   std::forward_iterator_tag)
    {
      __first = std::__find_if(__first, __last, __unary_pred);
      while (__first != __last)
	{
	  typename iterator_traits<_ForwardIterator>::difference_type
	    __n = __count;
	  _ForwardIterator __i = __first;
	  ++__i;
	  // Count further consecutive matches after __first.
	  while (__i != __last && __n != 1 && __unary_pred(__i))
	    {
	      ++__i;
	      --__n;
	    }
	  if (__n == 1)
	    return __first;  // Found __count consecutive matches.
	  if (__i == __last)
	    return __last;
	  // Restart the scan after the mismatch.
	  __first = std::__find_if(++__i, __last, __unary_pred);
	}
      return __last;
    }
281
282 /**
283 * This is an helper function for search_n overloaded for random access
284 * iterators.
285 */
  // search_n helper for random access iterators: jump ahead by __count and
  // scan backwards, skipping whole windows on a mismatch.
  template<typename _RandomAccessIter, typename _Integer,
	   typename _UnaryPredicate>
    _RandomAccessIter
    __search_n_aux(_RandomAccessIter __first, _RandomAccessIter __last,
		   _Integer __count, _UnaryPredicate __unary_pred,
		   std::random_access_iterator_tag)
    {
      typedef typename std::iterator_traits<_RandomAccessIter>::difference_type
	_DistanceType;

      _DistanceType __tailSize = __last - __first;
      _DistanceType __remainder = __count;

      while (__remainder <= __tailSize) // the main loop...
	{
	  __first += __remainder;
	  __tailSize -= __remainder;
	  // __first here is always pointing to one past the last element of
	  // next possible match.
	  _RandomAccessIter __backTrack = __first;
	  while (__unary_pred(--__backTrack))
	    {
	      if (--__remainder == 0)
		return (__first - __count); // Success
	    }
	  // Skip ahead: fewer than __count matches can end before __backTrack.
	  __remainder = __count + 1 - (__first - __backTrack);
	}
      return __last; // Failure
    }
315
  // search_n dispatcher: handles the trivial counts, then selects the
  // forward or random-access helper by iterator category.
  template<typename _ForwardIterator, typename _Integer,
	   typename _UnaryPredicate>
    _ForwardIterator
    __search_n(_ForwardIterator __first, _ForwardIterator __last,
	       _Integer __count,
	       _UnaryPredicate __unary_pred)
    {
      if (__count <= 0)
	return __first;   // Zero/negative count: trivially found at __first.

      if (__count == 1)
	return std::__find_if(__first, __last, __unary_pred);

      return std::__search_n_aux(__first, __last, __count, __unary_pred,
				 std::__iterator_category(__first));
    }
332
333 // find_end for forward iterators.
  // find_end for forward iterators: repeatedly __search forward, remembering
  // the last successful match.
  template<typename _ForwardIterator1, typename _ForwardIterator2,
	   typename _BinaryPredicate>
    _ForwardIterator1
    __find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
	       _ForwardIterator2 __first2, _ForwardIterator2 __last2,
	       forward_iterator_tag, forward_iterator_tag,
	       _BinaryPredicate __comp)
    {
      if (__first2 == __last2)
	return __last1;   // Empty pattern: defined to return __last1.

      _ForwardIterator1 __result = __last1;
      while (1)
	{
	  _ForwardIterator1 __new_result
	    = std::__search(__first1, __last1, __first2, __last2, __comp);
	  if (__new_result == __last1)
	    return __result;  // No further match; return the last one found.
	  else
	    {
	      __result = __new_result;
	      __first1 = __new_result;
	      ++__first1;     // Resume searching one past the match start.
	    }
	}
    }
360
361 // find_end for bidirectional iterators (much faster).
  // find_end for bidirectional iterators (much faster): search for the
  // reversed pattern from the back using reverse iterators.
  template<typename _BidirectionalIterator1, typename _BidirectionalIterator2,
	   typename _BinaryPredicate>
    _BidirectionalIterator1
    __find_end(_BidirectionalIterator1 __first1,
	       _BidirectionalIterator1 __last1,
	       _BidirectionalIterator2 __first2,
	       _BidirectionalIterator2 __last2,
	       bidirectional_iterator_tag, bidirectional_iterator_tag,
	       _BinaryPredicate __comp)
    {
      // concept requirements
      __glibcxx_function_requires(_BidirectionalIteratorConcept<
				  _BidirectionalIterator1>)
      __glibcxx_function_requires(_BidirectionalIteratorConcept<
				  _BidirectionalIterator2>)

      typedef reverse_iterator<_BidirectionalIterator1> _RevIterator1;
      typedef reverse_iterator<_BidirectionalIterator2> _RevIterator2;

      _RevIterator1 __rlast1(__first1);
      _RevIterator2 __rlast2(__first2);
      _RevIterator1 __rresult = std::__search(_RevIterator1(__last1), __rlast1,
					      _RevIterator2(__last2), __rlast2,
					      __comp);

      if (__rresult == __rlast1)
	return __last1;
      else
	{
	  // Convert the reverse match back to a forward iterator at the
	  // start of the matched subsequence.
	  _BidirectionalIterator1 __result = __rresult.base();
	  std::advance(__result, -std::distance(__first2, __last2));
	  return __result;
	}
    }
396
397 /**
398 * @brief Find last matching subsequence in a sequence.
399 * @ingroup non_mutating_algorithms
400 * @param __first1 Start of range to search.
401 * @param __last1 End of range to search.
402 * @param __first2 Start of sequence to match.
403 * @param __last2 End of sequence to match.
404 * @return The last iterator @c i in the range
405 * @p [__first1,__last1-(__last2-__first2)) such that @c *(i+N) ==
406 * @p *(__first2+N) for each @c N in the range @p
407 * [0,__last2-__first2), or @p __last1 if no such iterator exists.
408 *
409 * Searches the range @p [__first1,__last1) for a sub-sequence that
410 * compares equal value-by-value with the sequence given by @p
411 * [__first2,__last2) and returns an iterator to the __first
412 * element of the sub-sequence, or @p __last1 if the sub-sequence
413 * is not found. The sub-sequence will be the last such
414 * subsequence contained in [__first1,__last1).
415 *
416 * Because the sub-sequence must lie completely within the range @p
417 * [__first1,__last1) it must start at a position less than @p
418 * __last1-(__last2-__first2) where @p __last2-__first2 is the
419 * length of the sub-sequence. This means that the returned
420 * iterator @c i will be in the range @p
421 * [__first1,__last1-(__last2-__first2))
422 */
  // Public find_end (equality form): dispatches on both iterator categories.
  template<typename _ForwardIterator1, typename _ForwardIterator2>
    inline _ForwardIterator1
    find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
	     _ForwardIterator2 __first2, _ForwardIterator2 __last2)
    {
      // concept requirements
      __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
      __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
      __glibcxx_function_requires(_EqualOpConcept<
	    typename iterator_traits<_ForwardIterator1>::value_type,
	    typename iterator_traits<_ForwardIterator2>::value_type>)
      __glibcxx_requires_valid_range(__first1, __last1);
      __glibcxx_requires_valid_range(__first2, __last2);

      return std::__find_end(__first1, __last1, __first2, __last2,
			     std::__iterator_category(__first1),
			     std::__iterator_category(__first2),
			     __gnu_cxx::__ops::__iter_equal_to_iter());
    }
442
443 /**
444 * @brief Find last matching subsequence in a sequence using a predicate.
445 * @ingroup non_mutating_algorithms
446 * @param __first1 Start of range to search.
447 * @param __last1 End of range to search.
448 * @param __first2 Start of sequence to match.
449 * @param __last2 End of sequence to match.
450 * @param __comp The predicate to use.
451 * @return The last iterator @c i in the range @p
452 * [__first1,__last1-(__last2-__first2)) such that @c
453 * predicate(*(i+N), @p (__first2+N)) is true for each @c N in the
454 * range @p [0,__last2-__first2), or @p __last1 if no such iterator
455 * exists.
456 *
457 * Searches the range @p [__first1,__last1) for a sub-sequence that
458 * compares equal value-by-value with the sequence given by @p
459 * [__first2,__last2) using comp as a predicate and returns an
460 * iterator to the first element of the sub-sequence, or @p __last1
461 * if the sub-sequence is not found. The sub-sequence will be the
462 * last such subsequence contained in [__first,__last1).
463 *
464 * Because the sub-sequence must lie completely within the range @p
465 * [__first1,__last1) it must start at a position less than @p
466 * __last1-(__last2-__first2) where @p __last2-__first2 is the
467 * length of the sub-sequence. This means that the returned
468 * iterator @c i will be in the range @p
469 * [__first1,__last1-(__last2-__first2))
470 */
  // Public find_end (predicate form): same dispatch, custom comparison.
  template<typename _ForwardIterator1, typename _ForwardIterator2,
	   typename _BinaryPredicate>
    inline _ForwardIterator1
    find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
	     _ForwardIterator2 __first2, _ForwardIterator2 __last2,
	     _BinaryPredicate __comp)
    {
      // concept requirements
      __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
      __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
      __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
	    typename iterator_traits<_ForwardIterator1>::value_type,
	    typename iterator_traits<_ForwardIterator2>::value_type>)
      __glibcxx_requires_valid_range(__first1, __last1);
      __glibcxx_requires_valid_range(__first2, __last2);

      return std::__find_end(__first1, __last1, __first2, __last2,
			     std::__iterator_category(__first1),
			     std::__iterator_category(__first2),
			     __gnu_cxx::__ops::__iter_comp_iter(__comp));
    }
492
493#if __cplusplus201402L >= 201103L
494 /**
495 * @brief Checks that a predicate is true for all the elements
496 * of a sequence.
497 * @ingroup non_mutating_algorithms
498 * @param __first An input iterator.
499 * @param __last An input iterator.
500 * @param __pred A predicate.
501 * @return True if the check is true, false otherwise.
502 *
503 * Returns true if @p __pred is true for each element in the range
504 * @p [__first,__last), and false otherwise.
505 */
506 template<typename _InputIterator, typename _Predicate>
507 inline bool
508 all_of(_InputIterator __first, _InputIterator __last, _Predicate __pred)
509 { return __last == std::find_if_not(__first, __last, __pred); }
510
511 /**
512 * @brief Checks that a predicate is false for all the elements
513 * of a sequence.
514 * @ingroup non_mutating_algorithms
515 * @param __first An input iterator.
516 * @param __last An input iterator.
517 * @param __pred A predicate.
518 * @return True if the check is true, false otherwise.
519 *
520 * Returns true if @p __pred is false for each element in the range
521 * @p [__first,__last), and false otherwise.
522 */
523 template<typename _InputIterator, typename _Predicate>
524 inline bool
525 none_of(_InputIterator __first, _InputIterator __last, _Predicate __pred)
526 { return __last == _GLIBCXX_STD_Astd::find_if(__first, __last, __pred); }
24
Calling 'operator=='
26
Returning from 'operator=='
27
Returning the value 1, which participates in a condition later
527
528 /**
529 * @brief Checks that a predicate is false for at least an element
530 * of a sequence.
531 * @ingroup non_mutating_algorithms
532 * @param __first An input iterator.
533 * @param __last An input iterator.
534 * @param __pred A predicate.
535 * @return True if the check is true, false otherwise.
536 *
537 * Returns true if an element exists in the range @p
538 * [__first,__last) such that @p __pred is true, and false
539 * otherwise.
540 */
541 template<typename _InputIterator, typename _Predicate>
542 inline bool
543 any_of(_InputIterator __first, _InputIterator __last, _Predicate __pred)
544 { return !std::none_of(__first, __last, __pred); }
23
Calling 'none_of<llvm::ilist_iterator<llvm::ilist_detail::node_options<llvm::Instruction, true, false, void>, false, false>, (lambda at /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp:2915:19)>'
28
Returning from 'none_of<llvm::ilist_iterator<llvm::ilist_detail::node_options<llvm::Instruction, true, false, void>, false, false>, (lambda at /build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp:2915:19)>'
29
Returning zero, which participates in a condition later
545
546 /**
547 * @brief Find the first element in a sequence for which a
548 * predicate is false.
549 * @ingroup non_mutating_algorithms
550 * @param __first An input iterator.
551 * @param __last An input iterator.
552 * @param __pred A predicate.
553 * @return The first iterator @c i in the range @p [__first,__last)
554 * such that @p __pred(*i) is false, or @p __last if no such iterator exists.
555 */
556 template<typename _InputIterator, typename _Predicate>
557 inline _InputIterator
558 find_if_not(_InputIterator __first, _InputIterator __last,
559 _Predicate __pred)
560 {
561 // concept requirements
562 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
563 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
564 typename iterator_traits<_InputIterator>::value_type>)
565 __glibcxx_requires_valid_range(__first, __last);
566 return std::__find_if_not(__first, __last,
567 __gnu_cxx::__ops::__pred_iter(__pred));
568 }
569
570 /**
571 * @brief Checks whether the sequence is partitioned.
572 * @ingroup mutating_algorithms
573 * @param __first An input iterator.
574 * @param __last An input iterator.
575 * @param __pred A predicate.
576 * @return True if the range @p [__first,__last) is partioned by @p __pred,
577 * i.e. if all elements that satisfy @p __pred appear before those that
578 * do not.
579 */
580 template<typename _InputIterator, typename _Predicate>
581 inline bool
582 is_partitioned(_InputIterator __first, _InputIterator __last,
583 _Predicate __pred)
584 {
585 __first = std::find_if_not(__first, __last, __pred);
586 return std::none_of(__first, __last, __pred);
587 }
588
589 /**
590 * @brief Find the partition point of a partitioned range.
591 * @ingroup mutating_algorithms
592 * @param __first An iterator.
593 * @param __last Another iterator.
594 * @param __pred A predicate.
595 * @return An iterator @p mid such that @p all_of(__first, mid, __pred)
596 * and @p none_of(mid, __last, __pred) are both true.
597 */
598 template<typename _ForwardIterator, typename _Predicate>
599 _ForwardIterator
600 partition_point(_ForwardIterator __first, _ForwardIterator __last,
601 _Predicate __pred)
602 {
603 // concept requirements
604 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
605 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
606 typename iterator_traits<_ForwardIterator>::value_type>)
607
608 // A specific debug-mode test will be necessary...
609 __glibcxx_requires_valid_range(__first, __last);
610
611 typedef typename iterator_traits<_ForwardIterator>::difference_type
612 _DistanceType;
613
614 _DistanceType __len = std::distance(__first, __last);
615 _DistanceType __half;
616 _ForwardIterator __middle;
617
618 while (__len > 0)
619 {
620 __half = __len >> 1;
621 __middle = __first;
622 std::advance(__middle, __half);
623 if (__pred(*__middle))
624 {
625 __first = __middle;
626 ++__first;
627 __len = __len - __half - 1;
628 }
629 else
630 __len = __half;
631 }
632 return __first;
633 }
634#endif
635
636 template<typename _InputIterator, typename _OutputIterator,
637 typename _Predicate>
638 _OutputIterator
639 __remove_copy_if(_InputIterator __first, _InputIterator __last,
640 _OutputIterator __result, _Predicate __pred)
641 {
642 for (; __first != __last; ++__first)
643 if (!__pred(__first))
644 {
645 *__result = *__first;
646 ++__result;
647 }
648 return __result;
649 }
650
651 /**
652 * @brief Copy a sequence, removing elements of a given value.
653 * @ingroup mutating_algorithms
654 * @param __first An input iterator.
655 * @param __last An input iterator.
656 * @param __result An output iterator.
657 * @param __value The value to be removed.
658 * @return An iterator designating the end of the resulting sequence.
659 *
660 * Copies each element in the range @p [__first,__last) not equal
661 * to @p __value to the range beginning at @p __result.
662 * remove_copy() is stable, so the relative order of elements that
663 * are copied is unchanged.
664 */
665 template<typename _InputIterator, typename _OutputIterator, typename _Tp>
666 inline _OutputIterator
667 remove_copy(_InputIterator __first, _InputIterator __last,
668 _OutputIterator __result, const _Tp& __value)
669 {
670 // concept requirements
671 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
672 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
673 typename iterator_traits<_InputIterator>::value_type>)
674 __glibcxx_function_requires(_EqualOpConcept<
675 typename iterator_traits<_InputIterator>::value_type, _Tp>)
676 __glibcxx_requires_valid_range(__first, __last);
677
678 return std::__remove_copy_if(__first, __last, __result,
679 __gnu_cxx::__ops::__iter_equals_val(__value));
680 }
681
682 /**
683 * @brief Copy a sequence, removing elements for which a predicate is true.
684 * @ingroup mutating_algorithms
685 * @param __first An input iterator.
686 * @param __last An input iterator.
687 * @param __result An output iterator.
688 * @param __pred A predicate.
689 * @return An iterator designating the end of the resulting sequence.
690 *
691 * Copies each element in the range @p [__first,__last) for which
692 * @p __pred returns false to the range beginning at @p __result.
693 *
694 * remove_copy_if() is stable, so the relative order of elements that are
695 * copied is unchanged.
696 */
697 template<typename _InputIterator, typename _OutputIterator,
698 typename _Predicate>
699 inline _OutputIterator
700 remove_copy_if(_InputIterator __first, _InputIterator __last,
701 _OutputIterator __result, _Predicate __pred)
702 {
703 // concept requirements
704 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
705 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
706 typename iterator_traits<_InputIterator>::value_type>)
707 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
708 typename iterator_traits<_InputIterator>::value_type>)
709 __glibcxx_requires_valid_range(__first, __last);
710
711 return std::__remove_copy_if(__first, __last, __result,
712 __gnu_cxx::__ops::__pred_iter(__pred));
713 }
714
715#if __cplusplus201402L >= 201103L
716 /**
717 * @brief Copy the elements of a sequence for which a predicate is true.
718 * @ingroup mutating_algorithms
719 * @param __first An input iterator.
720 * @param __last An input iterator.
721 * @param __result An output iterator.
722 * @param __pred A predicate.
723 * @return An iterator designating the end of the resulting sequence.
724 *
725 * Copies each element in the range @p [__first,__last) for which
726 * @p __pred returns true to the range beginning at @p __result.
727 *
728 * copy_if() is stable, so the relative order of elements that are
729 * copied is unchanged.
730 */
731 template<typename _InputIterator, typename _OutputIterator,
732 typename _Predicate>
733 _OutputIterator
734 copy_if(_InputIterator __first, _InputIterator __last,
735 _OutputIterator __result, _Predicate __pred)
736 {
737 // concept requirements
738 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
739 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
740 typename iterator_traits<_InputIterator>::value_type>)
741 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
742 typename iterator_traits<_InputIterator>::value_type>)
743 __glibcxx_requires_valid_range(__first, __last);
744
745 for (; __first != __last; ++__first)
746 if (__pred(*__first))
747 {
748 *__result = *__first;
749 ++__result;
750 }
751 return __result;
752 }
753
754 template<typename _InputIterator, typename _Size, typename _OutputIterator>
755 _OutputIterator
756 __copy_n(_InputIterator __first, _Size __n,
757 _OutputIterator __result, input_iterator_tag)
758 {
759 if (__n > 0)
760 {
761 while (true)
762 {
763 *__result = *__first;
764 ++__result;
765 if (--__n > 0)
766 ++__first;
767 else
768 break;
769 }
770 }
771 return __result;
772 }
773
774 template<typename _RandomAccessIterator, typename _Size,
775 typename _OutputIterator>
776 inline _OutputIterator
777 __copy_n(_RandomAccessIterator __first, _Size __n,
778 _OutputIterator __result, random_access_iterator_tag)
779 { return std::copy(__first, __first + __n, __result); }
780
781 /**
782 * @brief Copies the range [first,first+n) into [result,result+n).
783 * @ingroup mutating_algorithms
784 * @param __first An input iterator.
785 * @param __n The number of elements to copy.
786 * @param __result An output iterator.
787 * @return result+n.
788 *
789 * This inline function will boil down to a call to @c memmove whenever
790 * possible. Failing that, if random access iterators are passed, then the
791 * loop count will be known (and therefore a candidate for compiler
792 * optimizations such as unrolling).
793 */
794 template<typename _InputIterator, typename _Size, typename _OutputIterator>
795 inline _OutputIterator
796 copy_n(_InputIterator __first, _Size __n, _OutputIterator __result)
797 {
798 // concept requirements
799 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
800 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
801 typename iterator_traits<_InputIterator>::value_type>)
802
803 return std::__copy_n(__first, __n, __result,
804 std::__iterator_category(__first));
805 }
806
807 /**
808 * @brief Copy the elements of a sequence to separate output sequences
809 * depending on the truth value of a predicate.
810 * @ingroup mutating_algorithms
811 * @param __first An input iterator.
812 * @param __last An input iterator.
813 * @param __out_true An output iterator.
814 * @param __out_false An output iterator.
815 * @param __pred A predicate.
816 * @return A pair designating the ends of the resulting sequences.
817 *
818 * Copies each element in the range @p [__first,__last) for which
819 * @p __pred returns true to the range beginning at @p out_true
820 * and each element for which @p __pred returns false to @p __out_false.
821 */
822 template<typename _InputIterator, typename _OutputIterator1,
823 typename _OutputIterator2, typename _Predicate>
824 pair<_OutputIterator1, _OutputIterator2>
825 partition_copy(_InputIterator __first, _InputIterator __last,
826 _OutputIterator1 __out_true, _OutputIterator2 __out_false,
827 _Predicate __pred)
828 {
829 // concept requirements
830 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
831 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator1,
832 typename iterator_traits<_InputIterator>::value_type>)
833 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator2,
834 typename iterator_traits<_InputIterator>::value_type>)
835 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
836 typename iterator_traits<_InputIterator>::value_type>)
837 __glibcxx_requires_valid_range(__first, __last);
838
839 for (; __first != __last; ++__first)
840 if (__pred(*__first))
841 {
842 *__out_true = *__first;
843 ++__out_true;
844 }
845 else
846 {
847 *__out_false = *__first;
848 ++__out_false;
849 }
850
851 return pair<_OutputIterator1, _OutputIterator2>(__out_true, __out_false);
852 }
853#endif
854
855 template<typename _ForwardIterator, typename _Predicate>
856 _ForwardIterator
857 __remove_if(_ForwardIterator __first, _ForwardIterator __last,
858 _Predicate __pred)
859 {
860 __first = std::__find_if(__first, __last, __pred);
861 if (__first == __last)
862 return __first;
863 _ForwardIterator __result = __first;
864 ++__first;
865 for (; __first != __last; ++__first)
866 if (!__pred(__first))
867 {
868 *__result = _GLIBCXX_MOVE(*__first)std::move(*__first);
869 ++__result;
870 }
871 return __result;
872 }
873
874 /**
875 * @brief Remove elements from a sequence.
876 * @ingroup mutating_algorithms
877 * @param __first An input iterator.
878 * @param __last An input iterator.
879 * @param __value The value to be removed.
880 * @return An iterator designating the end of the resulting sequence.
881 *
882 * All elements equal to @p __value are removed from the range
883 * @p [__first,__last).
884 *
885 * remove() is stable, so the relative order of elements that are
886 * not removed is unchanged.
887 *
888 * Elements between the end of the resulting sequence and @p __last
889 * are still present, but their value is unspecified.
890 */
891 template<typename _ForwardIterator, typename _Tp>
892 inline _ForwardIterator
893 remove(_ForwardIterator __first, _ForwardIterator __last,
894 const _Tp& __value)
895 {
896 // concept requirements
897 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
898 _ForwardIterator>)
899 __glibcxx_function_requires(_EqualOpConcept<
900 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
901 __glibcxx_requires_valid_range(__first, __last);
902
903 return std::__remove_if(__first, __last,
904 __gnu_cxx::__ops::__iter_equals_val(__value));
905 }
906
907 /**
908 * @brief Remove elements from a sequence using a predicate.
909 * @ingroup mutating_algorithms
910 * @param __first A forward iterator.
911 * @param __last A forward iterator.
912 * @param __pred A predicate.
913 * @return An iterator designating the end of the resulting sequence.
914 *
915 * All elements for which @p __pred returns true are removed from the range
916 * @p [__first,__last).
917 *
918 * remove_if() is stable, so the relative order of elements that are
919 * not removed is unchanged.
920 *
921 * Elements between the end of the resulting sequence and @p __last
922 * are still present, but their value is unspecified.
923 */
924 template<typename _ForwardIterator, typename _Predicate>
925 inline _ForwardIterator
926 remove_if(_ForwardIterator __first, _ForwardIterator __last,
927 _Predicate __pred)
928 {
929 // concept requirements
930 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
931 _ForwardIterator>)
932 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
933 typename iterator_traits<_ForwardIterator>::value_type>)
934 __glibcxx_requires_valid_range(__first, __last);
935
936 return std::__remove_if(__first, __last,
937 __gnu_cxx::__ops::__pred_iter(__pred));
938 }
939
940 template<typename _ForwardIterator, typename _BinaryPredicate>
941 _ForwardIterator
942 __adjacent_find(_ForwardIterator __first, _ForwardIterator __last,
943 _BinaryPredicate __binary_pred)
944 {
945 if (__first == __last)
946 return __last;
947 _ForwardIterator __next = __first;
948 while (++__next != __last)
949 {
950 if (__binary_pred(__first, __next))
951 return __first;
952 __first = __next;
953 }
954 return __last;
955 }
956
957 template<typename _ForwardIterator, typename _BinaryPredicate>
958 _ForwardIterator
959 __unique(_ForwardIterator __first, _ForwardIterator __last,
960 _BinaryPredicate __binary_pred)
961 {
962 // Skip the beginning, if already unique.
963 __first = std::__adjacent_find(__first, __last, __binary_pred);
964 if (__first == __last)
965 return __last;
966
967 // Do the real copy work.
968 _ForwardIterator __dest = __first;
969 ++__first;
970 while (++__first != __last)
971 if (!__binary_pred(__dest, __first))
972 *++__dest = _GLIBCXX_MOVE(*__first)std::move(*__first);
973 return ++__dest;
974 }
975
976 /**
977 * @brief Remove consecutive duplicate values from a sequence.
978 * @ingroup mutating_algorithms
979 * @param __first A forward iterator.
980 * @param __last A forward iterator.
981 * @return An iterator designating the end of the resulting sequence.
982 *
983 * Removes all but the first element from each group of consecutive
984 * values that compare equal.
985 * unique() is stable, so the relative order of elements that are
986 * not removed is unchanged.
987 * Elements between the end of the resulting sequence and @p __last
988 * are still present, but their value is unspecified.
989 */
990 template<typename _ForwardIterator>
991 inline _ForwardIterator
992 unique(_ForwardIterator __first, _ForwardIterator __last)
993 {
994 // concept requirements
995 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
996 _ForwardIterator>)
997 __glibcxx_function_requires(_EqualityComparableConcept<
998 typename iterator_traits<_ForwardIterator>::value_type>)
999 __glibcxx_requires_valid_range(__first, __last);
1000
1001 return std::__unique(__first, __last,
1002 __gnu_cxx::__ops::__iter_equal_to_iter());
1003 }
1004
1005 /**
1006 * @brief Remove consecutive values from a sequence using a predicate.
1007 * @ingroup mutating_algorithms
1008 * @param __first A forward iterator.
1009 * @param __last A forward iterator.
1010 * @param __binary_pred A binary predicate.
1011 * @return An iterator designating the end of the resulting sequence.
1012 *
1013 * Removes all but the first element from each group of consecutive
1014 * values for which @p __binary_pred returns true.
1015 * unique() is stable, so the relative order of elements that are
1016 * not removed is unchanged.
1017 * Elements between the end of the resulting sequence and @p __last
1018 * are still present, but their value is unspecified.
1019 */
1020 template<typename _ForwardIterator, typename _BinaryPredicate>
1021 inline _ForwardIterator
1022 unique(_ForwardIterator __first, _ForwardIterator __last,
1023 _BinaryPredicate __binary_pred)
1024 {
1025 // concept requirements
1026 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
1027 _ForwardIterator>)
1028 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1029 typename iterator_traits<_ForwardIterator>::value_type,
1030 typename iterator_traits<_ForwardIterator>::value_type>)
1031 __glibcxx_requires_valid_range(__first, __last);
1032
1033 return std::__unique(__first, __last,
1034 __gnu_cxx::__ops::__iter_comp_iter(__binary_pred));
1035 }
1036
1037 /**
1038 * This is an uglified
1039 * unique_copy(_InputIterator, _InputIterator, _OutputIterator,
1040 * _BinaryPredicate)
1041 * overloaded for forward iterators and output iterator as result.
1042 */
1043 template<typename _ForwardIterator, typename _OutputIterator,
1044 typename _BinaryPredicate>
1045 _OutputIterator
1046 __unique_copy(_ForwardIterator __first, _ForwardIterator __last,
1047 _OutputIterator __result, _BinaryPredicate __binary_pred,
1048 forward_iterator_tag, output_iterator_tag)
1049 {
1050 // concept requirements -- iterators already checked
1051 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1052 typename iterator_traits<_ForwardIterator>::value_type,
1053 typename iterator_traits<_ForwardIterator>::value_type>)
1054
1055 _ForwardIterator __next = __first;
1056 *__result = *__first;
1057 while (++__next != __last)
1058 if (!__binary_pred(__first, __next))
1059 {
1060 __first = __next;
1061 *++__result = *__first;
1062 }
1063 return ++__result;
1064 }
1065
1066 /**
1067 * This is an uglified
1068 * unique_copy(_InputIterator, _InputIterator, _OutputIterator,
1069 * _BinaryPredicate)
1070 * overloaded for input iterators and output iterator as result.
1071 */
1072 template<typename _InputIterator, typename _OutputIterator,
1073 typename _BinaryPredicate>
1074 _OutputIterator
1075 __unique_copy(_InputIterator __first, _InputIterator __last,
1076 _OutputIterator __result, _BinaryPredicate __binary_pred,
1077 input_iterator_tag, output_iterator_tag)
1078 {
1079 // concept requirements -- iterators already checked
1080 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1081 typename iterator_traits<_InputIterator>::value_type,
1082 typename iterator_traits<_InputIterator>::value_type>)
1083
1084 typename iterator_traits<_InputIterator>::value_type __value = *__first;
1085 __decltype(__gnu_cxx::__ops::__iter_comp_val(__binary_pred))
1086 __rebound_pred
1087 = __gnu_cxx::__ops::__iter_comp_val(__binary_pred);
1088 *__result = __value;
1089 while (++__first != __last)
1090 if (!__rebound_pred(__first, __value))
1091 {
1092 __value = *__first;
1093 *++__result = __value;
1094 }
1095 return ++__result;
1096 }
1097
1098 /**
1099 * This is an uglified
1100 * unique_copy(_InputIterator, _InputIterator, _OutputIterator,
1101 * _BinaryPredicate)
1102 * overloaded for input iterators and forward iterator as result.
1103 */
1104 template<typename _InputIterator, typename _ForwardIterator,
1105 typename _BinaryPredicate>
1106 _ForwardIterator
1107 __unique_copy(_InputIterator __first, _InputIterator __last,
1108 _ForwardIterator __result, _BinaryPredicate __binary_pred,
1109 input_iterator_tag, forward_iterator_tag)
1110 {
1111 // concept requirements -- iterators already checked
1112 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
1113 typename iterator_traits<_ForwardIterator>::value_type,
1114 typename iterator_traits<_InputIterator>::value_type>)
1115 *__result = *__first;
1116 while (++__first != __last)
1117 if (!__binary_pred(__result, __first))
1118 *++__result = *__first;
1119 return ++__result;
1120 }
1121
1122 /**
1123 * This is an uglified reverse(_BidirectionalIterator,
1124 * _BidirectionalIterator)
1125 * overloaded for bidirectional iterators.
1126 */
1127 template<typename _BidirectionalIterator>
1128 void
1129 __reverse(_BidirectionalIterator __first, _BidirectionalIterator __last,
1130 bidirectional_iterator_tag)
1131 {
1132 while (true)
1133 if (__first == __last || __first == --__last)
1134 return;
1135 else
1136 {
1137 std::iter_swap(__first, __last);
1138 ++__first;
1139 }
1140 }
1141
1142 /**
1143 * This is an uglified reverse(_BidirectionalIterator,
1144 * _BidirectionalIterator)
1145 * overloaded for random access iterators.
1146 */
1147 template<typename _RandomAccessIterator>
1148 void
1149 __reverse(_RandomAccessIterator __first, _RandomAccessIterator __last,
1150 random_access_iterator_tag)
1151 {
1152 if (__first == __last)
1153 return;
1154 --__last;
1155 while (__first < __last)
1156 {
1157 std::iter_swap(__first, __last);
1158 ++__first;
1159 --__last;
1160 }
1161 }
1162
1163 /**
1164 * @brief Reverse a sequence.
1165 * @ingroup mutating_algorithms
1166 * @param __first A bidirectional iterator.
1167 * @param __last A bidirectional iterator.
1168 * @return reverse() returns no value.
1169 *
1170 * Reverses the order of the elements in the range @p [__first,__last),
1171 * so that the first element becomes the last etc.
1172 * For every @c i such that @p 0<=i<=(__last-__first)/2), @p reverse()
1173 * swaps @p *(__first+i) and @p *(__last-(i+1))
1174 */
1175 template<typename _BidirectionalIterator>
1176 inline void
1177 reverse(_BidirectionalIterator __first, _BidirectionalIterator __last)
1178 {
1179 // concept requirements
1180 __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
1181 _BidirectionalIterator>)
1182 __glibcxx_requires_valid_range(__first, __last);
1183 std::__reverse(__first, __last, std::__iterator_category(__first));
1184 }
1185
1186 /**
1187 * @brief Copy a sequence, reversing its elements.
1188 * @ingroup mutating_algorithms
1189 * @param __first A bidirectional iterator.
1190 * @param __last A bidirectional iterator.
1191 * @param __result An output iterator.
1192 * @return An iterator designating the end of the resulting sequence.
1193 *
1194 * Copies the elements in the range @p [__first,__last) to the
1195 * range @p [__result,__result+(__last-__first)) such that the
1196 * order of the elements is reversed. For every @c i such that @p
1197 * 0<=i<=(__last-__first), @p reverse_copy() performs the
1198 * assignment @p *(__result+(__last-__first)-1-i) = *(__first+i).
1199 * The ranges @p [__first,__last) and @p
1200 * [__result,__result+(__last-__first)) must not overlap.
1201 */
1202 template<typename _BidirectionalIterator, typename _OutputIterator>
1203 _OutputIterator
1204 reverse_copy(_BidirectionalIterator __first, _BidirectionalIterator __last,
1205 _OutputIterator __result)
1206 {
1207 // concept requirements
1208 __glibcxx_function_requires(_BidirectionalIteratorConcept<
1209 _BidirectionalIterator>)
1210 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
1211 typename iterator_traits<_BidirectionalIterator>::value_type>)
1212 __glibcxx_requires_valid_range(__first, __last);
1213
1214 while (__first != __last)
1215 {
1216 --__last;
1217 *__result = *__last;
1218 ++__result;
1219 }
1220 return __result;
1221 }
1222
1223 /**
1224 * This is a helper function for the rotate algorithm specialized on RAIs.
1225 * It returns the greatest common divisor of two integer values.
1226 */
1227 template<typename _EuclideanRingElement>
1228 _EuclideanRingElement
1229 __gcd(_EuclideanRingElement __m, _EuclideanRingElement __n)
1230 {
1231 while (__n != 0)
1232 {
1233 _EuclideanRingElement __t = __m % __n;
1234 __m = __n;
1235 __n = __t;
1236 }
1237 return __m;
1238 }
1239
1240 inline namespace _V2
1241 {
1242
1243 /// This is a helper function for the rotate algorithm.
1244 template<typename _ForwardIterator>
1245 _ForwardIterator
1246 __rotate(_ForwardIterator __first,
1247 _ForwardIterator __middle,
1248 _ForwardIterator __last,
1249 forward_iterator_tag)
1250 {
1251 if (__first == __middle)
1252 return __last;
1253 else if (__last == __middle)
1254 return __first;
1255
1256 _ForwardIterator __first2 = __middle;
1257 do
1258 {
1259 std::iter_swap(__first, __first2);
1260 ++__first;
1261 ++__first2;
1262 if (__first == __middle)
1263 __middle = __first2;
1264 }
1265 while (__first2 != __last);
1266
1267 _ForwardIterator __ret = __first;
1268
1269 __first2 = __middle;
1270
1271 while (__first2 != __last)
1272 {
1273 std::iter_swap(__first, __first2);
1274 ++__first;
1275 ++__first2;
1276 if (__first == __middle)
1277 __middle = __first2;
1278 else if (__first2 == __last)
1279 __first2 = __middle;
1280 }
1281 return __ret;
1282 }
1283
1284 /// This is a helper function for the rotate algorithm.
1285 template<typename _BidirectionalIterator>
1286 _BidirectionalIterator
1287 __rotate(_BidirectionalIterator __first,
1288 _BidirectionalIterator __middle,
1289 _BidirectionalIterator __last,
1290 bidirectional_iterator_tag)
1291 {
1292 // concept requirements
1293 __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
1294 _BidirectionalIterator>)
1295
1296 if (__first == __middle)
1297 return __last;
1298 else if (__last == __middle)
1299 return __first;
1300
1301 std::__reverse(__first, __middle, bidirectional_iterator_tag());
1302 std::__reverse(__middle, __last, bidirectional_iterator_tag());
1303
1304 while (__first != __middle && __middle != __last)
1305 {
1306 std::iter_swap(__first, --__last);
1307 ++__first;
1308 }
1309
1310 if (__first == __middle)
1311 {
1312 std::__reverse(__middle, __last, bidirectional_iterator_tag());
1313 return __last;
1314 }
1315 else
1316 {
1317 std::__reverse(__first, __middle, bidirectional_iterator_tag());
1318 return __first;
1319 }
1320 }
1321
1322 /// This is a helper function for the rotate algorithm.
1323 template<typename _RandomAccessIterator>
1324 _RandomAccessIterator
1325 __rotate(_RandomAccessIterator __first,
1326 _RandomAccessIterator __middle,
1327 _RandomAccessIterator __last,
1328 random_access_iterator_tag)
1329 {
1330 // concept requirements
1331 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
1332 _RandomAccessIterator>)
1333
1334 if (__first == __middle)
1335 return __last;
1336 else if (__last == __middle)
1337 return __first;
1338
1339 typedef typename iterator_traits<_RandomAccessIterator>::difference_type
1340 _Distance;
1341 typedef typename iterator_traits<_RandomAccessIterator>::value_type
1342 _ValueType;
1343
1344 _Distance __n = __last - __first;
1345 _Distance __k = __middle - __first;
1346
1347 if (__k == __n - __k)
1348 {
1349 std::swap_ranges(__first, __middle, __middle);
1350 return __middle;
1351 }
1352
1353 _RandomAccessIterator __p = __first;
1354 _RandomAccessIterator __ret = __first + (__last - __middle);
1355
1356 for (;;)
1357 {
1358 if (__k < __n - __k)
1359 {
1360 if (__is_pod(_ValueType) && __k == 1)
1361 {
1362 _ValueType __t = _GLIBCXX_MOVE(*__p)std::move(*__p);
1363 _GLIBCXX_MOVE3(__p + 1, __p + __n, __p)std::move(__p + 1, __p + __n, __p);
1364 *(__p + __n - 1) = _GLIBCXX_MOVE(__t)std::move(__t);
1365 return __ret;
1366 }
1367 _RandomAccessIterator __q = __p + __k;
1368 for (_Distance __i = 0; __i < __n - __k; ++ __i)
1369 {
1370 std::iter_swap(__p, __q);
1371 ++__p;
1372 ++__q;
1373 }
1374 __n %= __k;
1375 if (__n == 0)
1376 return __ret;
1377 std::swap(__n, __k);
1378 __k = __n - __k;
1379 }
1380 else
1381 {
1382 __k = __n - __k;
1383 if (__is_pod(_ValueType) && __k == 1)
1384 {
1385 _ValueType __t = _GLIBCXX_MOVE(*(__p + __n - 1))std::move(*(__p + __n - 1));
1386 _GLIBCXX_MOVE_BACKWARD3(__p, __p + __n - 1, __p + __n)std::move_backward(__p, __p + __n - 1, __p + __n);
1387 *__p = _GLIBCXX_MOVE(__t)std::move(__t);
1388 return __ret;
1389 }
1390 _RandomAccessIterator __q = __p + __n;
1391 __p = __q - __k;
1392 for (_Distance __i = 0; __i < __n - __k; ++ __i)
1393 {
1394 --__p;
1395 --__q;
1396 std::iter_swap(__p, __q);
1397 }
1398 __n %= __k;
1399 if (__n == 0)
1400 return __ret;
1401 std::swap(__n, __k);
1402 }
1403 }
1404 }
1405
1406 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1407 // DR 488. rotate throws away useful information
1408 /**
1409 * @brief Rotate the elements of a sequence.
1410 * @ingroup mutating_algorithms
1411 * @param __first A forward iterator.
1412 * @param __middle A forward iterator.
1413 * @param __last A forward iterator.
1414 * @return first + (last - middle).
1415 *
1416 * Rotates the elements of the range @p [__first,__last) by
1417 * @p (__middle - __first) positions so that the element at @p __middle
1418 * is moved to @p __first, the element at @p __middle+1 is moved to
1419 * @p __first+1 and so on for each element in the range
1420 * @p [__first,__last).
1421 *
1422 * This effectively swaps the ranges @p [__first,__middle) and
1423 * @p [__middle,__last).
1424 *
1425 * Performs
1426 * @p *(__first+(n+(__last-__middle))%(__last-__first))=*(__first+n)
1427 * for each @p n in the range @p [0,__last-__first).
1428 */
// Public std::rotate entry point: validates the ranges, then tag-dispatches
// to the forward/bidirectional/random-access __rotate implementation via
// std::__iterator_category.
1429 template<typename _ForwardIterator>
1430 inline _ForwardIterator
1431 rotate(_ForwardIterator __first, _ForwardIterator __middle,
1432 _ForwardIterator __last)
1433 {
1434 // concept requirements
1435 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
1436 _ForwardIterator>)
1437 __glibcxx_requires_valid_range(__first, __middle);
1438 __glibcxx_requires_valid_range(__middle, __last);
1439
1440 return std::__rotate(__first, __middle, __last,
1441 std::__iterator_category(__first));
1442 }
1443
1444 } // namespace _V2
1445
1446 /**
1447 * @brief Copy a sequence, rotating its elements.
1448 * @ingroup mutating_algorithms
1449 * @param __first A forward iterator.
1450 * @param __middle A forward iterator.
1451 * @param __last A forward iterator.
1452 * @param __result An output iterator.
1453 * @return An iterator designating the end of the resulting sequence.
1454 *
1455 * Copies the elements of the range @p [__first,__last) to the
1456 * range beginning at @result, rotating the copied elements by
1457 * @p (__middle-__first) positions so that the element at @p __middle
1458 * is moved to @p __result, the element at @p __middle+1 is moved
1459 * to @p __result+1 and so on for each element in the range @p
1460 * [__first,__last).
1461 *
1462 * Performs
1463 * @p *(__result+(n+(__last-__middle))%(__last-__first))=*(__first+n)
1464 * for each @p n in the range @p [0,__last-__first).
1465 */
// rotate_copy is implemented as two plain copies: the tail [__middle,__last)
// goes to __result first, then the head [__first,__middle) is appended —
// no in-place rotation is needed because the output range is distinct.
1466 template<typename _ForwardIterator, typename _OutputIterator>
1467 inline _OutputIterator
1468 rotate_copy(_ForwardIterator __first, _ForwardIterator __middle,
1469 _ForwardIterator __last, _OutputIterator __result)
1470 {
1471 // concept requirements
1472 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
1473 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
1474 typename iterator_traits<_ForwardIterator>::value_type>)
1475 __glibcxx_requires_valid_range(__first, __middle);
1476 __glibcxx_requires_valid_range(__middle, __last);
1477
1478 return std::copy(__first, __middle,
1479 std::copy(__middle, __last, __result));
1480 }
1481
1482 /// Partition helper for forward iterators: single forward pass that swaps
1482 /// each pred-true element encountered after the first pred-false element
1482 /// back into the growing "true" prefix.  Returns the partition point.
1483 template<typename _ForwardIterator, typename _Predicate>
1484 _ForwardIterator
1485 __partition(_ForwardIterator __first, _ForwardIterator __last,
1486 _Predicate __pred, forward_iterator_tag)
1487 {
1488 if (__first == __last)
1489 return __first;
1490
// Skip the maximal pred-true prefix; if the whole range is true we're done.
1491 while (__pred(*__first))
1492 if (++__first == __last)
1493 return __first;
1494
1495 _ForwardIterator __next = __first;
1496
// __first now points at the first pred-false element; every later true
// element is swapped into place and __first advances past it.
1497 while (++__next != __last)
1498 if (__pred(*__next))
1499 {
1500 std::iter_swap(__first, __next);
1501 ++__first;
1502 }
1503
1504 return __first;
1505 }
1506
1507 /// Partition helper for bidirectional iterators: classic two-cursor scheme —
1507 /// advance __first over true elements, retreat __last over false elements,
1507 /// swap when the cursors stop, until they meet.
1508 template<typename _BidirectionalIterator, typename _Predicate>
1509 _BidirectionalIterator
1510 __partition(_BidirectionalIterator __first, _BidirectionalIterator __last,
1511 _Predicate __pred, bidirectional_iterator_tag)
1512 {
1513 while (true)
1514 {
1515 while (true)
1516 if (__first == __last)
1517 return __first;
1518 else if (__pred(*__first))
1519 ++__first;
1520 else
1521 break;
1522 --__last;
1523 while (true)
1524 if (__first == __last)
1525 return __first;
1526 else if (!bool(__pred(*__last)))
1527 --__last;
1528 else
1529 break;
1530 std::iter_swap(__first, __last);
1531 ++__first;
1532 }
1533 }
1534
1535 // partition
1536
1537 /// Stable-partition workhorse.  If the range fits in __buffer, does a single
1537 /// stable pass moving true elements in place and false elements to the
1537 /// buffer, then moves the buffer back after the trues.  Otherwise recurses
1537 /// on both halves and stitches the results together with std::rotate.
1538 /// Requires __first != __last and !__pred(__first)
1539 /// and __len == distance(__first, __last).
1540 ///
1541 /// !__pred(__first) allows us to guarantee that we don't
1542 /// move-assign an element onto itself.
1543 template<typename _ForwardIterator, typename _Pointer, typename _Predicate,
1544 typename _Distance>
1545 _ForwardIterator
1546 __stable_partition_adaptive(_ForwardIterator __first,
1547 _ForwardIterator __last,
1548 _Predicate __pred, _Distance __len,
1549 _Pointer __buffer,
1550 _Distance __buffer_size)
1551 {
// A 1-element range satisfying the !__pred(__first) precondition is already
// partitioned with the split point at __first.
1552 if (__len == 1)
1553 return __first;
1554
1555 if (__len <= __buffer_size)
1556 {
1557 _ForwardIterator __result1 = __first;
1558 _Pointer __result2 = __buffer;
1559
1560 // The precondition guarantees that !__pred(__first), so
1561 // move that element to the buffer before starting the loop.
1562 // This ensures that we only call __pred once per element.
1563 *__result2 = _GLIBCXX_MOVE(*__first)std::move(*__first);
1564 ++__result2;
1565 ++__first;
1566 for (; __first != __last; ++__first)
1567 if (__pred(__first))
1568 {
1569 *__result1 = _GLIBCXX_MOVE(*__first)std::move(*__first);
1570 ++__result1;
1571 }
1572 else
1573 {
1574 *__result2 = _GLIBCXX_MOVE(*__first)std::move(*__first);
1575 ++__result2;
1576 }
1577
// Append the buffered false elements after the true block; __result1 is
// the partition point.
1578 _GLIBCXX_MOVE3(__buffer, __result2, __result1)std::move(__buffer, __result2, __result1);
1579 return __result1;
1580 }
1581
// Buffer too small: divide and conquer.
1582 _ForwardIterator __middle = __first;
1583 std::advance(__middle, __len / 2);
1584 _ForwardIterator __left_split =
1585 std::__stable_partition_adaptive(__first, __middle, __pred,
1586 __len / 2, __buffer,
1587 __buffer_size);
1588
1589 // Advance past true-predicate values to satisfy this
1590 // function's preconditions.
1591 _Distance __right_len = __len - __len / 2;
1592 _ForwardIterator __right_split =
1593 std::__find_if_not_n(__middle, __right_len, __pred);
1594
// NOTE(review): __find_if_not_n decrements __right_len as it scans
// (presumably — it is declared elsewhere); confirm against its definition.
1595 if (__right_len)
1596 __right_split =
1597 std::__stable_partition_adaptive(__right_split, __last, __pred,
1598 __right_len,
1599 __buffer, __buffer_size);
1600
// Rotate the left half's false block past the right half's true block,
// then compute the final split point.
1601 std::rotate(__left_split, __middle, __right_split);
1602 std::advance(__left_split, std::distance(__middle, __right_split));
1603 return __left_split;
1604 }
1605
// Sets up stable_partition: skips the already-true prefix, acquires a
// temporary buffer (which may be smaller than requested, or empty, under
// memory pressure), and delegates to the adaptive helper.
1606 template<typename _ForwardIterator, typename _Predicate>
1607 _ForwardIterator
1608 __stable_partition(_ForwardIterator __first, _ForwardIterator __last,
1609 _Predicate __pred)
1610 {
1611 __first = std::__find_if_not(__first, __last, __pred);
1612
1613 if (__first == __last)
1614 return __first;
1615
1616 typedef typename iterator_traits<_ForwardIterator>::value_type
1617 _ValueType;
1618 typedef typename iterator_traits<_ForwardIterator>::difference_type
1619 _DistanceType;
1620
1621 _Temporary_buffer<_ForwardIterator, _ValueType> __buf(__first, __last);
1622 return
1623 std::__stable_partition_adaptive(__first, __last, __pred,
1624 _DistanceType(__buf.requested_size()),
1625 __buf.begin(),
1626 _DistanceType(__buf.size()));
1627 }
1628
1629 /**
1630 * @brief Move elements for which a predicate is true to the beginning
1631 * of a sequence, preserving relative ordering.
1632 * @ingroup mutating_algorithms
1633 * @param __first A forward iterator.
1634 * @param __last A forward iterator.
1635 * @param __pred A predicate functor.
1636 * @return An iterator @p middle such that @p __pred(i) is true for each
1637 * iterator @p i in the range @p [first,middle) and false for each @p i
1638 * in the range @p [middle,last).
1639 *
1640 * Performs the same function as @p partition() with the additional
1641 * guarantee that the relative ordering of elements in each group is
1642 * preserved, so any two elements @p x and @p y in the range
1643 * @p [__first,__last) such that @p __pred(x)==__pred(y) will have the same
1644 * relative ordering after calling @p stable_partition().
1645 */
// Public std::stable_partition: wraps the user predicate in the internal
// iterator-taking adapter and forwards to __stable_partition.
1646 template<typename _ForwardIterator, typename _Predicate>
1647 inline _ForwardIterator
1648 stable_partition(_ForwardIterator __first, _ForwardIterator __last,
1649 _Predicate __pred)
1650 {
1651 // concept requirements
1652 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
1653 _ForwardIterator>)
1654 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
1655 typename iterator_traits<_ForwardIterator>::value_type>)
1656 __glibcxx_requires_valid_range(__first, __last);
1657
1658 return std::__stable_partition(__first, __last,
1659 __gnu_cxx::__ops::__pred_iter(__pred));
1660 }
1661
1662 /// Heap-select: after this runs, [__first,__middle) holds the smallest
1662 /// (per __comp) elements of [__first,__last), arranged as a max-heap.
1662 /// Used by partial_sort and by introselect's depth-limit fallback.
1663 template<typename _RandomAccessIterator, typename _Compare>
1664 void
1665 __heap_select(_RandomAccessIterator __first,
1666 _RandomAccessIterator __middle,
1667 _RandomAccessIterator __last, _Compare __comp)
1668 {
1669 std::__make_heap(__first, __middle, __comp);
// Any element beyond __middle that beats the heap root displaces it.
1670 for (_RandomAccessIterator __i = __middle; __i < __last; ++__i)
1671 if (__comp(__i, __first))
1672 std::__pop_heap(__first, __middle, __i, __comp);
1673 }
1674
1675 // partial_sort
1676
// Copies as many input elements as fit into [__result_first,__result_last),
// heapifies them, then streams the remaining input through the heap so the
// result holds the smallest N elements; finally sorts the heap.  Returns the
// end of the populated result range.
1677 template<typename _InputIterator, typename _RandomAccessIterator,
1678 typename _Compare>
1679 _RandomAccessIterator
1680 __partial_sort_copy(_InputIterator __first, _InputIterator __last,
1681 _RandomAccessIterator __result_first,
1682 _RandomAccessIterator __result_last,
1683 _Compare __comp)
1684 {
1685 typedef typename iterator_traits<_InputIterator>::value_type
1686 _InputValueType;
1687 typedef iterator_traits<_RandomAccessIterator> _RItTraits;
1688 typedef typename _RItTraits::difference_type _DistanceType;
1689
// Empty destination: nothing to do.
1690 if (__result_first == __result_last)
1691 return __result_last;
1692 _RandomAccessIterator __result_real_last = __result_first;
1693 while (__first != __last && __result_real_last != __result_last)
1694 {
1695 *__result_real_last = *__first;
1696 ++__result_real_last;
1697 ++__first;
1698 }
1699
1700 std::__make_heap(__result_first, __result_real_last, __comp);
1701 while (__first != __last)
1702 {
// If the next input element is smaller than the heap's max, sift it in,
// replacing the current maximum.
1703 if (__comp(__first, __result_first))
1704 std::__adjust_heap(__result_first, _DistanceType(0),
1705 _DistanceType(__result_real_last
1706 - __result_first),
1707 _InputValueType(*__first), __comp);
1708 ++__first;
1709 }
1710 std::__sort_heap(__result_first, __result_real_last, __comp);
1711 return __result_real_last;
1712 }
1713
1714 /**
1715 * @brief Copy the smallest elements of a sequence.
1716 * @ingroup sorting_algorithms
1717 * @param __first An iterator.
1718 * @param __last Another iterator.
1719 * @param __result_first A random-access iterator.
1720 * @param __result_last Another random-access iterator.
1721 * @return An iterator indicating the end of the resulting sequence.
1722 *
1723 * Copies and sorts the smallest N values from the range @p [__first,__last)
1724 * to the range beginning at @p __result_first, where the number of
1725 * elements to be copied, @p N, is the smaller of @p (__last-__first) and
1726 * @p (__result_last-__result_first).
1727 * After the sort if @e i and @e j are iterators in the range
1728 * @p [__result_first,__result_first+N) such that i precedes j then
1729 * *j<*i is false.
1730 * The value returned is @p __result_first+N.
1731 */
// Public partial_sort_copy using operator< (via the internal
// __iter_less_iter comparison adapter).
1732 template<typename _InputIterator, typename _RandomAccessIterator>
1733 inline _RandomAccessIterator
1734 partial_sort_copy(_InputIterator __first, _InputIterator __last,
1735 _RandomAccessIterator __result_first,
1736 _RandomAccessIterator __result_last)
1737 {
1738#ifdef _GLIBCXX_CONCEPT_CHECKS
1739 typedef typename iterator_traits<_InputIterator>::value_type
1740 _InputValueType;
1741 typedef typename iterator_traits<_RandomAccessIterator>::value_type
1742 _OutputValueType;
1743#endif
1744
1745 // concept requirements
1746 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
1747 __glibcxx_function_requires(_ConvertibleConcept<_InputValueType,
1748 _OutputValueType>)
1749 __glibcxx_function_requires(_LessThanOpConcept<_InputValueType,
1750 _OutputValueType>)
1751 __glibcxx_function_requires(_LessThanComparableConcept<_OutputValueType>)
1752 __glibcxx_requires_valid_range(__first, __last);
1753 __glibcxx_requires_irreflexive(__first, __last);
1754 __glibcxx_requires_valid_range(__result_first, __result_last);
1755
1756 return std::__partial_sort_copy(__first, __last,
1757 __result_first, __result_last,
1758 __gnu_cxx::__ops::__iter_less_iter());
1759 }
1760
1761 /**
1762 * @brief Copy the smallest elements of a sequence using a predicate for
1763 * comparison.
1764 * @ingroup sorting_algorithms
1765 * @param __first An input iterator.
1766 * @param __last Another input iterator.
1767 * @param __result_first A random-access iterator.
1768 * @param __result_last Another random-access iterator.
1769 * @param __comp A comparison functor.
1770 * @return An iterator indicating the end of the resulting sequence.
1771 *
1772 * Copies and sorts the smallest N values from the range @p [__first,__last)
1773 * to the range beginning at @p result_first, where the number of
1774 * elements to be copied, @p N, is the smaller of @p (__last-__first) and
1775 * @p (__result_last-__result_first).
1776 * After the sort if @e i and @e j are iterators in the range
1777 * @p [__result_first,__result_first+N) such that i precedes j then
1778 * @p __comp(*j,*i) is false.
1779 * The value returned is @p __result_first+N.
1780 */
// Public partial_sort_copy taking a user comparator, wrapped in the
// internal __iter_comp_iter adapter before delegation.
1781 template<typename _InputIterator, typename _RandomAccessIterator,
1782 typename _Compare>
1783 inline _RandomAccessIterator
1784 partial_sort_copy(_InputIterator __first, _InputIterator __last,
1785 _RandomAccessIterator __result_first,
1786 _RandomAccessIterator __result_last,
1787 _Compare __comp)
1788 {
1789#ifdef _GLIBCXX_CONCEPT_CHECKS
1790 typedef typename iterator_traits<_InputIterator>::value_type
1791 _InputValueType;
1792 typedef typename iterator_traits<_RandomAccessIterator>::value_type
1793 _OutputValueType;
1794#endif
1795
1796 // concept requirements
1797 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
1798 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
1799 _RandomAccessIterator>)
1800 __glibcxx_function_requires(_ConvertibleConcept<_InputValueType,
1801 _OutputValueType>)
1802 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
1803 _InputValueType, _OutputValueType>)
1804 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
1805 _OutputValueType, _OutputValueType>)
1806 __glibcxx_requires_valid_range(__first, __last);
1807 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
1808 __glibcxx_requires_valid_range(__result_first, __result_last);
1809
1810 return std::__partial_sort_copy(__first, __last,
1811 __result_first, __result_last,
1812 __gnu_cxx::__ops::__iter_comp_iter(__comp));
1813 }
1814
1815 /// Insert *__last into the sorted run ending just before it by shifting
1815 /// larger elements right.  "Unguarded": no begin-of-range check — the
1815 /// caller guarantees an element smaller than __val exists to stop the loop.
1816 template<typename _RandomAccessIterator, typename _Compare>
1817 void
1818 __unguarded_linear_insert(_RandomAccessIterator __last,
1819 _Compare __comp)
1820 {
1821 typename iterator_traits<_RandomAccessIterator>::value_type
1822 __val = _GLIBCXX_MOVE(*__last)std::move(*__last);
1823 _RandomAccessIterator __next = __last;
1824 --__next;
// Shift elements greater than __val one slot to the right.
1825 while (__comp(__val, __next))
1826 {
1827 *__last = _GLIBCXX_MOVE(*__next)std::move(*__next);
1828 __last = __next;
1829 --__next;
1830 }
1831 *__last = _GLIBCXX_MOVE(__val)std::move(__val);
1832 }
1833
1834 /// Plain insertion sort.  An element smaller than *__first is handled with
1834 /// a bulk move_backward (it must land at the very front); everything else
1834 /// goes through the unguarded insert, with *__first acting as sentinel.
1835 template<typename _RandomAccessIterator, typename _Compare>
1836 void
1837 __insertion_sort(_RandomAccessIterator __first,
1838 _RandomAccessIterator __last, _Compare __comp)
1839 {
1840 if (__first == __last) return;
1841
1842 for (_RandomAccessIterator __i = __first + 1; __i != __last; ++__i)
1843 {
1844 if (__comp(__i, __first))
1845 {
1846 typename iterator_traits<_RandomAccessIterator>::value_type
1847 __val = _GLIBCXX_MOVE(*__i)std::move(*__i);
1848 _GLIBCXX_MOVE_BACKWARD3(__first, __i, __i + 1)std::move_backward(__first, __i, __i + 1);
1849 *__first = _GLIBCXX_MOVE(__val)std::move(__val);
1850 }
1851 else
1852 std::__unguarded_linear_insert(__i,
1853 __gnu_cxx::__ops::__val_comp_iter(__comp));
1854 }
1855 }
1856
1857 /// Insertion sort without front-of-range guards: valid only when every
1857 /// element in [__first,__last) is known to be preceded (somewhere before
1857 /// __first) by an element no greater than it, as introsort arranges.
1858 template<typename _RandomAccessIterator, typename _Compare>
1859 inline void
1860 __unguarded_insertion_sort(_RandomAccessIterator __first,
1861 _RandomAccessIterator __last, _Compare __comp)
1862 {
1863 for (_RandomAccessIterator __i = __first; __i != __last; ++__i)
1864 std::__unguarded_linear_insert(__i,
1865 __gnu_cxx::__ops::__val_comp_iter(__comp));
1866 }
1867
1868 /**
1869 * Cutoff size for the sort routines: subranges no longer than this are
1870 * left for insertion sort (see __introsort_loop and
1871 * __final_insertion_sort below, which both compare against it).
1871 */
1872 enum { _S_threshold = 16 };
1873
1874 /// Final pass of introsort: the first _S_threshold elements get a guarded
1874 /// insertion sort; the rest can use the unguarded variant because introsort
1874 /// left each partition's elements no smaller than everything before it.
1875 template<typename _RandomAccessIterator, typename _Compare>
1876 void
1877 __final_insertion_sort(_RandomAccessIterator __first,
1878 _RandomAccessIterator __last, _Compare __comp)
1879 {
1880 if (__last - __first > int(_S_threshold))
1881 {
1882 std::__insertion_sort(__first, __first + int(_S_threshold), __comp);
1883 std::__unguarded_insertion_sort(__first + int(_S_threshold), __last,
1884 __comp);
1885 }
1886 else
1887 std::__insertion_sort(__first, __last, __comp);
1888 }
1889
1890 /// Hoare-style partition around *__pivot with no bounds checks: the caller
1890 /// must guarantee sentinels exist on both sides (see
1890 /// __unguarded_partition_pivot, which places the median at __first).
1891 template<typename _RandomAccessIterator, typename _Compare>
1892 _RandomAccessIterator
1893 __unguarded_partition(_RandomAccessIterator __first,
1894 _RandomAccessIterator __last,
1895 _RandomAccessIterator __pivot, _Compare __comp)
1896 {
1897 while (true)
1898 {
1899 while (__comp(__first, __pivot))
1900 ++__first;
1901 --__last;
1902 while (__comp(__pivot, __last))
1903 --__last;
1904 if (!(__first < __last))
1905 return __first;
1906 std::iter_swap(__first, __last);
1907 ++__first;
1908 }
1909 }
1910
1911 /// Median-of-three pivot selection: moves the median of {second, middle,
1911 /// last-1} into *__first, which then serves as both the pivot and the left
1911 /// sentinel for the unguarded partition over [__first+1,__last).
1912 template<typename _RandomAccessIterator, typename _Compare>
1913 inline _RandomAccessIterator
1914 __unguarded_partition_pivot(_RandomAccessIterator __first,
1915 _RandomAccessIterator __last, _Compare __comp)
1916 {
1917 _RandomAccessIterator __mid = __first + (__last - __first) / 2;
1918 std::__move_median_to_first(__first, __first + 1, __mid, __last - 1,
1919 __comp);
1920 return std::__unguarded_partition(__first + 1, __last, __first, __comp);
1921 }
1922
// partial_sort = heap-select the smallest elements into [__first,__middle),
// then sort that heap.
1923 template<typename _RandomAccessIterator, typename _Compare>
1924 inline void
1925 __partial_sort(_RandomAccessIterator __first,
1926 _RandomAccessIterator __middle,
1927 _RandomAccessIterator __last,
1928 _Compare __comp)
1929 {
1930 std::__heap_select(__first, __middle, __last, __comp);
1931 std::__sort_heap(__first, __middle, __comp);
1932 }
1933
1934 /// Introsort core: quicksort with median-of-three pivots, recursing on the
1934 /// right partition and looping on the left.  Subranges <= _S_threshold are
1934 /// left unsorted for the final insertion-sort pass; when __depth_limit
1934 /// hits zero the subrange is heapsorted instead (via __partial_sort over
1934 /// the whole subrange), bounding the worst case at O(n log n).
1935 template<typename _RandomAccessIterator, typename _Size, typename _Compare>
1936 void
1937 __introsort_loop(_RandomAccessIterator __first,
1938 _RandomAccessIterator __last,
1939 _Size __depth_limit, _Compare __comp)
1940 {
1941 while (__last - __first > int(_S_threshold))
1942 {
1943 if (__depth_limit == 0)
1944 {
1945 std::__partial_sort(__first, __last, __last, __comp);
1946 return;
1947 }
1948 --__depth_limit;
1949 _RandomAccessIterator __cut =
1950 std::__unguarded_partition_pivot(__first, __last, __comp);
1951 std::__introsort_loop(__cut, __last, __depth_limit, __comp);
// Tail-call elimination by hand: continue with the left partition.
1952 __last = __cut;
1953 }
1954 }
1955
1956 // sort
1957
// std::sort implementation: introsort with depth limit 2*floor(lg(n)),
// followed by a final insertion-sort sweep over the nearly-sorted result.
1958 template<typename _RandomAccessIterator, typename _Compare>
1959 inline void
1960 __sort(_RandomAccessIterator __first, _RandomAccessIterator __last,
1961 _Compare __comp)
1962 {
1963 if (__first != __last)
1964 {
1965 std::__introsort_loop(__first, __last,
1966 std::__lg(__last - __first) * 2,
1967 __comp);
1968 std::__final_insertion_sort(__first, __last, __comp);
1969 }
1970 }
1971
// nth_element implementation (introselect): quickselect that recurses into
// only the partition containing __nth; falls back to heap-select when the
// depth limit is exhausted, and finishes tiny ranges (<= 3 elements) with
// insertion sort.
1972 template<typename _RandomAccessIterator, typename _Size, typename _Compare>
1973 void
1974 __introselect(_RandomAccessIterator __first, _RandomAccessIterator __nth,
1975 _RandomAccessIterator __last, _Size __depth_limit,
1976 _Compare __comp)
1977 {
1978 while (__last - __first > 3)
1979 {
1980 if (__depth_limit == 0)
1981 {
1982 std::__heap_select(__first, __nth + 1, __last, __comp);
1983 // Place the nth largest element in its final position.
1984 std::iter_swap(__first, __nth);
1985 return;
1986 }
1987 --__depth_limit;
1988 _RandomAccessIterator __cut =
1989 std::__unguarded_partition_pivot(__first, __last, __comp);
1990 if (__cut <= __nth)
1991 __first = __cut;
1992 else
1993 __last = __cut;
1994 }
1995 std::__insertion_sort(__first, __last, __comp);
1996 }
1997
1998 // nth_element
1999
2000 // lower_bound moved to stl_algobase.h
2001
2002 /**
2003 * @brief Finds the first position in which @p __val could be inserted
2004 * without changing the ordering.
2005 * @ingroup binary_search_algorithms
2006 * @param __first An iterator.
2007 * @param __last Another iterator.
2008 * @param __val The search term.
2009 * @param __comp A functor to use for comparisons.
2010 * @return An iterator pointing to the first element <em>not less
2011 * than</em> @p __val, or end() if every element is less
2012 * than @p __val.
2013 * @ingroup binary_search_algorithms
2014 *
2015 * The comparison function should have the same effects on ordering as
2016 * the function used for the initial sort.
2017 */
// Public lower_bound with comparator; wraps __comp in the iterator/value
// adapter and forwards to the internal __lower_bound.
2018 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2019 inline _ForwardIterator
2020 lower_bound(_ForwardIterator __first, _ForwardIterator __last,
2021 const _Tp& __val, _Compare __comp)
2022 {
2023 // concept requirements
2024 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2025 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2026 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
2027 __glibcxx_requires_partitioned_lower_pred(__first, __last,
2028 __val, __comp);
2029
2030 return std::__lower_bound(__first, __last, __val,
2031 __gnu_cxx::__ops::__iter_comp_val(__comp));
2032 }
2033
// Binary search for the first position where __comp(__val, *it) holds,
// i.e. the end of the equal run.  __comp takes (value, iterator).
2034 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2035 _ForwardIterator
2036 __upper_bound(_ForwardIterator __first, _ForwardIterator __last,
2037 const _Tp& __val, _Compare __comp)
2038 {
2039 typedef typename iterator_traits<_ForwardIterator>::difference_type
2040 _DistanceType;
2041
2042 _DistanceType __len = std::distance(__first, __last);
2043
2044 while (__len > 0)
2045 {
2046 _DistanceType __half = __len >> 1;
2047 _ForwardIterator __middle = __first;
2048 std::advance(__middle, __half);
// __val < *__middle: answer lies in the left half (keep __middle).
2049 if (__comp(__val, __middle))
2050 __len = __half;
2051 else
2052 {
2053 __first = __middle;
2054 ++__first;
2055 __len = __len - __half - 1;
2056 }
2057 }
2058 return __first;
2059 }
2060
2061 /**
2062 * @brief Finds the last position in which @p __val could be inserted
2063 * without changing the ordering.
2064 * @ingroup binary_search_algorithms
2065 * @param __first An iterator.
2066 * @param __last Another iterator.
2067 * @param __val The search term.
2068 * @return An iterator pointing to the first element greater than @p __val,
2069 * or end() if no elements are greater than @p __val.
2070 * @ingroup binary_search_algorithms
2071 */
// Public upper_bound using operator< (value < element), via the
// __val_less_iter adapter.
2072 template<typename _ForwardIterator, typename _Tp>
2073 inline _ForwardIterator
2074 upper_bound(_ForwardIterator __first, _ForwardIterator __last,
2075 const _Tp& __val)
2076 {
2077 // concept requirements
2078 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2079 __glibcxx_function_requires(_LessThanOpConcept<
2080 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2081 __glibcxx_requires_partitioned_upper(__first, __last, __val);
2082
2083 return std::__upper_bound(__first, __last, __val,
2084 __gnu_cxx::__ops::__val_less_iter());
2085 }
2086
2087 /**
2088 * @brief Finds the last position in which @p __val could be inserted
2089 * without changing the ordering.
2090 * @ingroup binary_search_algorithms
2091 * @param __first An iterator.
2092 * @param __last Another iterator.
2093 * @param __val The search term.
2094 * @param __comp A functor to use for comparisons.
2095 * @return An iterator pointing to the first element greater than @p __val,
2096 * or end() if no elements are greater than @p __val.
2097 * @ingroup binary_search_algorithms
2098 *
2099 * The comparison function should have the same effects on ordering as
2100 * the function used for the initial sort.
2101 */
// Public upper_bound with comparator; note the adapter order is
// value-then-iterator (__val_comp_iter), matching __upper_bound's calls.
2102 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2103 inline _ForwardIterator
2104 upper_bound(_ForwardIterator __first, _ForwardIterator __last,
2105 const _Tp& __val, _Compare __comp)
2106 {
2107 // concept requirements
2108 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2109 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2110 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2111 __glibcxx_requires_partitioned_upper_pred(__first, __last,
2112 __val, __comp);
2113
2114 return std::__upper_bound(__first, __last, __val,
2115 __gnu_cxx::__ops::__val_comp_iter(__comp));
2116 }
2117
// equal_range core: binary search that narrows until it lands inside the
// run of elements equivalent to __val, then finds the run's two boundaries
// with one __lower_bound on the left part and one __upper_bound on the
// right — cheaper than two full-range searches.
2118 template<typename _ForwardIterator, typename _Tp,
2119 typename _CompareItTp, typename _CompareTpIt>
2120 pair<_ForwardIterator, _ForwardIterator>
2121 __equal_range(_ForwardIterator __first, _ForwardIterator __last,
2122 const _Tp& __val,
2123 _CompareItTp __comp_it_val, _CompareTpIt __comp_val_it)
2124 {
2125 typedef typename iterator_traits<_ForwardIterator>::difference_type
2126 _DistanceType;
2127
2128 _DistanceType __len = std::distance(__first, __last);
2129
2130 while (__len > 0)
2131 {
2132 _DistanceType __half = __len >> 1;
2133 _ForwardIterator __middle = __first;
2134 std::advance(__middle, __half);
2135 if (__comp_it_val(__middle, __val))
2136 {
2137 __first = __middle;
2138 ++__first;
2139 __len = __len - __half - 1;
2140 }
2141 else if (__comp_val_it(__val, __middle))
2142 __len = __half;
2143 else
2144 {
// *__middle is equivalent to __val: bound the run from both sides.
2145 _ForwardIterator __left
2146 = std::__lower_bound(__first, __middle, __val, __comp_it_val);
2147 std::advance(__first, __len);
2148 _ForwardIterator __right
2149 = std::__upper_bound(++__middle, __first, __val, __comp_val_it);
2150 return pair<_ForwardIterator, _ForwardIterator>(__left, __right);
2151 }
2152 }
// No equivalent element: both bounds coincide at the insertion point.
2153 return pair<_ForwardIterator, _ForwardIterator>(__first, __first);
2154 }
2155
2156 /**
2157 * @brief Finds the largest subrange in which @p __val could be inserted
2158 * at any place in it without changing the ordering.
2159 * @ingroup binary_search_algorithms
2160 * @param __first An iterator.
2161 * @param __last Another iterator.
2162 * @param __val The search term.
2163 * @return An pair of iterators defining the subrange.
2164 * @ingroup binary_search_algorithms
2165 *
2166 * This is equivalent to
2167 * @code
2168 * std::make_pair(lower_bound(__first, __last, __val),
2169 * upper_bound(__first, __last, __val))
2170 * @endcode
2171 * but does not actually call those functions.
2172 */
// Public equal_range using operator<; supplies both adapter directions
// (element<value and value<element) required by __equal_range.
2173 template<typename _ForwardIterator, typename _Tp>
2174 inline pair<_ForwardIterator, _ForwardIterator>
2175 equal_range(_ForwardIterator __first, _ForwardIterator __last,
2176 const _Tp& __val)
2177 {
2178 // concept requirements
2179 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2180 __glibcxx_function_requires(_LessThanOpConcept<
2181 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
2182 __glibcxx_function_requires(_LessThanOpConcept<
2183 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2184 __glibcxx_requires_partitioned_lower(__first, __last, __val);
2185 __glibcxx_requires_partitioned_upper(__first, __last, __val);
2186
2187 return std::__equal_range(__first, __last, __val,
2188 __gnu_cxx::__ops::__iter_less_val(),
2189 __gnu_cxx::__ops::__val_less_iter());
2190 }
2191
2192 /**
2193 * @brief Finds the largest subrange in which @p __val could be inserted
2194 * at any place in it without changing the ordering.
2195 * @param __first An iterator.
2196 * @param __last Another iterator.
2197 * @param __val The search term.
2198 * @param __comp A functor to use for comparisons.
2199 * @return An pair of iterators defining the subrange.
2200 * @ingroup binary_search_algorithms
2201 *
2202 * This is equivalent to
2203 * @code
2204 * std::make_pair(lower_bound(__first, __last, __val, __comp),
2205 * upper_bound(__first, __last, __val, __comp))
2206 * @endcode
2207 * but does not actually call those functions.
2208 */
// Public equal_range with comparator; wraps __comp in both argument-order
// adapters expected by __equal_range.
2209 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2210 inline pair<_ForwardIterator, _ForwardIterator>
2211 equal_range(_ForwardIterator __first, _ForwardIterator __last,
2212 const _Tp& __val, _Compare __comp)
2213 {
2214 // concept requirements
2215 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2216 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2217 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
2218 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2219 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2220 __glibcxx_requires_partitioned_lower_pred(__first, __last,
2221 __val, __comp);
2222 __glibcxx_requires_partitioned_upper_pred(__first, __last,
2223 __val, __comp);
2224
2225 return std::__equal_range(__first, __last, __val,
2226 __gnu_cxx::__ops::__iter_comp_val(__comp),
2227 __gnu_cxx::__ops::__val_comp_iter(__comp));
2228 }
2229
2230 /**
2231 * @brief Determines whether an element exists in a range.
2232 * @ingroup binary_search_algorithms
2233 * @param __first An iterator.
2234 * @param __last Another iterator.
2235 * @param __val The search term.
2236 * @return True if @p __val (or its equivalent) is in [@p
2237 * __first,@p __last ].
2238 *
2239 * Note that this does not actually return an iterator to @p __val. For
2240 * that, use std::find or a container's specialized find member functions.
2241 */
// binary_search = lower_bound + one extra equivalence check: found iff the
// bound is in range and __val is not less than the element there.
2242 template<typename _ForwardIterator, typename _Tp>
2243 bool
2244 binary_search(_ForwardIterator __first, _ForwardIterator __last,
2245 const _Tp& __val)
2246 {
2247 // concept requirements
2248 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2249 __glibcxx_function_requires(_LessThanOpConcept<
2250 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2251 __glibcxx_requires_partitioned_lower(__first, __last, __val);
2252 __glibcxx_requires_partitioned_upper(__first, __last, __val);
2253
2254 _ForwardIterator __i
2255 = std::__lower_bound(__first, __last, __val,
2256 __gnu_cxx::__ops::__iter_less_val());
2257 return __i != __last && !(__val < *__i);
2258 }
2259
2260 /**
2261 * @brief Determines whether an element exists in a range.
2262 * @ingroup binary_search_algorithms
2263 * @param __first An iterator.
2264 * @param __last Another iterator.
2265 * @param __val The search term.
2266 * @param __comp A functor to use for comparisons.
2267 * @return True if @p __val (or its equivalent) is in @p [__first,__last].
2268 *
2269 * Note that this does not actually return an iterator to @p __val. For
2270 * that, use std::find or a container's specialized find member functions.
2271 *
2272 * The comparison function should have the same effects on ordering as
2273 * the function used for the initial sort.
2274 */
// Comparator form of binary_search: same lower_bound-plus-check shape,
// with equivalence tested as !__comp(__val, *__i).
2275 template<typename _ForwardIterator, typename _Tp, typename _Compare>
2276 bool
2277 binary_search(_ForwardIterator __first, _ForwardIterator __last,
2278 const _Tp& __val, _Compare __comp)
2279 {
2280 // concept requirements
2281 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
2282 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
2283 _Tp, typename iterator_traits<_ForwardIterator>::value_type>)
2284 __glibcxx_requires_partitioned_lower_pred(__first, __last,
2285 __val, __comp);
2286 __glibcxx_requires_partitioned_upper_pred(__first, __last,
2287 __val, __comp);
2288
2289 _ForwardIterator __i
2290 = std::__lower_bound(__first, __last, __val,
2291 __gnu_cxx::__ops::__iter_comp_val(__comp));
2292 return __i != __last && !bool(__comp(__val, *__i));
2293 }
2294
2295 // merge
2296
2297 /// Forward move-merge of two sorted ranges into __result.  Takes from
2297 /// range 1 on ties (stability).  Only leftovers from range 1 are flushed
2297 /// at the end — callers arrange that range 2's leftovers are already in
2297 /// place (presumably; confirm against the __merge_adaptive call sites,
2297 /// which are outside this view).
2298 template<typename _InputIterator1, typename _InputIterator2,
2299 typename _OutputIterator, typename _Compare>
2300 void
2301 __move_merge_adaptive(_InputIterator1 __first1, _InputIterator1 __last1,
2302 _InputIterator2 __first2, _InputIterator2 __last2,
2303 _OutputIterator __result, _Compare __comp)
2304 {
2305 while (__first1 != __last1 && __first2 != __last2)
2306 {
2307 if (__comp(__first2, __first1))
2308 {
2309 *__result = _GLIBCXX_MOVE(*__first2)std::move(*__first2);
2310 ++__first2;
2311 }
2312 else
2313 {
2314 *__result = _GLIBCXX_MOVE(*__first1)std::move(*__first1);
2315 ++__first1;
2316 }
2317 ++__result;
2318 }
2319 if (__first1 != __last1)
2320 _GLIBCXX_MOVE3(__first1, __last1, __result)std::move(__first1, __last1, __result);
2321 }
2322
/// Helper for the __merge_adaptive routines: merge two sorted ranges
/// backwards, writing each element (by move) just before __result.
/// If range two runs out first, the leftover prefix of range one is
/// deliberately left untouched — the caller guarantees it is already
/// in its final position.
template<typename _BidirectionalIterator1, typename _BidirectionalIterator2,
         typename _BidirectionalIterator3, typename _Compare>
  void
  __move_merge_adaptive_backward(_BidirectionalIterator1 __first1,
                                 _BidirectionalIterator1 __last1,
                                 _BidirectionalIterator2 __first2,
                                 _BidirectionalIterator2 __last2,
                                 _BidirectionalIterator3 __result,
                                 _Compare __comp)
  {
    // Degenerate inputs: only one range has content.
    if (__first1 == __last1)
      {
        std::move_backward(__first2, __last2, __result);
        return;
      }
    if (__first2 == __last2)
      return;

    // Work from the back of both ranges.
    --__last1;
    --__last2;
    for (;;)
      {
        if (__comp(__last2, __last1))
          {
            // Element from range one is the larger: emit it.
            *--__result = std::move(*__last1);
            if (__first1 == __last1)
              {
                // Range one exhausted; flush the rest of range two.
                std::move_backward(__first2, ++__last2, __result);
                return;
              }
            --__last1;
          }
        else
          {
            // Ties go to range two here, which preserves stability
            // when merging from the back.
            *--__result = std::move(*__last2);
            if (__first2 == __last2)
              return;
            --__last2;
          }
      }
  }
2365
/// Helper for the merge routines: rotate [__first, __last) about
/// __middle, using __buffer as scratch space whenever the smaller side
/// fits into it; otherwise fall back to an in-place std::rotate.
/// Returns an iterator to the new position of the element that was
/// initially at __first (i.e. the new middle).
template<typename _BidirectionalIterator1, typename _BidirectionalIterator2,
         typename _Distance>
  _BidirectionalIterator1
  __rotate_adaptive(_BidirectionalIterator1 __first,
                    _BidirectionalIterator1 __middle,
                    _BidirectionalIterator1 __last,
                    _Distance __len1, _Distance __len2,
                    _BidirectionalIterator2 __buffer,
                    _Distance __buffer_size)
  {
    if (__len1 > __len2 && __len2 <= __buffer_size)
      {
        // The right side is the smaller one and fits in the buffer.
        if (!__len2)
          return __first;
        _BidirectionalIterator2 __buffer_end
          = std::move(__middle, __last, __buffer);
        std::move_backward(__first, __middle, __last);
        return std::move(__buffer, __buffer_end, __first);
      }
    if (__len1 <= __buffer_size)
      {
        // The left side fits in the buffer.
        if (!__len1)
          return __last;
        _BidirectionalIterator2 __buffer_end
          = std::move(__first, __middle, __buffer);
        std::move(__middle, __last, __first);
        return std::move_backward(__buffer, __buffer_end, __last);
      }
    // Neither side fits: rotate in place and compute the new middle.
    std::rotate(__first, __middle, __last);
    std::advance(__first, std::distance(__middle, __last));
    return __first;
  }
2407
/// Helper for the merge routines: merge the adjacent sorted ranges
/// [__first, __middle) and [__middle, __last) stably, using __buffer
/// (of __buffer_size elements) when either range fits in it, and
/// otherwise recursing on a balanced split.
template<typename _BidirectionalIterator, typename _Distance,
         typename _Pointer, typename _Compare>
  void
  __merge_adaptive(_BidirectionalIterator __first,
                   _BidirectionalIterator __middle,
                   _BidirectionalIterator __last,
                   _Distance __len1, _Distance __len2,
                   _Pointer __buffer, _Distance __buffer_size,
                   _Compare __comp)
  {
    if (__len1 <= __len2 && __len1 <= __buffer_size)
      {
        // The left range fits: stash it in the buffer, merge forwards.
        _Pointer __buffer_end = std::move(__first, __middle, __buffer);
        std::__move_merge_adaptive(__buffer, __buffer_end, __middle, __last,
                                   __first, __comp);
        return;
      }
    if (__len2 <= __buffer_size)
      {
        // The right range fits: stash it, merge backwards.
        _Pointer __buffer_end = std::move(__middle, __last, __buffer);
        std::__move_merge_adaptive_backward(__first, __middle, __buffer,
                                            __buffer_end, __last, __comp);
        return;
      }
    // Neither range fits.  Split the longer one in half, find the
    // matching cut in the other (__lower_bound / __upper_bound keep
    // the merge stable), rotate the middle sections together, and
    // recurse on the two resulting sub-problems.
    _BidirectionalIterator __first_cut = __first;
    _BidirectionalIterator __second_cut = __middle;
    _Distance __len11 = 0;
    _Distance __len22 = 0;
    if (__len1 > __len2)
      {
        __len11 = __len1 / 2;
        std::advance(__first_cut, __len11);
        __second_cut
          = std::__lower_bound(__middle, __last, *__first_cut,
                               __gnu_cxx::__ops::__iter_comp_val(__comp));
        __len22 = std::distance(__middle, __second_cut);
      }
    else
      {
        __len22 = __len2 / 2;
        std::advance(__second_cut, __len22);
        __first_cut
          = std::__upper_bound(__first, __middle, *__second_cut,
                               __gnu_cxx::__ops::__val_comp_iter(__comp));
        __len11 = std::distance(__first, __first_cut);
      }

    const _BidirectionalIterator __new_middle
      = std::__rotate_adaptive(__first_cut, __middle, __second_cut,
                               __len1 - __len11, __len22, __buffer,
                               __buffer_size);
    std::__merge_adaptive(__first, __first_cut, __new_middle, __len11,
                          __len22, __buffer, __buffer_size, __comp);
    std::__merge_adaptive(__new_middle, __second_cut, __last,
                          __len1 - __len11, __len2 - __len22,
                          __buffer, __buffer_size, __comp);
  }
2468
/// Helper for the merge routines: stable in-place merge of the
/// adjacent sorted ranges [__first, __middle) and [__middle, __last)
/// using no extra storage (O(N log N) divide and conquer).
template<typename _BidirectionalIterator, typename _Distance,
         typename _Compare>
  void
  __merge_without_buffer(_BidirectionalIterator __first,
                         _BidirectionalIterator __middle,
                         _BidirectionalIterator __last,
                         _Distance __len1, _Distance __len2,
                         _Compare __comp)
  {
    // Nothing to do when either side is empty.
    if (__len1 == 0 || __len2 == 0)
      return;

    if (__len1 + __len2 == 2)
      {
        // One element on each side: a single compare-and-swap.
        if (__comp(__middle, __first))
          std::iter_swap(__first, __middle);
        return;
      }

    // Split the longer range in half and locate the matching cut in
    // the other one.  Using __lower_bound when the pivot comes from
    // the left and __upper_bound when it comes from the right keeps
    // equivalent elements in their original order (stability).
    _BidirectionalIterator __first_cut = __first;
    _BidirectionalIterator __second_cut = __middle;
    _Distance __len11 = 0;
    _Distance __len22 = 0;
    if (__len1 > __len2)
      {
        __len11 = __len1 / 2;
        std::advance(__first_cut, __len11);
        __second_cut
          = std::__lower_bound(__middle, __last, *__first_cut,
                               __gnu_cxx::__ops::__iter_comp_val(__comp));
        __len22 = std::distance(__middle, __second_cut);
      }
    else
      {
        __len22 = __len2 / 2;
        std::advance(__second_cut, __len22);
        __first_cut
          = std::__upper_bound(__first, __middle, *__second_cut,
                               __gnu_cxx::__ops::__val_comp_iter(__comp));
        __len11 = std::distance(__first, __first_cut);
      }

    // Rotate the two middle sections into place, then merge each half.
    std::rotate(__first_cut, __middle, __second_cut);
    _BidirectionalIterator __new_middle = __first_cut;
    std::advance(__new_middle, std::distance(__middle, __second_cut));
    std::__merge_without_buffer(__first, __first_cut, __new_middle,
                                __len11, __len22, __comp);
    std::__merge_without_buffer(__new_middle, __second_cut, __last,
                                __len1 - __len11, __len2 - __len22, __comp);
  }
2520
/// Dispatch helper for inplace_merge: tries to obtain a temporary
/// buffer and selects the buffered or the buffer-free merge.
template<typename _BidirectionalIterator, typename _Compare>
  void
  __inplace_merge(_BidirectionalIterator __first,
                  _BidirectionalIterator __middle,
                  _BidirectionalIterator __last,
                  _Compare __comp)
  {
    typedef typename iterator_traits<_BidirectionalIterator>::value_type
      _ValueType;
    typedef typename iterator_traits<_BidirectionalIterator>::difference_type
      _DistanceType;

    // Already merged when either sub-range is empty.
    if (__first == __middle || __middle == __last)
      return;

    const _DistanceType __len1 = std::distance(__first, __middle);
    const _DistanceType __len2 = std::distance(__middle, __last);

    // NOTE(review): assumes _Temporary_buffer may acquire fewer
    // elements than requested, or none at all — confirm in
    // bits/stl_tempbuf.h.
    typedef _Temporary_buffer<_BidirectionalIterator, _ValueType> _TmpBuf;
    _TmpBuf __buf(__first, __last);

    // No buffer at all: fall back to the in-place O(N log N) merge;
    // otherwise __merge_adaptive adapts to whatever size was obtained.
    if (__buf.begin() == 0)
      std::__merge_without_buffer
        (__first, __middle, __last, __len1, __len2, __comp);
    else
      std::__merge_adaptive
        (__first, __middle, __last, __len1, __len2, __buf.begin(),
         _DistanceType(__buf.size()), __comp);
  }
2550
/**
 *  @brief Merges two sorted ranges in place.
 *  @ingroup sorting_algorithms
 *  @param  __first   An iterator.
 *  @param  __middle  Another iterator.
 *  @param  __last    Another iterator.
 *  @return  Nothing.
 *
 *  Merges two sorted and consecutive ranges, [__first,__middle) and
 *  [__middle,__last), and puts the result in [__first,__last).  The
 *  output will be sorted.  The sort is @e stable, that is, for
 *  equivalent elements in the two ranges, elements from the first
 *  range will always come before elements from the second.
 *
 *  If enough additional memory is available, this takes
 *  (__last-__first)-1 comparisons.  Otherwise an NlogN algorithm is
 *  used, where N is distance(__first,__last).
*/
template<typename _BidirectionalIterator>
  inline void
  inplace_merge(_BidirectionalIterator __first,
                _BidirectionalIterator __middle,
                _BidirectionalIterator __last)
  {
    // concept requirements
    __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
          _BidirectionalIterator>)
    __glibcxx_function_requires(_LessThanComparableConcept<
          typename iterator_traits<_BidirectionalIterator>::value_type>)
    __glibcxx_requires_sorted(__first, __middle);
    __glibcxx_requires_sorted(__middle, __last);
    __glibcxx_requires_irreflexive(__first, __last);

    // Forward to the dispatcher with operator< wrapped as an
    // iterator-to-iterator comparison.
    std::__inplace_merge(__first, __middle, __last,
                         __gnu_cxx::__ops::__iter_less_iter());
  }
2587
/**
 *  @brief Merges two sorted ranges in place.
 *  @ingroup sorting_algorithms
 *  @param  __first   An iterator.
 *  @param  __middle  Another iterator.
 *  @param  __last    Another iterator.
 *  @param  __comp    A functor to use for comparisons.
 *  @return  Nothing.
 *
 *  Merges two sorted and consecutive ranges, [__first,__middle) and
 *  [middle,last), and puts the result in [__first,__last).  The output
 *  will be sorted.  The sort is @e stable, that is, for equivalent
 *  elements in the two ranges, elements from the first range will
 *  always come before elements from the second.
 *
 *  If enough additional memory is available, this takes
 *  (__last-__first)-1 comparisons.  Otherwise an NlogN algorithm is
 *  used, where N is distance(__first,__last).
 *
 *  The comparison function should have the same effects on ordering as
 *  the function used for the initial sort.
*/
template<typename _BidirectionalIterator, typename _Compare>
  inline void
  inplace_merge(_BidirectionalIterator __first,
                _BidirectionalIterator __middle,
                _BidirectionalIterator __last,
                _Compare __comp)
  {
    // concept requirements
    __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<
          _BidirectionalIterator>)
    __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
          typename iterator_traits<_BidirectionalIterator>::value_type,
          typename iterator_traits<_BidirectionalIterator>::value_type>)
    __glibcxx_requires_sorted_pred(__first, __middle, __comp);
    __glibcxx_requires_sorted_pred(__middle, __last, __comp);
    __glibcxx_requires_irreflexive_pred(__first, __last, __comp);

    // Forward to the dispatcher with the user comparator wrapped as an
    // iterator-to-iterator comparison.
    std::__inplace_merge(__first, __middle, __last,
                         __gnu_cxx::__ops::__iter_comp_iter(__comp));
  }
2630
2631
/// Helper for the __merge_sort_loop routines: merge two sorted input
/// ranges into __result by moving elements across; returns the end of
/// the output range.  __comp receives iterators, not values.
template<typename _InputIterator, typename _OutputIterator,
         typename _Compare>
  _OutputIterator
  __move_merge(_InputIterator __first1, _InputIterator __last1,
               _InputIterator __first2, _InputIterator __last2,
               _OutputIterator __result, _Compare __comp)
  {
    while (__first1 != __last1 && __first2 != __last2)
      {
        // Stable: equivalent elements are taken from the first range.
        if (__comp(__first2, __first1))
          {
            *__result = std::move(*__first2);
            ++__first2;
          }
        else
          {
            *__result = std::move(*__first1);
            ++__first1;
          }
        ++__result;
      }
    // Append whichever tail remains (at most one of these moves data).
    __result = std::move(__first1, __last1, __result);
    return std::move(__first2, __last2, __result);
  }
2658
/// One pass of bottom-up merge sort: merge consecutive sorted runs of
/// __step_size elements from [__first, __last) into __result.
template<typename _RandomAccessIterator1, typename _RandomAccessIterator2,
         typename _Distance, typename _Compare>
  void
  __merge_sort_loop(_RandomAccessIterator1 __first,
                    _RandomAccessIterator1 __last,
                    _RandomAccessIterator2 __result, _Distance __step_size,
                    _Compare __comp)
  {
    const _Distance __two_step = 2 * __step_size;

    // Merge full pairs of runs.
    for (; __last - __first >= __two_step; __first += __two_step)
      __result = std::__move_merge(__first, __first + __step_size,
                                   __first + __step_size,
                                   __first + __two_step,
                                   __result, __comp);

    // Fewer than two full runs remain; merge whatever is left (the
    // second "run" may be shorter than __step_size, or empty).
    __step_size = std::min(_Distance(__last - __first), __step_size);
    std::__move_merge(__first, __first + __step_size,
                      __first + __step_size, __last, __result, __comp);
  }
2682
/// Insertion-sort each successive chunk of __chunk_size elements of
/// [__first, __last); the final, possibly shorter, chunk is handled by
/// the trailing call.
template<typename _RandomAccessIterator, typename _Distance,
         typename _Compare>
  void
  __chunk_insertion_sort(_RandomAccessIterator __first,
                         _RandomAccessIterator __last,
                         _Distance __chunk_size, _Compare __comp)
  {
    for (; __last - __first >= __chunk_size; __first += __chunk_size)
      std::__insertion_sort(__first, __first + __chunk_size, __comp);
    std::__insertion_sort(__first, __last, __comp);
  }
2697
// Length of the runs that __merge_sort_with_buffer insertion-sorts
// before its merge passes begin.
enum { _S_chunk_size = 7 };
2699
2700 template<typename _RandomAccessIterator, typename _Pointer, typename _Compare>
2701 void
2702 __merge_sort_with_buffer(_RandomAccessIterator __first,
2703 _RandomAccessIterator __last,
2704 _Pointer __buffer, _Compare __comp)
2705 {
2706 typedef typename iterator_traits<_RandomAccessIterator>::difference_type
2707 _Distance;
2708
2709 const _Distance __len = __last - __first;
2710 const _Pointer __buffer_last = __buffer + __len;
2711
2712 _Distance __step_size = _S_chunk_size;
2713 std::__chunk_insertion_sort(__first, __last, __step_size, __comp);
2714
2715 while (__step_size < __len)
2716 {
2717 std::__merge_sort_loop(__first, __last, __buffer,
2718 __step_size, __comp);
2719 __step_size *= 2;
2720 std::__merge_sort_loop(__buffer, __buffer_last, __first,
2721 __step_size, __comp);
2722 __step_size *= 2;
2723 }
2724 }
2725
/// Stable sort of [__first, __last) with a scratch __buffer of
/// __buffer_size elements: recurse until each half fits the buffer,
/// sort the halves with the buffered merge sort, then merge them.
template<typename _RandomAccessIterator, typename _Pointer,
         typename _Distance, typename _Compare>
  void
  __stable_sort_adaptive(_RandomAccessIterator __first,
                         _RandomAccessIterator __last,
                         _Pointer __buffer, _Distance __buffer_size,
                         _Compare __comp)
  {
    // Split at the midpoint (rounded up).
    const _Distance __len = (__last - __first + 1) / 2;
    const _RandomAccessIterator __middle = __first + __len;

    if (__len > __buffer_size)
      {
        // Halves still larger than the buffer: keep dividing.
        std::__stable_sort_adaptive(__first, __middle, __buffer,
                                    __buffer_size, __comp);
        std::__stable_sort_adaptive(__middle, __last, __buffer,
                                    __buffer_size, __comp);
      }
    else
      {
        // Each half fits: sort it with the buffered merge sort.
        std::__merge_sort_with_buffer(__first, __middle, __buffer, __comp);
        std::__merge_sort_with_buffer(__middle, __last, __buffer, __comp);
      }

    // Merge the two sorted halves, again adapting to the buffer.
    std::__merge_adaptive(__first, __middle, __last,
                          _Distance(__middle - __first),
                          _Distance(__last - __middle),
                          __buffer, __buffer_size,
                          __comp);
  }
2754
/// Helper for the stable sorting routines: stable sort of
/// [__first, __last) using no extra memory — insertion sort for short
/// ranges, otherwise recursive halves joined by the buffer-free merge.
template<typename _RandomAccessIterator, typename _Compare>
  void
  __inplace_stable_sort(_RandomAccessIterator __first,
                        _RandomAccessIterator __last, _Compare __comp)
  {
    // Short ranges: a single (stable) insertion sort.
    if (__last - __first < 15)
      {
        std::__insertion_sort(__first, __last, __comp);
        return;
      }
    const _RandomAccessIterator __middle
      = __first + (__last - __first) / 2;
    std::__inplace_stable_sort(__first, __middle, __comp);
    std::__inplace_stable_sort(__middle, __last, __comp);
    std::__merge_without_buffer(__first, __middle, __last,
                                __middle - __first,
                                __last - __middle,
                                __comp);
  }
2774
2775 // stable_sort
2776
2777 // Set algorithms: includes, set_union, set_intersection, set_difference,
2778 // set_symmetric_difference. All of these algorithms have the precondition
2779 // that their input ranges are sorted and the postcondition that their output
2780 // ranges are sorted.
2781
/// Implementation of includes: both ranges are sorted; returns whether
/// every element of [__first2, __last2) appears (in order) in
/// [__first1, __last1).  __comp compares through iterators.
template<typename _InputIterator1, typename _InputIterator2,
         typename _Compare>
  bool
  __includes(_InputIterator1 __first1, _InputIterator1 __last1,
             _InputIterator2 __first2, _InputIterator2 __last2,
             _Compare __comp)
  {
    // Walk both sorted ranges in lockstep.
    while (__first1 != __last1 && __first2 != __last2)
      {
        // *__first2 orders before *__first1: it cannot occur in the
        // remainder of range one.
        if (__comp(__first2, __first1))
          return false;
        // Equivalent elements: this needle is matched, advance it.
        if (!__comp(__first1, __first2))
          ++__first2;
        ++__first1;
      }
    // Success iff every element of range two was consumed.
    return __first2 == __last2;
  }
2802
/**
 *  @brief Determines whether all elements of a sequence exists in a range.
 *  @param  __first1  Start of search range.
 *  @param  __last1   End of search range.
 *  @param  __first2  Start of sequence
 *  @param  __last2   End of sequence.
 *  @return  True if each element in [__first2,__last2) is contained in
 *  order within [__first1,__last1).  False otherwise.
 *  @ingroup set_algorithms
 *
 *  This operation expects both [__first1,__last1) and [__first2,__last2)
 *  to be sorted.  Searches for the presence of each element in
 *  [__first2,__last2) within [__first1,__last1).  The iterators over each
 *  range only move forward, so this is a linear algorithm.  If an element
 *  in [__first2,__last2) is not found before the search iterator reaches
 *  @p __last2, false is returned.
*/
template<typename _InputIterator1, typename _InputIterator2>
  inline bool
  includes(_InputIterator1 __first1, _InputIterator1 __last1,
           _InputIterator2 __first2, _InputIterator2 __last2)
  {
    // concept requirements
    __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
    __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
    __glibcxx_function_requires(_LessThanOpConcept<
          typename iterator_traits<_InputIterator1>::value_type,
          typename iterator_traits<_InputIterator2>::value_type>)
    __glibcxx_function_requires(_LessThanOpConcept<
          typename iterator_traits<_InputIterator2>::value_type,
          typename iterator_traits<_InputIterator1>::value_type>)
    __glibcxx_requires_sorted_set(__first1, __last1, __first2);
    __glibcxx_requires_sorted_set(__first2, __last2, __first1);
    __glibcxx_requires_irreflexive2(__first1, __last1);
    __glibcxx_requires_irreflexive2(__first2, __last2);

    // Delegate with operator< wrapped as an iterator comparison.
    return std::__includes(__first1, __last1, __first2, __last2,
                           __gnu_cxx::__ops::__iter_less_iter());
  }
2843
/**
 *  @brief Determines whether all elements of a sequence exists in a
 *  range using comparison.
 *  @ingroup set_algorithms
 *  @param  __first1  Start of search range.
 *  @param  __last1   End of search range.
 *  @param  __first2  Start of sequence
 *  @param  __last2   End of sequence.
 *  @param  __comp    Comparison function to use.
 *  @return  True if each element in [__first2,__last2) is contained in
 *  order within [__first1,__last1) according to comp.  False otherwise.
 *  @ingroup set_algorithms
 *
 *  This operation expects both [__first1,__last1) and [__first2,__last2)
 *  to be sorted.  Searches for the presence of each element in
 *  [__first2,__last2) within [__first1,__last1), using comp to decide.
 *  The iterators over each range only move forward, so this is a linear
 *  algorithm.  If an element in [__first2,__last2) is not found before
 *  the search iterator reaches @p __last2, false is returned.
*/
template<typename _InputIterator1, typename _InputIterator2,
         typename _Compare>
  inline bool
  includes(_InputIterator1 __first1, _InputIterator1 __last1,
           _InputIterator2 __first2, _InputIterator2 __last2,
           _Compare __comp)
  {
    // concept requirements
    __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
    __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
    __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
          typename iterator_traits<_InputIterator1>::value_type,
          typename iterator_traits<_InputIterator2>::value_type>)
    __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
          typename iterator_traits<_InputIterator2>::value_type,
          typename iterator_traits<_InputIterator1>::value_type>)
    __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
    __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
    __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
    __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);

    // Delegate with the user comparator wrapped for iterators.
    return std::__includes(__first1, __last1, __first2, __last2,
                           __gnu_cxx::__ops::__iter_comp_iter(__comp));
  }
2889
2890 // nth_element
2891 // merge
2892 // set_difference
2893 // set_intersection
2894 // set_union
2895 // stable_sort
2896 // set_symmetric_difference
2897 // min_element
2898 // max_element
2899
/// Implementation of next_permutation: rearrange [__first, __last)
/// into the lexicographically next permutation under __comp (an
/// iterator comparator).  Returns false — after resetting the range to
/// the smallest permutation — when the input was already the largest.
template<typename _BidirectionalIterator, typename _Compare>
  bool
  __next_permutation(_BidirectionalIterator __first,
                     _BidirectionalIterator __last, _Compare __comp)
  {
    // Ranges of fewer than two elements have only one permutation.
    if (__first == __last)
      return false;
    _BidirectionalIterator __i = __first;
    ++__i;
    if (__i == __last)
      return false;
    __i = __last;
    --__i;

    for (;;)
      {
        _BidirectionalIterator __ii = __i;
        --__i;
        // Found the rightmost ascent *__i < *__ii?
        if (__comp(__i, __ii))
          {
            // Swap *__i with the rightmost element that exceeds it,
            // then reverse the (descending) suffix to its minimum.
            _BidirectionalIterator __j = __last;
            while (!__comp(__i, --__j))
              {}
            std::iter_swap(__i, __j);
            std::__reverse(__ii, __last,
                           std::__iterator_category(__first));
            return true;
          }
        if (__i == __first)
          {
            // Whole range is descending: wrap around to the first
            // permutation and report exhaustion.
            std::__reverse(__first, __last,
                           std::__iterator_category(__first));
            return false;
          }
      }
  }
2936
/**
 *  @brief  Permute range into the next @e dictionary ordering.
 *  @ingroup sorting_algorithms
 *  @param  __first  Start of range.
 *  @param  __last   End of range.
 *  @return  False if wrapped to first permutation, true otherwise.
 *
 *  Treats all permutations of the range as a set of @e dictionary
 *  sorted sequences.  Permutes the current sequence into the next one
 *  of this set.  Returns true if there are more sequences to generate.
 *  If the sequence is the largest of the set, the smallest is generated
 *  and false returned.
*/
template<typename _BidirectionalIterator>
  inline bool
  next_permutation(_BidirectionalIterator __first,
                   _BidirectionalIterator __last)
  {
    // concept requirements
    __glibcxx_function_requires(_BidirectionalIteratorConcept<
          _BidirectionalIterator>)
    __glibcxx_function_requires(_LessThanComparableConcept<
          typename iterator_traits<_BidirectionalIterator>::value_type>)
    __glibcxx_requires_valid_range(__first, __last);
    __glibcxx_requires_irreflexive(__first, __last);

    // Delegate with operator< wrapped as an iterator comparison.
    return std::__next_permutation
      (__first, __last, __gnu_cxx::__ops::__iter_less_iter());
  }
2965
/**
 *  @brief  Permute range into the next @e dictionary ordering using
 *          comparison functor.
 *  @ingroup sorting_algorithms
 *  @param  __first  Start of range.
 *  @param  __last   End of range.
 *  @param  __comp   A comparison functor.
 *  @return  False if wrapped to first permutation, true otherwise.
 *
 *  Treats all permutations of the range [__first,__last) as a set of
 *  @e dictionary sorted sequences ordered by @p __comp.  Permutes the
 *  current sequence into the next one of this set.  Returns true if
 *  there are more sequences to generate.  If the sequence is the
 *  largest of the set, the smallest is generated and false returned.
*/
template<typename _BidirectionalIterator, typename _Compare>
  inline bool
  next_permutation(_BidirectionalIterator __first,
                   _BidirectionalIterator __last, _Compare __comp)
  {
    // concept requirements
    __glibcxx_function_requires(_BidirectionalIteratorConcept<
          _BidirectionalIterator>)
    __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
          typename iterator_traits<_BidirectionalIterator>::value_type,
          typename iterator_traits<_BidirectionalIterator>::value_type>)
    __glibcxx_requires_valid_range(__first, __last);
    __glibcxx_requires_irreflexive_pred(__first, __last, __comp);

    // Delegate with the user comparator wrapped for iterators.
    return std::__next_permutation
      (__first, __last, __gnu_cxx::__ops::__iter_comp_iter(__comp));
  }
2998
/// Implementation of prev_permutation: rearrange [__first, __last)
/// into the lexicographically previous permutation under __comp (an
/// iterator comparator).  Returns false — after resetting the range to
/// the largest permutation — when the input was already the smallest.
template<typename _BidirectionalIterator, typename _Compare>
  bool
  __prev_permutation(_BidirectionalIterator __first,
                     _BidirectionalIterator __last, _Compare __comp)
  {
    // Ranges of fewer than two elements have only one permutation.
    if (__first == __last)
      return false;
    _BidirectionalIterator __i = __first;
    ++__i;
    if (__i == __last)
      return false;
    __i = __last;
    --__i;

    for (;;)
      {
        _BidirectionalIterator __ii = __i;
        --__i;
        // Found the rightmost descent *__ii < *__i?
        if (__comp(__ii, __i))
          {
            // Swap *__i with the rightmost element it exceeds, then
            // reverse the (ascending) suffix to its maximum.
            _BidirectionalIterator __j = __last;
            while (!__comp(--__j, __i))
              {}
            std::iter_swap(__i, __j);
            std::__reverse(__ii, __last,
                           std::__iterator_category(__first));
            return true;
          }
        if (__i == __first)
          {
            // Whole range is ascending: wrap around to the last
            // permutation and report exhaustion.
            std::__reverse(__first, __last,
                           std::__iterator_category(__first));
            return false;
          }
      }
  }
3035
/**
 *  @brief  Permute range into the previous @e dictionary ordering.
 *  @ingroup sorting_algorithms
 *  @param  __first  Start of range.
 *  @param  __last   End of range.
 *  @return  False if wrapped to last permutation, true otherwise.
 *
 *  Treats all permutations of the range as a set of @e dictionary
 *  sorted sequences.  Permutes the current sequence into the previous
 *  one of this set.  Returns true if there are more sequences to
 *  generate.  If the sequence is the smallest of the set, the largest
 *  is generated and false returned.
*/
template<typename _BidirectionalIterator>
  inline bool
  prev_permutation(_BidirectionalIterator __first,
                   _BidirectionalIterator __last)
  {
    // concept requirements
    __glibcxx_function_requires(_BidirectionalIteratorConcept<
          _BidirectionalIterator>)
    __glibcxx_function_requires(_LessThanComparableConcept<
          typename iterator_traits<_BidirectionalIterator>::value_type>)
    __glibcxx_requires_valid_range(__first, __last);
    __glibcxx_requires_irreflexive(__first, __last);

    // Delegate with operator< wrapped as an iterator comparison.
    return std::__prev_permutation(__first, __last,
                                   __gnu_cxx::__ops::__iter_less_iter());
  }
3065
/**
 *  @brief  Permute range into the previous @e dictionary ordering using
 *          comparison functor.
 *  @ingroup sorting_algorithms
 *  @param  __first  Start of range.
 *  @param  __last   End of range.
 *  @param  __comp   A comparison functor.
 *  @return  False if wrapped to last permutation, true otherwise.
 *
 *  Treats all permutations of the range [__first,__last) as a set of
 *  @e dictionary sorted sequences ordered by @p __comp.  Permutes the
 *  current sequence into the previous one of this set.  Returns true
 *  if there are more sequences to generate.  If the sequence is the
 *  smallest of the set, the largest is generated and false returned.
*/
template<typename _BidirectionalIterator, typename _Compare>
  inline bool
  prev_permutation(_BidirectionalIterator __first,
                   _BidirectionalIterator __last, _Compare __comp)
  {
    // concept requirements
    __glibcxx_function_requires(_BidirectionalIteratorConcept<
          _BidirectionalIterator>)
    __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
          typename iterator_traits<_BidirectionalIterator>::value_type,
          typename iterator_traits<_BidirectionalIterator>::value_type>)
    __glibcxx_requires_valid_range(__first, __last);
    __glibcxx_requires_irreflexive_pred(__first, __last, __comp);

    // Delegate with the user comparator wrapped for iterators.
    return std::__prev_permutation(__first, __last,
                                   __gnu_cxx::__ops::__iter_comp_iter(__comp));
  }
3098
3099 // replace
3100 // replace_if
3101
/// Implementation of replace_copy / replace_copy_if: copy each element
/// of [__first, __last) to __result, writing __new_value instead
/// whenever the iterator-predicate __pred is satisfied.  Returns the
/// end of the output range.
template<typename _InputIterator, typename _OutputIterator,
         typename _Predicate, typename _Tp>
  _OutputIterator
  __replace_copy_if(_InputIterator __first, _InputIterator __last,
                    _OutputIterator __result,
                    _Predicate __pred, const _Tp& __new_value)
  {
    // The (void) cast sidesteps any user-defined operator, on the
    // iterator pair.
    for (; __first != __last; ++__first, (void)++__result)
      {
        if (__pred(__first))
          *__result = __new_value;
        else
          *__result = *__first;
      }
    return __result;
  }
3116
/**
 *  @brief Copy a sequence, replacing each element of one value with
 *         another value.
 *  @param  __first      An input iterator.
 *  @param  __last       An input iterator.
 *  @param  __result     An output iterator.
 *  @param  __old_value  The value to be replaced.
 *  @param  __new_value  The replacement value.
 *  @return   The end of the output sequence, @p result+(last-first).
 *
 *  Copies each element in the input range @p [__first,__last) to the
 *  output range @p [__result,__result+(__last-__first)) replacing
 *  elements equal to @p __old_value with @p __new_value.
*/
template<typename _InputIterator, typename _OutputIterator, typename _Tp>
  inline _OutputIterator
  replace_copy(_InputIterator __first, _InputIterator __last,
               _OutputIterator __result,
               const _Tp& __old_value, const _Tp& __new_value)
  {
    // concept requirements
    __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
    __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
          typename iterator_traits<_InputIterator>::value_type>)
    __glibcxx_function_requires(_EqualOpConcept<
          typename iterator_traits<_InputIterator>::value_type, _Tp>)
    __glibcxx_requires_valid_range(__first, __last);

    // Express equality with __old_value as an iterator predicate and
    // reuse the predicate-based implementation.
    return std::__replace_copy_if(__first, __last, __result,
                                  __gnu_cxx::__ops::__iter_equals_val(__old_value),
                                  __new_value);
  }
3149
/**
 *  @brief Copy a sequence, replacing each value for which a predicate
 *         returns true with another value.
 *  @ingroup mutating_algorithms
 *  @param  __first      An input iterator.
 *  @param  __last       An input iterator.
 *  @param  __result     An output iterator.
 *  @param  __pred       A predicate.
 *  @param  __new_value  The replacement value.
 *  @return   The end of the output sequence, @p __result+(__last-__first).
 *
 *  Copies each element in the range @p [__first,__last) to the range
 *  @p [__result,__result+(__last-__first)) replacing elements for which
 *  @p __pred returns true with @p __new_value.
*/
template<typename _InputIterator, typename _OutputIterator,
         typename _Predicate, typename _Tp>
  inline _OutputIterator
  replace_copy_if(_InputIterator __first, _InputIterator __last,
                  _OutputIterator __result,
                  _Predicate __pred, const _Tp& __new_value)
  {
    // concept requirements
    __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
    __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
          typename iterator_traits<_InputIterator>::value_type>)
    __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
          typename iterator_traits<_InputIterator>::value_type>)
    __glibcxx_requires_valid_range(__first, __last);

    // Lift the value predicate to an iterator predicate and delegate.
    return std::__replace_copy_if(__first, __last, __result,
                                  __gnu_cxx::__ops::__pred_iter(__pred),
                                  __new_value);
  }
3184
  // Internal worker for count/count_if: returns the number of iterators
  // __i in [__first,__last) for which __pred(__i) is true.  Note that
  // __pred receives the ITERATOR, not the element -- public callers wrap
  // value predicates via the __gnu_cxx::__ops adaptors.
3185 template<typename _InputIterator, typename _Predicate>
3186 typename iterator_traits<_InputIterator>::difference_type
3187 __count_if(_InputIterator __first, _InputIterator __last, _Predicate __pred)
3188 {
3189 typename iterator_traits<_InputIterator>::difference_type __n = 0;
3190 for (; __first != __last; ++__first)
3191 if (__pred(__first))
3192 ++__n;
3193 return __n;
3194 }
3195
3196#if __cplusplus201402L >= 201103L
3197 /**
3198 * @brief Determines whether the elements of a sequence are sorted.
3199 * @ingroup sorting_algorithms
3200 * @param __first An iterator.
3201 * @param __last Another iterator.
3202 * @return True if the elements are sorted, false otherwise.
3203 */
3204 template<typename _ForwardIterator>
3205 inline bool
3206 is_sorted(_ForwardIterator __first, _ForwardIterator __last)
      // Sorted iff the longest sorted prefix extends all the way to __last.
3207 { return std::is_sorted_until(__first, __last) == __last; }
3208
3209 /**
3210 * @brief Determines whether the elements of a sequence are sorted
3211 * according to a comparison functor.
3212 * @ingroup sorting_algorithms
3213 * @param __first An iterator.
3214 * @param __last Another iterator.
3215 * @param __comp A comparison functor.
3216 * @return True if the elements are sorted, false otherwise.
3217 */
3218 template<typename _ForwardIterator, typename _Compare>
3219 inline bool
3220 is_sorted(_ForwardIterator __first, _ForwardIterator __last,
3221 _Compare __comp)
      // Sorted under __comp iff the sorted prefix reaches __last.
3222 { return std::is_sorted_until(__first, __last, __comp) == __last; }
3223
  // Internal worker for is_sorted_until: walks adjacent pairs and returns
  // the first iterator __next such that __comp(__next, prev) holds (i.e.
  // the first element that breaks the ordering), or __last if none does.
  // __comp compares ITERATORS; callers pass an __ops adaptor.
3224 template<typename _ForwardIterator, typename _Compare>
3225 _ForwardIterator
3226 __is_sorted_until(_ForwardIterator __first, _ForwardIterator __last,
3227 _Compare __comp)
3228 {
      // An empty range is trivially sorted.
3229 if (__first == __last)
3230 return __last;
3231
3232 _ForwardIterator __next = __first;
      // The (void) cast suppresses any overloaded comma operator.
3233 for (++__next; __next != __last; __first = __next, (void)++__next)
3234 if (__comp(__next, __first))
3235 return __next;
3236 return __next;
3237 }
3238
3239 /**
3240 * @brief Determines the end of a sorted sequence.
3241 * @ingroup sorting_algorithms
3242 * @param __first An iterator.
3243 * @param __last Another iterator.
3244 * @return An iterator pointing to the last iterator i in [__first, __last)
3245 * for which the range [__first, i) is sorted.
3246 */
3247 template<typename _ForwardIterator>
3248 inline _ForwardIterator
3249 is_sorted_until(_ForwardIterator __first, _ForwardIterator __last)
3250 {
3251 // concept requirements
3252 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3253 __glibcxx_function_requires(_LessThanComparableConcept<
3254 typename iterator_traits<_ForwardIterator>::value_type>)
3255 __glibcxx_requires_valid_range(__first, __last);
3256 __glibcxx_requires_irreflexive(__first, __last);
3257
      // Delegate to the internal worker using operator< on the elements.
3258 return std::__is_sorted_until(__first, __last,
3259 __gnu_cxx::__ops::__iter_less_iter());
3260 }
3261
3262 /**
3263 * @brief Determines the end of a sorted sequence using comparison functor.
3264 * @ingroup sorting_algorithms
3265 * @param __first An iterator.
3266 * @param __last Another iterator.
3267 * @param __comp A comparison functor.
3268 * @return An iterator pointing to the last iterator i in [__first, __last)
3269 * for which the range [__first, i) is sorted.
3270 */
3271 template<typename _ForwardIterator, typename _Compare>
3272 inline _ForwardIterator
3273 is_sorted_until(_ForwardIterator __first, _ForwardIterator __last,
3274 _Compare __comp)
3275 {
3276 // concept requirements
3277 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3278 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
3279 typename iterator_traits<_ForwardIterator>::value_type,
3280 typename iterator_traits<_ForwardIterator>::value_type>)
3281 __glibcxx_requires_valid_range(__first, __last);
3282 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
3283
      // Delegate to the internal worker, adapting __comp to iterators.
3284 return std::__is_sorted_until(__first, __last,
3285 __gnu_cxx::__ops::__iter_comp_iter(__comp));
3286 }
3287
3288 /**
3289 * @brief Determines min and max at once as an ordered pair.
3290 * @ingroup sorting_algorithms
3291 * @param __a A thing of arbitrary type.
3292 * @param __b Another thing of arbitrary type.
3293 * @return A pair(__b, __a) if __b is smaller than __a, pair(__a,
3294 * __b) otherwise.
3295 */
3296 template<typename _Tp>
3297 _GLIBCXX14_CONSTEXPRconstexpr
3298 inline pair<const _Tp&, const _Tp&>
3299 minmax(const _Tp& __a, const _Tp& __b)
3300 {
3301 // concept requirements
3302 __glibcxx_function_requires(_LessThanComparableConcept<_Tp>)
3303
      // Only __b < __a swaps the order, so equal arguments yield (__a, __b).
      // NOTE: the result holds REFERENCES to the arguments; binding it past
      // the full expression when called with temporaries leaves it dangling.
3304 return __b < __a ? pair<const _Tp&, const _Tp&>(__b, __a)
3305 : pair<const _Tp&, const _Tp&>(__a, __b);
3306 }
3307
3308 /**
3309 * @brief Determines min and max at once as an ordered pair.
3310 * @ingroup sorting_algorithms
3311 * @param __a A thing of arbitrary type.
3312 * @param __b Another thing of arbitrary type.
3313 * @param __comp A @link comparison_functors comparison functor @endlink.
3314 * @return A pair(__b, __a) if __b is smaller than __a, pair(__a,
3315 * __b) otherwise.
3316 */
3317 template<typename _Tp, typename _Compare>
3318 _GLIBCXX14_CONSTEXPRconstexpr
3319 inline pair<const _Tp&, const _Tp&>
3320 minmax(const _Tp& __a, const _Tp& __b, _Compare __comp)
3321 {
      // Only __comp(__b, __a) swaps, so "equal" arguments yield (__a, __b).
      // The pair holds references to the caller's arguments (see above).
3322 return __comp(__b, __a) ? pair<const _Tp&, const _Tp&>(__b, __a)
3323 : pair<const _Tp&, const _Tp&>(__a, __b);
3324 }
3325
  // Internal worker for minmax_element.  Processes the range two elements
  // at a time: each pair is first compared against itself, then its smaller
  // member against __min and its larger against __max, giving roughly
  // 3*N/2 comparisons instead of 2*N.  __comp compares ITERATORS.
  // Returns (first smallest, LAST largest) per the standard's requirement.
3326 template<typename _ForwardIterator, typename _Compare>
3327 _GLIBCXX14_CONSTEXPRconstexpr
3328 pair<_ForwardIterator, _ForwardIterator>
3329 __minmax_element(_ForwardIterator __first, _ForwardIterator __last,
3330 _Compare __comp)
3331 {
      // Ranges of zero or one element: both results are __first.
3332 _ForwardIterator __next = __first;
3333 if (__first == __last
3334 || ++__next == __last)
3335 return std::make_pair(__first, __first);
3336
      // Seed __min/__max from the first pair.
3337 _ForwardIterator __min{}, __max{};
3338 if (__comp(__next, __first))
3339 {
3340 __min = __next;
3341 __max = __first;
3342 }
3343 else
3344 {
3345 __min = __first;
3346 __max = __next;
3347 }
3348
3349 __first = __next;
3350 ++__first;
3351
3352 while (__first != __last)
3353 {
3354 __next = __first;
          // Odd element left over at the end: compare it alone.  The
          // negated __comp for __max keeps the LAST maximum on ties.
3355 if (++__next == __last)
3356 {
3357 if (__comp(__first, __min))
3358 __min = __first;
3359 else if (!__comp(__first, __max))
3360 __max = __first;
3361 break;
3362 }
3363
          // Order the pair first, then test only the smaller against __min
          // and the larger against __max.
3364 if (__comp(__next, __first))
3365 {
3366 if (__comp(__next, __min))
3367 __min = __next;
3368 if (!__comp(__first, __max))
3369 __max = __first;
3370 }
3371 else
3372 {
3373 if (__comp(__first, __min))
3374 __min = __first;
3375 if (!__comp(__next, __max))
3376 __max = __next;
3377 }
3378
3379 __first = __next;
3380 ++__first;
3381 }
3382
3383 return std::make_pair(__min, __max);
3384 }
3385
3386 /**
3387 * @brief Return a pair of iterators pointing to the minimum and maximum
3388 * elements in a range.
3389 * @ingroup sorting_algorithms
3390 * @param __first Start of range.
3391 * @param __last End of range.
3392 * @return make_pair(m, M), where m is the first iterator i in
3393 * [__first, __last) such that no other element in the range is
3394 * smaller, and where M is the last iterator i in [__first, __last)
3395 * such that no other element in the range is larger.
3396 */
3397 template<typename _ForwardIterator>
3398 _GLIBCXX14_CONSTEXPRconstexpr
3399 inline pair<_ForwardIterator, _ForwardIterator>
3400 minmax_element(_ForwardIterator __first, _ForwardIterator __last)
3401 {
3402 // concept requirements
3403 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3404 __glibcxx_function_requires(_LessThanComparableConcept<
3405 typename iterator_traits<_ForwardIterator>::value_type>)
3406 __glibcxx_requires_valid_range(__first, __last);
3407 __glibcxx_requires_irreflexive(__first, __last);
3408
      // Delegate to the pairwise internal worker using operator<.
3409 return std::__minmax_element(__first, __last,
3410 __gnu_cxx::__ops::__iter_less_iter());
3411 }
3412
3413 /**
3414 * @brief Return a pair of iterators pointing to the minimum and maximum
3415 * elements in a range.
3416 * @ingroup sorting_algorithms
3417 * @param __first Start of range.
3418 * @param __last End of range.
3419 * @param __comp Comparison functor.
3420 * @return make_pair(m, M), where m is the first iterator i in
3421 * [__first, __last) such that no other element in the range is
3422 * smaller, and where M is the last iterator i in [__first, __last)
3423 * such that no other element in the range is larger.
3424 */
3425 template<typename _ForwardIterator, typename _Compare>
3426 _GLIBCXX14_CONSTEXPRconstexpr
3427 inline pair<_ForwardIterator, _ForwardIterator>
3428 minmax_element(_ForwardIterator __first, _ForwardIterator __last,
3429 _Compare __comp)
3430 {
3431 // concept requirements
3432 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3433 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
3434 typename iterator_traits<_ForwardIterator>::value_type,
3435 typename iterator_traits<_ForwardIterator>::value_type>)
3436 __glibcxx_requires_valid_range(__first, __last);
3437 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
3438
      // Delegate to the pairwise internal worker, adapting __comp.
3439 return std::__minmax_element(__first, __last,
3440 __gnu_cxx::__ops::__iter_comp_iter(__comp));
3441 }
3442
3443 // N2722 + DR 915.
3444 template<typename _Tp>
3445 _GLIBCXX14_CONSTEXPRconstexpr
3446 inline _Tp
3447 min(initializer_list<_Tp> __l)
      // Precondition: __l is non-empty -- dereferences min_element's result.
3448 { return *std::min_element(__l.begin(), __l.end()); }
3449
3450 template<typename _Tp, typename _Compare>
3451 _GLIBCXX14_CONSTEXPRconstexpr
3452 inline _Tp
3453 min(initializer_list<_Tp> __l, _Compare __comp)
      // Precondition: __l is non-empty (see value-returning min above).
3454 { return *std::min_element(__l.begin(), __l.end(), __comp); }
3455
3456 template<typename _Tp>
3457 _GLIBCXX14_CONSTEXPRconstexpr
3458 inline _Tp
3459 max(initializer_list<_Tp> __l)
      // Precondition: __l is non-empty -- dereferences max_element's result.
3460 { return *std::max_element(__l.begin(), __l.end()); }
3461
3462 template<typename _Tp, typename _Compare>
3463 _GLIBCXX14_CONSTEXPRconstexpr
3464 inline _Tp
3465 max(initializer_list<_Tp> __l, _Compare __comp)
      // Precondition: __l is non-empty (see value-returning max above).
3466 { return *std::max_element(__l.begin(), __l.end(), __comp); }
3467
3468 template<typename _Tp>
3469 _GLIBCXX14_CONSTEXPRconstexpr
3470 inline pair<_Tp, _Tp>
3471 minmax(initializer_list<_Tp> __l)
3472 {
      // Precondition: __l is non-empty.  Returns COPIES of the extreme
      // elements, unlike the two-argument reference-returning overload.
3473 pair<const _Tp*, const _Tp*> __p =
3474 std::minmax_element(__l.begin(), __l.end());
3475 return std::make_pair(*__p.first, *__p.second);
3476 }
3477
3478 template<typename _Tp, typename _Compare>
3479 _GLIBCXX14_CONSTEXPRconstexpr
3480 inline pair<_Tp, _Tp>
3481 minmax(initializer_list<_Tp> __l, _Compare __comp)
3482 {
      // Precondition: __l is non-empty.  Returns copies, not references.
3483 pair<const _Tp*, const _Tp*> __p =
3484 std::minmax_element(__l.begin(), __l.end(), __comp);
3485 return std::make_pair(*__p.first, *__p.second);
3486 }
3487
  // Internal worker for the 3-argument is_permutation.  The second range
  // is implicitly [__first2, __first2 + distance(__first1, __last1)).
  // __pred compares ITERATORS.  Worst case O(N^2) comparisons: for each
  // previously-unseen value, its occurrences are counted in both ranges.
3488 template<typename _ForwardIterator1, typename _ForwardIterator2,
3489 typename _BinaryPredicate>
3490 bool
3491 __is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3492 _ForwardIterator2 __first2, _BinaryPredicate __pred)
3493 {
3494 // Efficiently compare identical prefixes: O(N) if sequences
3495 // have the same elements in the same order.
3496 for (; __first1 != __last1; ++__first1, (void)++__first2)
3497 if (!__pred(__first1, __first2))
3498 break;
3499
3500 if (__first1 == __last1)
3501 return true;
3502
3503 // Establish __last2 assuming equal ranges by iterating over the
3504 // rest of the list.
3505 _ForwardIterator2 __last2 = __first2;
3506 std::advance(__last2, std::distance(__first1, __last1));
3507 for (_ForwardIterator1 __scan = __first1; __scan != __last1; ++__scan)
3508 {
          // Skip values already counted on an earlier iteration.
3509 if (__scan != std::__find_if(__first1, __scan,
3510 __gnu_cxx::__ops::__iter_comp_iter(__pred, __scan)))
3511 continue; // We've seen this one before.
3512
          // The occurrence counts of *__scan must agree in both ranges.
3513 auto __matches
3514 = std::__count_if(__first2, __last2,
3515 __gnu_cxx::__ops::__iter_comp_iter(__pred, __scan));
3516 if (0 == __matches ||
3517 std::__count_if(__scan, __last1,
3518 __gnu_cxx::__ops::__iter_comp_iter(__pred, __scan))
3519 != __matches)
3520 return false;
3521 }
3522 return true;
3523 }
3524
3525 /**
3526 * @brief Checks whether a permutation of the second sequence is equal
3527 * to the first sequence.
3528 * @ingroup non_mutating_algorithms
3529 * @param __first1 Start of first range.
3530 * @param __last1 End of first range.
3531 * @param __first2 Start of second range.
3532 * @return true if there exists a permutation of the elements in the range
3533 * [__first2, __first2 + (__last1 - __first1)), beginning with
3534 * ForwardIterator2 begin, such that equal(__first1, __last1, begin)
3535 * returns true; otherwise, returns false.
3536 */
3537 template<typename _ForwardIterator1, typename _ForwardIterator2>
3538 inline bool
3539 is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3540 _ForwardIterator2 __first2)
3541 {
3542 // concept requirements
3543 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
3544 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
3545 __glibcxx_function_requires(_EqualOpConcept<
3546 typename iterator_traits<_ForwardIterator1>::value_type,
3547 typename iterator_traits<_ForwardIterator2>::value_type>)
3548 __glibcxx_requires_valid_range(__first1, __last1);
3549
      // Delegate to the internal worker using operator== on the elements.
3550 return std::__is_permutation(__first1, __last1, __first2,
3551 __gnu_cxx::__ops::__iter_equal_to_iter());
3552 }
3553
3554 /**
3555 * @brief Checks whether a permutation of the second sequence is equal
3556 * to the first sequence.
3557 * @ingroup non_mutating_algorithms
3558 * @param __first1 Start of first range.
3559 * @param __last1 End of first range.
3560 * @param __first2 Start of second range.
3561 * @param __pred A binary predicate.
3562 * @return true if there exists a permutation of the elements in
3563 * the range [__first2, __first2 + (__last1 - __first1)),
3564 * beginning with ForwardIterator2 begin, such that
3565 * equal(__first1, __last1, __begin, __pred) returns true;
3566 * otherwise, returns false.
3567 */
3568 template<typename _ForwardIterator1, typename _ForwardIterator2,
3569 typename _BinaryPredicate>
3570 inline bool
3571 is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3572 _ForwardIterator2 __first2, _BinaryPredicate __pred)
3573 {
3574 // concept requirements
3575 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
3576 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
3577 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
3578 typename iterator_traits<_ForwardIterator1>::value_type,
3579 typename iterator_traits<_ForwardIterator2>::value_type>)
3580 __glibcxx_requires_valid_range(__first1, __last1);
3581
      // Delegate to the internal worker, adapting __pred to iterators.
3582 return std::__is_permutation(__first1, __last1, __first2,
3583 __gnu_cxx::__ops::__iter_comp_iter(__pred));
3584 }
3585
3586#if __cplusplus201402L > 201103L
  // Internal worker for the C++14 4-iterator is_permutation.  Unlike the
  // 3-argument form, both range lengths are given, so ranges of unequal
  // length can be rejected -- up front for random-access iterators, after
  // the common-prefix scan otherwise.  __pred compares ITERATORS.
3587 template<typename _ForwardIterator1, typename _ForwardIterator2,
3588 typename _BinaryPredicate>
3589 bool
3590 __is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3591 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
3592 _BinaryPredicate __pred)
3593 {
3594 using _Cat1
3595 = typename iterator_traits<_ForwardIterator1>::iterator_category;
3596 using _Cat2
3597 = typename iterator_traits<_ForwardIterator2>::iterator_category;
3598 using _It1_is_RA = is_same<_Cat1, random_access_iterator_tag>;
3599 using _It2_is_RA = is_same<_Cat2, random_access_iterator_tag>;
3600 constexpr bool __ra_iters = _It1_is_RA() && _It2_is_RA();
      // Random access: distance() is O(1), so check lengths immediately.
3601 if (__ra_iters)
3602 {
3603 auto __d1 = std::distance(__first1, __last1);
3604 auto __d2 = std::distance(__first2, __last2);
3605 if (__d1 != __d2)
3606 return false;
3607 }
3608
3609 // Efficiently compare identical prefixes: O(N) if sequences
3610 // have the same elements in the same order.
3611 for (; __first1 != __last1 && __first2 != __last2;
3612 ++__first1, (void)++__first2)
3613 if (!__pred(__first1, __first2))
3614 break;
3615
3616 if (__ra_iters)
3617 {
          // Lengths already known equal; exhausting range 1 exhausts both.
3618 if (__first1 == __last1)
3619 return true;
3620 }
3621 else
3622 {
          // Non-random-access: compare the remaining lengths now (O(N)).
3623 auto __d1 = std::distance(__first1, __last1);
3624 auto __d2 = std::distance(__first2, __last2);
3625 if (__d1 == 0 && __d2 == 0)
3626 return true;
3627 if (__d1 != __d2)
3628 return false;
3629 }
3630
3631 for (_ForwardIterator1 __scan = __first1; __scan != __last1; ++__scan)
3632 {
          // Skip values already counted on an earlier iteration.
3633 if (__scan != std::__find_if(__first1, __scan,
3634 __gnu_cxx::__ops::__iter_comp_iter(__pred, __scan)))
3635 continue; // We've seen this one before.
3636
          // The occurrence counts of *__scan must agree in both ranges.
3637 auto __matches = std::__count_if(__first2, __last2,
3638 __gnu_cxx::__ops::__iter_comp_iter(__pred, __scan));
3639 if (0 == __matches
3640 || std::__count_if(__scan, __last1,
3641 __gnu_cxx::__ops::__iter_comp_iter(__pred, __scan))
3642 != __matches)
3643 return false;
3644 }
3645 return true;
3646 }
3647
3648 /**
3649 * @brief Checks whether a permutation of the second sequence is equal
3650 * to the first sequence.
3651 * @ingroup non_mutating_algorithms
3652 * @param __first1 Start of first range.
3653 * @param __last1 End of first range.
3654 * @param __first2 Start of second range.
3655 * @param __last2 End of second range.
3656 * @return true if there exists a permutation of the elements in the range
3657 * [__first2, __last2), beginning with ForwardIterator2 begin,
3658 * such that equal(__first1, __last1, begin) returns true;
3659 * otherwise, returns false.
3660 */
3661 template<typename _ForwardIterator1, typename _ForwardIterator2>
3662 inline bool
3663 is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3664 _ForwardIterator2 __first2, _ForwardIterator2 __last2)
3665 {
3666 __glibcxx_requires_valid_range(__first1, __last1);
3667 __glibcxx_requires_valid_range(__first2, __last2);
3668
      // Delegate to the 4-iterator worker using operator== on elements.
3669 return
3670 std::__is_permutation(__first1, __last1, __first2, __last2,
3671 __gnu_cxx::__ops::__iter_equal_to_iter());
3672 }
3673
3674 /**
3675 * @brief Checks whether a permutation of the second sequence is equal
3676 * to the first sequence.
3677 * @ingroup non_mutating_algorithms
3678 * @param __first1 Start of first range.
3679 * @param __last1 End of first range.
3680 * @param __first2 Start of second range.
3681 * @param __last2 End of second range.
3682 * @param __pred A binary predicate.
3683 * @return true if there exists a permutation of the elements in the range
3684 * [__first2, __last2), beginning with ForwardIterator2 begin,
3685 * such that equal(__first1, __last1, __begin, __pred) returns true;
3686 * otherwise, returns false.
3687 */
3688 template<typename _ForwardIterator1, typename _ForwardIterator2,
3689 typename _BinaryPredicate>
3690 inline bool
3691 is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
3692 _ForwardIterator2 __first2, _ForwardIterator2 __last2,
3693 _BinaryPredicate __pred)
3694 {
3695 __glibcxx_requires_valid_range(__first1, __last1);
3696 __glibcxx_requires_valid_range(__first2, __last2);
3697
      // Delegate to the 4-iterator worker, adapting __pred to iterators.
3698 return std::__is_permutation(__first1, __last1, __first2, __last2,
3699 __gnu_cxx::__ops::__iter_comp_iter(__pred));
3700 }
3701#endif
3702
3703#ifdef _GLIBCXX_USE_C99_STDINT_TR11
3704 /**
3705 * @brief Shuffle the elements of a sequence using a uniform random
3706 * number generator.
3707 * @ingroup mutating_algorithms
3708 * @param __first A forward iterator.
3709 * @param __last A forward iterator.
3710 * @param __g A UniformRandomNumberGenerator (26.5.1.3).
3711 * @return Nothing.
3712 *
3713 * Reorders the elements in the range @p [__first,__last) using @p __g to
3714 * provide random numbers.
3715 */
3716 template<typename _RandomAccessIterator,
3717 typename _UniformRandomNumberGenerator>
3718 void
3719 shuffle(_RandomAccessIterator __first, _RandomAccessIterator __last,
3720 _UniformRandomNumberGenerator&& __g)
3721 {
3722 // concept requirements
3723 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
3724 _RandomAccessIterator>)
3725 __glibcxx_requires_valid_range(__first, __last);
3726
3727 if (__first == __last)
3728 return;
3729
3730 typedef typename iterator_traits<_RandomAccessIterator>::difference_type
3731 _DistanceType;
3732
      // Map __g's output to a uniform integer distribution; a fresh
      // param_type per step gives the range [0, i] without constructing
      // a new distribution object each iteration.
3733 typedef typename std::make_unsigned<_DistanceType>::type __ud_type;
3734 typedef typename std::uniform_int_distribution<__ud_type> __distr_type;
3735 typedef typename __distr_type::param_type __p_type;
3736 __distr_type __d;
3737
      // Fisher-Yates: element i is swapped with a uniformly chosen
      // position in [0, i], yielding an unbiased permutation.
3738 for (_RandomAccessIterator __i = __first + 1; __i != __last; ++__i)
3739 std::iter_swap(__i, __first + __d(__g, __p_type(0, __i - __first)));
3740 }
3741#endif
3742
3743#endif // C++11
3744
3745_GLIBCXX_END_NAMESPACE_VERSION
3746
3747_GLIBCXX_BEGIN_NAMESPACE_ALGO
3748
3749 /**
3750 * @brief Apply a function to every element of a sequence.
3751 * @ingroup non_mutating_algorithms
3752 * @param __first An input iterator.
3753 * @param __last An input iterator.
3754 * @param __f A unary function object.
3755 * @return @p __f (std::move(@p __f) in C++0x).
3756 *
3757 * Applies the function object @p __f to each element in the range
3758 * @p [first,last). @p __f must not modify the order of the sequence.
3759 * If @p __f has a return value it is ignored.
3760 */
3761 template<typename _InputIterator, typename _Function>
3762 _Function
3763 for_each(_InputIterator __first, _InputIterator __last, _Function __f)
3764 {
3765 // concept requirements
3766 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3767 __glibcxx_requires_valid_range(__first, __last);
3768 for (; __first != __last; ++__first)
3769 __f(*__first);
      // Return the functor so callers can retrieve any accumulated state.
3770 return _GLIBCXX_MOVE(__f)std::move(__f);
3771 }
3772
3773 /**
3774 * @brief Find the first occurrence of a value in a sequence.
3775 * @ingroup non_mutating_algorithms
3776 * @param __first An input iterator.
3777 * @param __last An input iterator.
3778 * @param __val The value to find.
3779 * @return The first iterator @c i in the range @p [__first,__last)
3780 * such that @c *i == @p __val, or @p __last if no such iterator exists.
3781 */
3782 template<typename _InputIterator, typename _Tp>
3783 inline _InputIterator
3784 find(_InputIterator __first, _InputIterator __last,
3785 const _Tp& __val)
3786 {
3787 // concept requirements
3788 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3789 __glibcxx_function_requires(_EqualOpConcept<
3790 typename iterator_traits<_InputIterator>::value_type, _Tp>)
3791 __glibcxx_requires_valid_range(__first, __last);
      // Delegate to __find_if with an equals-value iterator adaptor.
3792 return std::__find_if(__first, __last,
3793 __gnu_cxx::__ops::__iter_equals_val(__val));
3794 }
3795
3796 /**
3797 * @brief Find the first element in a sequence for which a
3798 * predicate is true.
3799 * @ingroup non_mutating_algorithms
3800 * @param __first An input iterator.
3801 * @param __last An input iterator.
3802 * @param __pred A predicate.
3803 * @return The first iterator @c i in the range @p [__first,__last)
3804 * such that @p __pred(*i) is true, or @p __last if no such iterator exists.
3805 */
3806 template<typename _InputIterator, typename _Predicate>
3807 inline _InputIterator
3808 find_if(_InputIterator __first, _InputIterator __last,
3809 _Predicate __pred)
3810 {
3811 // concept requirements
3812 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3813 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
3814 typename iterator_traits<_InputIterator>::value_type>)
3815 __glibcxx_requires_valid_range(__first, __last);
3816
      // Delegate to __find_if, adapting the value predicate to iterators.
3817 return std::__find_if(__first, __last,
3818 __gnu_cxx::__ops::__pred_iter(__pred));
3819 }
3820
3821 /**
3822 * @brief Find element from a set in a sequence.
3823 * @ingroup non_mutating_algorithms
3824 * @param __first1 Start of range to search.
3825 * @param __last1 End of range to search.
3826 * @param __first2 Start of match candidates.
3827 * @param __last2 End of match candidates.
3828 * @return The first iterator @c i in the range
3829 * @p [__first1,__last1) such that @c *i == @p *(i2) such that i2 is an
3830 * iterator in [__first2,__last2), or @p __last1 if no such iterator exists.
3831 *
3832 * Searches the range @p [__first1,__last1) for an element that is
3833 * equal to some element in the range [__first2,__last2). If
3834 * found, returns an iterator in the range [__first1,__last1),
3835 * otherwise returns @p __last1.
3836 */
3837 template<typename _InputIterator, typename _ForwardIterator>
3838 _InputIterator
3839 find_first_of(_InputIterator __first1, _InputIterator __last1,
3840 _ForwardIterator __first2, _ForwardIterator __last2)
3841 {
3842 // concept requirements
3843 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3844 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3845 __glibcxx_function_requires(_EqualOpConcept<
3846 typename iterator_traits<_InputIterator>::value_type,
3847 typename iterator_traits<_ForwardIterator>::value_type>)
3848 __glibcxx_requires_valid_range(__first1, __last1);
3849 __glibcxx_requires_valid_range(__first2, __last2);
3850
      // Direct nested scan: O(|range1| * |range2|) in the worst case.
3851 for (; __first1 != __last1; ++__first1)
3852 for (_ForwardIterator __iter = __first2; __iter != __last2; ++__iter)
3853 if (*__first1 == *__iter)
3854 return __first1;
3855 return __last1;
3856 }
3857
3858 /**
3859 * @brief Find element from a set in a sequence using a predicate.
3860 * @ingroup non_mutating_algorithms
3861 * @param __first1 Start of range to search.
3862 * @param __last1 End of range to search.
3863 * @param __first2 Start of match candidates.
3864 * @param __last2 End of match candidates.
3865 * @param __comp Predicate to use.
3866 * @return The first iterator @c i in the range
3867 * @p [__first1,__last1) such that @c comp(*i, @p *(i2)) is true
3868 * and i2 is an iterator in [__first2,__last2), or @p __last1 if no
3869 * such iterator exists.
3870 *
3871
3872 * Searches the range @p [__first1,__last1) for an element that is
3873 * equal to some element in the range [__first2,__last2). If
3874 * found, returns an iterator in the range [__first1,__last1),
3875 * otherwise returns @p __last1.
3876 */
3877 template<typename _InputIterator, typename _ForwardIterator,
3878 typename _BinaryPredicate>
3879 _InputIterator
3880 find_first_of(_InputIterator __first1, _InputIterator __last1,
3881 _ForwardIterator __first2, _ForwardIterator __last2,
3882 _BinaryPredicate __comp)
3883 {
3884 // concept requirements
3885 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3886 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3887 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
3888 typename iterator_traits<_InputIterator>::value_type,
3889 typename iterator_traits<_ForwardIterator>::value_type>)
3890 __glibcxx_requires_valid_range(__first1, __last1);
3891 __glibcxx_requires_valid_range(__first2, __last2);
3892
      // Direct nested scan: O(|range1| * |range2|) in the worst case.
3893 for (; __first1 != __last1; ++__first1)
3894 for (_ForwardIterator __iter = __first2; __iter != __last2; ++__iter)
3895 if (__comp(*__first1, *__iter))
3896 return __first1;
3897 return __last1;
3898 }
3899
3900 /**
3901 * @brief Find two adjacent values in a sequence that are equal.
3902 * @ingroup non_mutating_algorithms
3903 * @param __first A forward iterator.
3904 * @param __last A forward iterator.
3905 * @return The first iterator @c i such that @c i and @c i+1 are both
3906 * valid iterators in @p [__first,__last) and such that @c *i == @c *(i+1),
3907 * or @p __last if no such iterator exists.
3908 */
3909 template<typename _ForwardIterator>
3910 inline _ForwardIterator
3911 adjacent_find(_ForwardIterator __first, _ForwardIterator __last)
3912 {
3913 // concept requirements
3914 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3915 __glibcxx_function_requires(_EqualityComparableConcept<
3916 typename iterator_traits<_ForwardIterator>::value_type>)
3917 __glibcxx_requires_valid_range(__first, __last);
3918
      // Delegate to __adjacent_find using operator== on the elements.
3919 return std::__adjacent_find(__first, __last,
3920 __gnu_cxx::__ops::__iter_equal_to_iter());
3921 }
3922
3923 /**
3924 * @brief Find two adjacent values in a sequence using a predicate.
3925 * @ingroup non_mutating_algorithms
3926 * @param __first A forward iterator.
3927 * @param __last A forward iterator.
3928 * @param __binary_pred A binary predicate.
3929 * @return The first iterator @c i such that @c i and @c i+1 are both
3930 * valid iterators in @p [__first,__last) and such that
3931 * @p __binary_pred(*i,*(i+1)) is true, or @p __last if no such iterator
3932 * exists.
3933 */
3934 template<typename _ForwardIterator, typename _BinaryPredicate>
3935 inline _ForwardIterator
3936 adjacent_find(_ForwardIterator __first, _ForwardIterator __last,
3937 _BinaryPredicate __binary_pred)
3938 {
3939 // concept requirements
3940 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
3941 __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
3942 typename iterator_traits<_ForwardIterator>::value_type,
3943 typename iterator_traits<_ForwardIterator>::value_type>)
3944 __glibcxx_requires_valid_range(__first, __last);
3945
      // Delegate to __adjacent_find, adapting the predicate to iterators.
3946 return std::__adjacent_find(__first, __last,
3947 __gnu_cxx::__ops::__iter_comp_iter(__binary_pred));
3948 }
3949
3950 /**
3951 * @brief Count the number of copies of a value in a sequence.
3952 * @ingroup non_mutating_algorithms
3953 * @param __first An input iterator.
3954 * @param __last An input iterator.
3955 * @param __value The value to be counted.
3956 * @return The number of iterators @c i in the range @p [__first,__last)
3957 * for which @c *i == @p __value
3958 */
3959 template<typename _InputIterator, typename _Tp>
3960 inline typename iterator_traits<_InputIterator>::difference_type
3961 count(_InputIterator __first, _InputIterator __last, const _Tp& __value)
3962 {
3963 // concept requirements
3964 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3965 __glibcxx_function_requires(_EqualOpConcept<
3966 typename iterator_traits<_InputIterator>::value_type, _Tp>)
3967 __glibcxx_requires_valid_range(__first, __last);
3968
      // Delegate to __count_if with an equals-value iterator adaptor.
3969 return std::__count_if(__first, __last,
3970 __gnu_cxx::__ops::__iter_equals_val(__value));
3971 }
3972
3973 /**
3974 * @brief Count the elements of a sequence for which a predicate is true.
3975 * @ingroup non_mutating_algorithms
3976 * @param __first An input iterator.
3977 * @param __last An input iterator.
3978 * @param __pred A predicate.
3979 * @return The number of iterators @c i in the range @p [__first,__last)
3980 * for which @p __pred(*i) is true.
3981 */
3982 template<typename _InputIterator, typename _Predicate>
3983 inline typename iterator_traits<_InputIterator>::difference_type
3984 count_if(_InputIterator __first, _InputIterator __last, _Predicate __pred)
3985 {
3986 // concept requirements
3987 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
3988 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
3989 typename iterator_traits<_InputIterator>::value_type>)
3990 __glibcxx_requires_valid_range(__first, __last);
3991
      // Delegate to __count_if, adapting the value predicate to iterators.
3992 return std::__count_if(__first, __last,
3993 __gnu_cxx::__ops::__pred_iter(__pred));
3994 }
3995
3996 /**
3997 * @brief Search a sequence for a matching sub-sequence.
3998 * @ingroup non_mutating_algorithms
3999 * @param __first1 A forward iterator.
4000 * @param __last1 A forward iterator.
4001 * @param __first2 A forward iterator.
4002 * @param __last2 A forward iterator.
4003 * @return The first iterator @c i in the range @p
4004 * [__first1,__last1-(__last2-__first2)) such that @c *(i+N) == @p
4005 * *(__first2+N) for each @c N in the range @p
4006 * [0,__last2-__first2), or @p __last1 if no such iterator exists.
4007 *
4008 * Searches the range @p [__first1,__last1) for a sub-sequence that
4009 * compares equal value-by-value with the sequence given by @p
4010 * [__first2,__last2) and returns an iterator to the first element
4011 * of the sub-sequence, or @p __last1 if the sub-sequence is not
4012 * found.
4013 *
4014 * Because the sub-sequence must lie completely within the range @p
4015 * [__first1,__last1) it must start at a position less than @p
4016 * __last1-(__last2-__first2) where @p __last2-__first2 is the
4017 * length of the sub-sequence.
4018 *
4019 * This means that the returned iterator @c i will be in the range
4020 * @p [__first1,__last1-(__last2-__first2))
4021 */
4022 template<typename _ForwardIterator1, typename _ForwardIterator2>
4023 inline _ForwardIterator1
4024 search(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
4025 _ForwardIterator2 __first2, _ForwardIterator2 __last2)
4026 {
4027 // concept requirements
4028 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
4029 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
4030 __glibcxx_function_requires(_EqualOpConcept<
4031 typename iterator_traits<_ForwardIterator1>::value_type,
4032 typename iterator_traits<_ForwardIterator2>::value_type>)
4033 __glibcxx_requires_valid_range(__first1, __last1);
4034 __glibcxx_requires_valid_range(__first2, __last2);
4035
      // Delegate to the internal __search using operator== on elements.
4036 return std::__search(__first1, __last1, __first2, __last2,
4037 __gnu_cxx::__ops::__iter_equal_to_iter());
4038 }
4039
4040 /**
4041 * @brief Search a sequence for a matching sub-sequence using a predicate.
4042 * @ingroup non_mutating_algorithms
4043 * @param __first1 A forward iterator.
4044 * @param __last1 A forward iterator.
4045 * @param __first2 A forward iterator.
4046 * @param __last2 A forward iterator.
4047 * @param __predicate A binary predicate.
4048 * @return The first iterator @c i in the range
4049 * @p [__first1,__last1-(__last2-__first2)) such that
4050 * @p __predicate(*(i+N),*(__first2+N)) is true for each @c N in the range
4051 * @p [0,__last2-__first2), or @p __last1 if no such iterator exists.
4052 *
4053 * Searches the range @p [__first1,__last1) for a sub-sequence that
4054 * compares equal value-by-value with the sequence given by @p
4055 * [__first2,__last2), using @p __predicate to determine equality,
4056 * and returns an iterator to the first element of the
4057 * sub-sequence, or @p __last1 if no such iterator exists.
4058 *
4059 * @see search(_ForwardIter1, _ForwardIter1, _ForwardIter2, _ForwardIter2)
4060 */
  // Predicate overload of search: equality of elements is decided by
  // __predicate(*i1, *i2) rather than operator==.
  template<typename _ForwardIterator1, typename _ForwardIterator2,
	   typename _BinaryPredicate>
    inline _ForwardIterator1
    search(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
	   _ForwardIterator2 __first2, _ForwardIterator2 __last2,
	   _BinaryPredicate  __predicate)
    {
      // concept requirements
      __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator1>)
      __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator2>)
      __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
	    typename iterator_traits<_ForwardIterator1>::value_type,
	    typename iterator_traits<_ForwardIterator2>::value_type>)
      __glibcxx_requires_valid_range(__first1, __last1);
      __glibcxx_requires_valid_range(__first2, __last2);

      // __iter_comp_iter wraps the user predicate for the worker.
      return std::__search(__first1, __last1, __first2, __last2,
			   __gnu_cxx::__ops::__iter_comp_iter(__predicate));
    }
4080
4081 /**
4082 * @brief Search a sequence for a number of consecutive values.
4083 * @ingroup non_mutating_algorithms
4084 * @param __first A forward iterator.
4085 * @param __last A forward iterator.
4086 * @param __count The number of consecutive values.
4087 * @param __val The value to find.
4088 * @return The first iterator @c i in the range @p
4089 * [__first,__last-__count) such that @c *(i+N) == @p __val for
4090 * each @c N in the range @p [0,__count), or @p __last if no such
4091 * iterator exists.
4092 *
4093 * Searches the range @p [__first,__last) for @p count consecutive elements
4094 * equal to @p __val.
4095 */
  // Finds the first run of __count consecutive elements equal to __val.
  template<typename _ForwardIterator, typename _Integer, typename _Tp>
    inline _ForwardIterator
    search_n(_ForwardIterator __first, _ForwardIterator __last,
	     _Integer __count, const _Tp& __val)
    {
      // concept requirements
      __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
      __glibcxx_function_requires(_EqualOpConcept<
	    typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
      __glibcxx_requires_valid_range(__first, __last);

      // __iter_equals_val captures __val by reference for the worker.
      return std::__search_n(__first, __last, __count,
			     __gnu_cxx::__ops::__iter_equals_val(__val));
    }
4110
4111
4112 /**
4113 * @brief Search a sequence for a number of consecutive values using a
4114 * predicate.
4115 * @ingroup non_mutating_algorithms
4116 * @param __first A forward iterator.
4117 * @param __last A forward iterator.
4118 * @param __count The number of consecutive values.
4119 * @param __val The value to find.
4120 * @param __binary_pred A binary predicate.
4121 * @return The first iterator @c i in the range @p
4122 * [__first,__last-__count) such that @p
4123 * __binary_pred(*(i+N),__val) is true for each @c N in the range
4124 * @p [0,__count), or @p __last if no such iterator exists.
4125 *
4126 * Searches the range @p [__first,__last) for @p __count
4127 * consecutive elements for which the predicate returns true.
4128 */
  // Predicate overload of search_n: a run element matches when
  // __binary_pred(*i, __val) is true.
  template<typename _ForwardIterator, typename _Integer, typename _Tp,
	   typename _BinaryPredicate>
    inline _ForwardIterator
    search_n(_ForwardIterator __first, _ForwardIterator __last,
	     _Integer __count, const _Tp& __val,
	     _BinaryPredicate __binary_pred)
    {
      // concept requirements
      __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
      __glibcxx_function_requires(_BinaryPredicateConcept<_BinaryPredicate,
	    typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
      __glibcxx_requires_valid_range(__first, __last);

      // __iter_comp_val binds __val as the fixed second argument.
      return std::__search_n(__first, __last, __count,
		__gnu_cxx::__ops::__iter_comp_val(__binary_pred, __val));
    }
4145
4146
4147 /**
4148 * @brief Perform an operation on a sequence.
4149 * @ingroup mutating_algorithms
4150 * @param __first An input iterator.
4151 * @param __last An input iterator.
4152 * @param __result An output iterator.
4153 * @param __unary_op A unary operator.
4154 * @return An output iterator equal to @p __result+(__last-__first).
4155 *
4156 * Applies the operator to each element in the input range and assigns
4157 * the results to successive elements of the output sequence.
4158 * Evaluates @p *(__result+N)=unary_op(*(__first+N)) for each @c N in the
4159 * range @p [0,__last-__first).
4160 *
4161 * @p unary_op must not alter its argument.
4162 */
4163 template<typename _InputIterator, typename _OutputIterator,
4164 typename _UnaryOperation>
4165 _OutputIterator
4166 transform(_InputIterator __first, _InputIterator __last,
4167 _OutputIterator __result, _UnaryOperation __unary_op)
4168 {
4169 // concept requirements
4170 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
4171 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4172 // "the type returned by a _UnaryOperation"
4173 __typeof__(__unary_op(*__first))>)
4174 __glibcxx_requires_valid_range(__first, __last);
4175
4176 for (; __first != __last; ++__first, (void)++__result)
4177 *__result = __unary_op(*__first);
4178 return __result;
4179 }
4180
4181 /**
4182 * @brief Perform an operation on corresponding elements of two sequences.
4183 * @ingroup mutating_algorithms
4184 * @param __first1 An input iterator.
4185 * @param __last1 An input iterator.
4186 * @param __first2 An input iterator.
4187 * @param __result An output iterator.
4188 * @param __binary_op A binary operator.
4189 * @return An output iterator equal to @p result+(last-first).
4190 *
4191 * Applies the operator to the corresponding elements in the two
4192 * input ranges and assigns the results to successive elements of the
4193 * output sequence.
4194 * Evaluates @p
4195 * *(__result+N)=__binary_op(*(__first1+N),*(__first2+N)) for each
4196 * @c N in the range @p [0,__last1-__first1).
4197 *
4198 * @p binary_op must not alter either of its arguments.
4199 */
4200 template<typename _InputIterator1, typename _InputIterator2,
4201 typename _OutputIterator, typename _BinaryOperation>
4202 _OutputIterator
4203 transform(_InputIterator1 __first1, _InputIterator1 __last1,
4204 _InputIterator2 __first2, _OutputIterator __result,
4205 _BinaryOperation __binary_op)
4206 {
4207 // concept requirements
4208 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
4209 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
4210 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4211 // "the type returned by a _BinaryOperation"
4212 __typeof__(__binary_op(*__first1,*__first2))>)
4213 __glibcxx_requires_valid_range(__first1, __last1);
4214
4215 for (; __first1 != __last1; ++__first1, (void)++__first2, ++__result)
4216 *__result = __binary_op(*__first1, *__first2);
4217 return __result;
4218 }
4219
4220 /**
4221 * @brief Replace each occurrence of one value in a sequence with another
4222 * value.
4223 * @ingroup mutating_algorithms
4224 * @param __first A forward iterator.
4225 * @param __last A forward iterator.
4226 * @param __old_value The value to be replaced.
4227 * @param __new_value The replacement value.
4228 * @return replace() returns no value.
4229 *
4230 * For each iterator @c i in the range @p [__first,__last) if @c *i ==
4231 * @p __old_value then the assignment @c *i = @p __new_value is performed.
4232 */
4233 template<typename _ForwardIterator, typename _Tp>
4234 void
4235 replace(_ForwardIterator __first, _ForwardIterator __last,
4236 const _Tp& __old_value, const _Tp& __new_value)
4237 {
4238 // concept requirements
4239 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
4240 _ForwardIterator>)
4241 __glibcxx_function_requires(_EqualOpConcept<
4242 typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
4243 __glibcxx_function_requires(_ConvertibleConcept<_Tp,
4244 typename iterator_traits<_ForwardIterator>::value_type>)
4245 __glibcxx_requires_valid_range(__first, __last);
4246
4247 for (; __first != __last; ++__first)
4248 if (*__first == __old_value)
4249 *__first = __new_value;
4250 }
4251
4252 /**
4253 * @brief Replace each value in a sequence for which a predicate returns
4254 * true with another value.
4255 * @ingroup mutating_algorithms
4256 * @param __first A forward iterator.
4257 * @param __last A forward iterator.
4258 * @param __pred A predicate.
4259 * @param __new_value The replacement value.
4260 * @return replace_if() returns no value.
4261 *
4262 * For each iterator @c i in the range @p [__first,__last) if @p __pred(*i)
4263 * is true then the assignment @c *i = @p __new_value is performed.
4264 */
4265 template<typename _ForwardIterator, typename _Predicate, typename _Tp>
4266 void
4267 replace_if(_ForwardIterator __first, _ForwardIterator __last,
4268 _Predicate __pred, const _Tp& __new_value)
4269 {
4270 // concept requirements
4271 __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
4272 _ForwardIterator>)
4273 __glibcxx_function_requires(_ConvertibleConcept<_Tp,
4274 typename iterator_traits<_ForwardIterator>::value_type>)
4275 __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
4276 typename iterator_traits<_ForwardIterator>::value_type>)
4277 __glibcxx_requires_valid_range(__first, __last);
4278
4279 for (; __first != __last; ++__first)
4280 if (__pred(*__first))
4281 *__first = __new_value;
4282 }
4283
4284 /**
4285 * @brief Assign the result of a function object to each value in a
4286 * sequence.
4287 * @ingroup mutating_algorithms
4288 * @param __first A forward iterator.
4289 * @param __last A forward iterator.
4290 * @param __gen A function object taking no arguments and returning
4291 * std::iterator_traits<_ForwardIterator>::value_type
4292 * @return generate() returns no value.
4293 *
4294 * Performs the assignment @c *i = @p __gen() for each @c i in the range
4295 * @p [__first,__last).
4296 */
4297 template<typename _ForwardIterator, typename _Generator>
4298 void
4299 generate(_ForwardIterator __first, _ForwardIterator __last,
4300 _Generator __gen)
4301 {
4302 // concept requirements
4303 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
4304 __glibcxx_function_requires(_GeneratorConcept<_Generator,
4305 typename iterator_traits<_ForwardIterator>::value_type>)
4306 __glibcxx_requires_valid_range(__first, __last);
4307
4308 for (; __first != __last; ++__first)
4309 *__first = __gen();
4310 }
4311
4312 /**
4313 * @brief Assign the result of a function object to each value in a
4314 * sequence.
4315 * @ingroup mutating_algorithms
4316 * @param __first A forward iterator.
4317 * @param __n The length of the sequence.
4318 * @param __gen A function object taking no arguments and returning
4319 * std::iterator_traits<_ForwardIterator>::value_type
4320 * @return The end of the sequence, @p __first+__n
4321 *
4322 * Performs the assignment @c *i = @p __gen() for each @c i in the range
4323 * @p [__first,__first+__n).
4324 *
4325 * _GLIBCXX_RESOLVE_LIB_DEFECTS
4326 * DR 865. More algorithms that throw away information
4327 */
4328 template<typename _OutputIterator, typename _Size, typename _Generator>
4329 _OutputIterator
4330 generate_n(_OutputIterator __first, _Size __n, _Generator __gen)
4331 {
4332 // concept requirements
4333 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4334 // "the type returned by a _Generator"
4335 __typeof__(__gen())>)
4336
4337 for (__decltype(__n + 0) __niter = __n;
4338 __niter > 0; --__niter, ++__first)
4339 *__first = __gen();
4340 return __first;
4341 }
4342
4343 /**
4344 * @brief Copy a sequence, removing consecutive duplicate values.
4345 * @ingroup mutating_algorithms
4346 * @param __first An input iterator.
4347 * @param __last An input iterator.
4348 * @param __result An output iterator.
4349 * @return An iterator designating the end of the resulting sequence.
4350 *
4351 * Copies each element in the range @p [__first,__last) to the range
4352 * beginning at @p __result, except that only the first element is copied
4353 * from groups of consecutive elements that compare equal.
4354 * unique_copy() is stable, so the relative order of elements that are
4355 * copied is unchanged.
4356 *
4357 * _GLIBCXX_RESOLVE_LIB_DEFECTS
4358 * DR 241. Does unique_copy() require CopyConstructible and Assignable?
4359 *
4360 * _GLIBCXX_RESOLVE_LIB_DEFECTS
4361 * DR 538. 241 again: Does unique_copy() require CopyConstructible and
4362 * Assignable?
4363 */
  // Copies [__first,__last) to __result, dropping consecutive duplicates
  // (only the first of each equal run is written).
  template<typename _InputIterator, typename _OutputIterator>
    inline _OutputIterator
    unique_copy(_InputIterator __first, _InputIterator __last,
		_OutputIterator __result)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
      __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
	    typename iterator_traits<_InputIterator>::value_type>)
      __glibcxx_function_requires(_EqualityComparableConcept<
	    typename iterator_traits<_InputIterator>::value_type>)
      __glibcxx_requires_valid_range(__first, __last);

      // An empty input produces an empty output; the worker assumes a
      // non-empty range.
      if (__first == __last)
	return __result;
      // Dispatch on both iterator categories: the worker needs either a
      // re-readable input or a re-readable output to remember the last
      // element written.
      return std::__unique_copy(__first, __last, __result,
				__gnu_cxx::__ops::__iter_equal_to_iter(),
				std::__iterator_category(__first),
				std::__iterator_category(__result));
    }
4384
4385 /**
4386 * @brief Copy a sequence, removing consecutive values using a predicate.
4387 * @ingroup mutating_algorithms
4388 * @param __first An input iterator.
4389 * @param __last An input iterator.
4390 * @param __result An output iterator.
4391 * @param __binary_pred A binary predicate.
4392 * @return An iterator designating the end of the resulting sequence.
4393 *
4394 * Copies each element in the range @p [__first,__last) to the range
4395 * beginning at @p __result, except that only the first element is copied
4396 * from groups of consecutive elements for which @p __binary_pred returns
4397 * true.
4398 * unique_copy() is stable, so the relative order of elements that are
4399 * copied is unchanged.
4400 *
4401 * _GLIBCXX_RESOLVE_LIB_DEFECTS
4402 * DR 241. Does unique_copy() require CopyConstructible and Assignable?
4403 */
  // Predicate overload of unique_copy: consecutive elements are regarded
  // as duplicates when __binary_pred returns true.
  template<typename _InputIterator, typename _OutputIterator,
	   typename _BinaryPredicate>
    inline _OutputIterator
    unique_copy(_InputIterator __first, _InputIterator __last,
		_OutputIterator __result,
		_BinaryPredicate __binary_pred)
    {
      // concept requirements -- predicates checked later
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator>)
      __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
	    typename iterator_traits<_InputIterator>::value_type>)
      __glibcxx_requires_valid_range(__first, __last);

      // An empty input produces an empty output; the worker assumes a
      // non-empty range.
      if (__first == __last)
	return __result;
      // Dispatch on both iterator categories (see the operator== overload).
      return std::__unique_copy(__first, __last, __result,
			__gnu_cxx::__ops::__iter_comp_iter(__binary_pred),
				std::__iterator_category(__first),
				std::__iterator_category(__result));
    }
4424
4425#if _GLIBCXX_HOSTED1
4426 /**
4427 * @brief Randomly shuffle the elements of a sequence.
4428 * @ingroup mutating_algorithms
4429 * @param __first A forward iterator.
4430 * @param __last A forward iterator.
4431 * @return Nothing.
4432 *
4433 * Reorder the elements in the range @p [__first,__last) using a random
4434 * distribution, so that every possible ordering of the sequence is
4435 * equally likely.
4436 */
4437 template<typename _RandomAccessIterator>
4438 inline void
4439 random_shuffle(_RandomAccessIterator __first, _RandomAccessIterator __last)
4440 {
4441 // concept requirements
4442 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4443 _RandomAccessIterator>)
4444 __glibcxx_requires_valid_range(__first, __last);
4445
4446 if (__first != __last)
4447 for (_RandomAccessIterator __i = __first + 1; __i != __last; ++__i)
4448 {
4449 // XXX rand() % N is not uniformly distributed
4450 _RandomAccessIterator __j = __first
4451 + std::rand() % ((__i - __first) + 1);
4452 if (__i != __j)
4453 std::iter_swap(__i, __j);
4454 }
4455 }
4456#endif
4457
4458 /**
4459 * @brief Shuffle the elements of a sequence using a random number
4460 * generator.
4461 * @ingroup mutating_algorithms
4462 * @param __first A forward iterator.
4463 * @param __last A forward iterator.
4464 * @param __rand The RNG functor or function.
4465 * @return Nothing.
4466 *
4467 * Reorders the elements in the range @p [__first,__last) using @p __rand to
4468 * provide a random distribution. Calling @p __rand(N) for a positive
4469 * integer @p N should return a randomly chosen integer from the
4470 * range [0,N).
4471 */
4472 template<typename _RandomAccessIterator, typename _RandomNumberGenerator>
4473 void
4474 random_shuffle(_RandomAccessIterator __first, _RandomAccessIterator __last,
4475#if __cplusplus201402L >= 201103L
4476 _RandomNumberGenerator&& __rand)
4477#else
4478 _RandomNumberGenerator& __rand)
4479#endif
4480 {
4481 // concept requirements
4482 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4483 _RandomAccessIterator>)
4484 __glibcxx_requires_valid_range(__first, __last);
4485
4486 if (__first == __last)
4487 return;
4488 for (_RandomAccessIterator __i = __first + 1; __i != __last; ++__i)
4489 {
4490 _RandomAccessIterator __j = __first + __rand((__i - __first) + 1);
4491 if (__i != __j)
4492 std::iter_swap(__i, __j);
4493 }
4494 }
4495
4496
4497 /**
4498 * @brief Move elements for which a predicate is true to the beginning
4499 * of a sequence.
4500 * @ingroup mutating_algorithms
4501 * @param __first A forward iterator.
4502 * @param __last A forward iterator.
4503 * @param __pred A predicate functor.
4504 * @return An iterator @p middle such that @p __pred(i) is true for each
4505 * iterator @p i in the range @p [__first,middle) and false for each @p i
4506 * in the range @p [middle,__last).
4507 *
4508 * @p __pred must not modify its operand. @p partition() does not preserve
4509 * the relative ordering of elements in each group, use
4510 * @p stable_partition() if this is needed.
4511 */
  // Moves elements satisfying __pred before those that do not and returns
  // the partition point; relative order within each group is unspecified.
  template<typename _ForwardIterator, typename _Predicate>
    inline _ForwardIterator
    partition(_ForwardIterator __first, _ForwardIterator __last,
	      _Predicate   __pred)
    {
      // concept requirements
      __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
				  _ForwardIterator>)
      __glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
	    typename iterator_traits<_ForwardIterator>::value_type>)
      __glibcxx_requires_valid_range(__first, __last);

      // The worker picks a forward- or bidirectional-iterator algorithm
      // based on the iterator category tag.
      return std::__partition(__first, __last, __pred,
			      std::__iterator_category(__first));
    }
4527
4528
4529 /**
4530 * @brief Sort the smallest elements of a sequence.
4531 * @ingroup sorting_algorithms
4532 * @param __first An iterator.
4533 * @param __middle Another iterator.
4534 * @param __last Another iterator.
4535 * @return Nothing.
4536 *
4537 * Sorts the smallest @p (__middle-__first) elements in the range
4538 * @p [first,last) and moves them to the range @p [__first,__middle). The
4539 * order of the remaining elements in the range @p [__middle,__last) is
4540 * undefined.
4541 * After the sort if @e i and @e j are iterators in the range
4542 * @p [__first,__middle) such that i precedes j and @e k is an iterator in
4543 * the range @p [__middle,__last) then *j<*i and *k<*i are both false.
4544 */
  // Sorts the smallest (__middle - __first) elements of [__first,__last)
  // into [__first,__middle) using operator<.
  template<typename _RandomAccessIterator>
    inline void
    partial_sort(_RandomAccessIterator __first,
		 _RandomAccessIterator __middle,
		 _RandomAccessIterator __last)
    {
      // concept requirements
      __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
	    _RandomAccessIterator>)
      __glibcxx_function_requires(_LessThanComparableConcept<
	    typename iterator_traits<_RandomAccessIterator>::value_type>)
      __glibcxx_requires_valid_range(__first, __middle);
      __glibcxx_requires_valid_range(__middle, __last);
      __glibcxx_requires_irreflexive(__first, __last);

      // Delegate to the unchecked worker with an operator< functor.
      std::__partial_sort(__first, __middle, __last,
			  __gnu_cxx::__ops::__iter_less_iter());
    }
4563
4564 /**
4565 * @brief Sort the smallest elements of a sequence using a predicate
4566 * for comparison.
4567 * @ingroup sorting_algorithms
4568 * @param __first An iterator.
4569 * @param __middle Another iterator.
4570 * @param __last Another iterator.
4571 * @param __comp A comparison functor.
4572 * @return Nothing.
4573 *
4574 * Sorts the smallest @p (__middle-__first) elements in the range
4575 * @p [__first,__last) and moves them to the range @p [__first,__middle). The
4576 * order of the remaining elements in the range @p [__middle,__last) is
4577 * undefined.
4578 * After the sort if @e i and @e j are iterators in the range
4579 * @p [__first,__middle) such that i precedes j and @e k is an iterator in
4580 * the range @p [__middle,__last) then @p *__comp(j,*i) and @p __comp(*k,*i)
4581 * are both false.
4582 */
  // Comparator overload of partial_sort: ordering is defined by __comp.
  template<typename _RandomAccessIterator, typename _Compare>
    inline void
    partial_sort(_RandomAccessIterator __first,
		 _RandomAccessIterator __middle,
		 _RandomAccessIterator __last,
		 _Compare __comp)
    {
      // concept requirements
      __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
	    _RandomAccessIterator>)
      __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
	    typename iterator_traits<_RandomAccessIterator>::value_type,
	    typename iterator_traits<_RandomAccessIterator>::value_type>)
      __glibcxx_requires_valid_range(__first, __middle);
      __glibcxx_requires_valid_range(__middle, __last);
      // __comp must be a strict weak ordering (irreflexive).
      __glibcxx_requires_irreflexive_pred(__first, __last, __comp);

      std::__partial_sort(__first, __middle, __last,
			  __gnu_cxx::__ops::__iter_comp_iter(__comp));
    }
4603
4604 /**
4605 * @brief Sort a sequence just enough to find a particular position.
4606 * @ingroup sorting_algorithms
4607 * @param __first An iterator.
4608 * @param __nth Another iterator.
4609 * @param __last Another iterator.
4610 * @return Nothing.
4611 *
4612 * Rearranges the elements in the range @p [__first,__last) so that @p *__nth
4613 * is the same element that would have been in that position had the
4614 * whole sequence been sorted. The elements either side of @p *__nth are
4615 * not completely sorted, but for any iterator @e i in the range
4616 * @p [__first,__nth) and any iterator @e j in the range @p [__nth,__last) it
4617 * holds that *j < *i is false.
4618 */
  // Places the element that would be at position __nth in a full sort at
  // *__nth, partitioning the rest around it (operator< ordering).
  template<typename _RandomAccessIterator>
    inline void
    nth_element(_RandomAccessIterator __first, _RandomAccessIterator __nth,
		_RandomAccessIterator __last)
    {
      // concept requirements
      __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
				  _RandomAccessIterator>)
      __glibcxx_function_requires(_LessThanComparableConcept<
	    typename iterator_traits<_RandomAccessIterator>::value_type>)
      __glibcxx_requires_valid_range(__first, __nth);
      __glibcxx_requires_valid_range(__nth, __last);
      __glibcxx_requires_irreflexive(__first, __last);

      // Empty range, or __nth == __last: nothing to select.
      if (__first == __last || __nth == __last)
	return;

      // 2*log2(N) bounds the selection recursion depth before the worker
      // falls back to a guaranteed strategy.
      std::__introselect(__first, __nth, __last,
			 std::__lg(__last - __first) * 2,
			 __gnu_cxx::__ops::__iter_less_iter());
    }
4640
4641 /**
4642 * @brief Sort a sequence just enough to find a particular position
4643 * using a predicate for comparison.
4644 * @ingroup sorting_algorithms
4645 * @param __first An iterator.
4646 * @param __nth Another iterator.
4647 * @param __last Another iterator.
4648 * @param __comp A comparison functor.
4649 * @return Nothing.
4650 *
4651 * Rearranges the elements in the range @p [__first,__last) so that @p *__nth
4652 * is the same element that would have been in that position had the
4653 * whole sequence been sorted. The elements either side of @p *__nth are
4654 * not completely sorted, but for any iterator @e i in the range
4655 * @p [__first,__nth) and any iterator @e j in the range @p [__nth,__last) it
4656 * holds that @p __comp(*j,*i) is false.
4657 */
  // Comparator overload of nth_element: ordering is defined by __comp.
  template<typename _RandomAccessIterator, typename _Compare>
    inline void
    nth_element(_RandomAccessIterator __first, _RandomAccessIterator __nth,
		_RandomAccessIterator __last, _Compare __comp)
    {
      // concept requirements
      __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
				  _RandomAccessIterator>)
      __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
	    typename iterator_traits<_RandomAccessIterator>::value_type,
	    typename iterator_traits<_RandomAccessIterator>::value_type>)
      __glibcxx_requires_valid_range(__first, __nth);
      __glibcxx_requires_valid_range(__nth, __last);
      // __comp must be a strict weak ordering (irreflexive).
      __glibcxx_requires_irreflexive_pred(__first, __last, __comp);

      // Empty range, or __nth == __last: nothing to select.
      if (__first == __last || __nth == __last)
	return;

      // 2*log2(N) depth limit, as in the operator< overload.
      std::__introselect(__first, __nth, __last,
			 std::__lg(__last - __first) * 2,
			 __gnu_cxx::__ops::__iter_comp_iter(__comp));
    }
4680
4681 /**
4682 * @brief Sort the elements of a sequence.
4683 * @ingroup sorting_algorithms
4684 * @param __first An iterator.
4685 * @param __last Another iterator.
4686 * @return Nothing.
4687 *
4688 * Sorts the elements in the range @p [__first,__last) in ascending order,
4689 * such that for each iterator @e i in the range @p [__first,__last-1),
4690 * *(i+1)<*i is false.
4691 *
4692 * The relative ordering of equivalent elements is not preserved, use
4693 * @p stable_sort() if this is needed.
4694 */
  // Sorts [__first,__last) in ascending order using operator<; the
  // relative order of equivalent elements is not preserved.
  template<typename _RandomAccessIterator>
    inline void
    sort(_RandomAccessIterator __first, _RandomAccessIterator __last)
    {
      // concept requirements
      __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
	    _RandomAccessIterator>)
      __glibcxx_function_requires(_LessThanComparableConcept<
	    typename iterator_traits<_RandomAccessIterator>::value_type>)
      __glibcxx_requires_valid_range(__first, __last);
      __glibcxx_requires_irreflexive(__first, __last);

      // Delegate to the unchecked worker with an operator< functor.
      std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
    }
4709
4710 /**
4711 * @brief Sort the elements of a sequence using a predicate for comparison.
4712 * @ingroup sorting_algorithms
4713 * @param __first An iterator.
4714 * @param __last Another iterator.
4715 * @param __comp A comparison functor.
4716 * @return Nothing.
4717 *
4718 * Sorts the elements in the range @p [__first,__last) in ascending order,
4719 * such that @p __comp(*(i+1),*i) is false for every iterator @e i in the
4720 * range @p [__first,__last-1).
4721 *
4722 * The relative ordering of equivalent elements is not preserved, use
4723 * @p stable_sort() if this is needed.
4724 */
  // Comparator overload of sort: ordering is defined by __comp, which
  // must be a strict weak ordering.
  template<typename _RandomAccessIterator, typename _Compare>
    inline void
    sort(_RandomAccessIterator __first, _RandomAccessIterator __last,
	 _Compare __comp)
    {
      // concept requirements
      __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
	    _RandomAccessIterator>)
      __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
	    typename iterator_traits<_RandomAccessIterator>::value_type,
	    typename iterator_traits<_RandomAccessIterator>::value_type>)
      __glibcxx_requires_valid_range(__first, __last);
      // __comp must be irreflexive: __comp(x, x) == false.
      __glibcxx_requires_irreflexive_pred(__first, __last, __comp);

      std::__sort(__first, __last, __gnu_cxx::__ops::__iter_comp_iter(__comp));
    }
4741
4742 template<typename _InputIterator1, typename _InputIterator2,
4743 typename _OutputIterator, typename _Compare>
4744 _OutputIterator
4745 __merge(_InputIterator1 __first1, _InputIterator1 __last1,
4746 _InputIterator2 __first2, _InputIterator2 __last2,
4747 _OutputIterator __result, _Compare __comp)
4748 {
4749 while (__first1 != __last1 && __first2 != __last2)
4750 {
4751 if (__comp(__first2, __first1))
4752 {
4753 *__result = *__first2;
4754 ++__first2;
4755 }
4756 else
4757 {
4758 *__result = *__first1;
4759 ++__first1;
4760 }
4761 ++__result;
4762 }
4763 return std::copy(__first2, __last2,
4764 std::copy(__first1, __last1, __result));
4765 }
4766
4767 /**
4768 * @brief Merges two sorted ranges.
4769 * @ingroup sorting_algorithms
4770 * @param __first1 An iterator.
4771 * @param __first2 Another iterator.
4772 * @param __last1 Another iterator.
4773 * @param __last2 Another iterator.
4774 * @param __result An iterator pointing to the end of the merged range.
4775 * @return An iterator pointing to the first element <em>not less
4776 * than</em> @e val.
4777 *
4778 * Merges the ranges @p [__first1,__last1) and @p [__first2,__last2) into
4779 * the sorted range @p [__result, __result + (__last1-__first1) +
4780 * (__last2-__first2)). Both input ranges must be sorted, and the
4781 * output range must not overlap with either of the input ranges.
4782 * The sort is @e stable, that is, for equivalent elements in the
4783 * two ranges, elements from the first range will always come
4784 * before elements from the second.
4785 */
4786 template<typename _InputIterator1, typename _InputIterator2,
4787 typename _OutputIterator>
4788 inline _OutputIterator
4789 merge(_InputIterator1 __first1, _InputIterator1 __last1,
4790 _InputIterator2 __first2, _InputIterator2 __last2,
4791 _OutputIterator __result)
4792 {
4793 // concept requirements
4794 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
4795 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
4796 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4797 typename iterator_traits<_InputIterator1>::value_type>)
4798 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4799 typename iterator_traits<_InputIterator2>::value_type>)
4800 __glibcxx_function_requires(_LessThanOpConcept<
4801 typename iterator_traits<_InputIterator2>::value_type,
4802 typename iterator_traits<_InputIterator1>::value_type>)
4803 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
4804 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
4805 __glibcxx_requires_irreflexive2(__first1, __last1);
4806 __glibcxx_requires_irreflexive2(__first2, __last2);
4807
4808 return _GLIBCXX_STD_Astd::__merge(__first1, __last1,
4809 __first2, __last2, __result,
4810 __gnu_cxx::__ops::__iter_less_iter());
4811 }
4812
4813 /**
4814 * @brief Merges two sorted ranges.
4815 * @ingroup sorting_algorithms
4816 * @param __first1 An iterator.
4817 * @param __first2 Another iterator.
4818 * @param __last1 Another iterator.
4819 * @param __last2 Another iterator.
4820 * @param __result An iterator pointing to the end of the merged range.
4821 * @param __comp A functor to use for comparisons.
4822 * @return An iterator pointing to the first element "not less
4823 * than" @e val.
4824 *
4825 * Merges the ranges @p [__first1,__last1) and @p [__first2,__last2) into
4826 * the sorted range @p [__result, __result + (__last1-__first1) +
4827 * (__last2-__first2)). Both input ranges must be sorted, and the
4828 * output range must not overlap with either of the input ranges.
4829 * The sort is @e stable, that is, for equivalent elements in the
4830 * two ranges, elements from the first range will always come
4831 * before elements from the second.
4832 *
4833 * The comparison function should have the same effects on ordering as
4834 * the function used for the initial sort.
4835 */
4836 template<typename _InputIterator1, typename _InputIterator2,
4837 typename _OutputIterator, typename _Compare>
4838 inline _OutputIterator
4839 merge(_InputIterator1 __first1, _InputIterator1 __last1,
4840 _InputIterator2 __first2, _InputIterator2 __last2,
4841 _OutputIterator __result, _Compare __comp)
4842 {
4843 // concept requirements
4844 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
4845 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
4846 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4847 typename iterator_traits<_InputIterator1>::value_type>)
4848 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
4849 typename iterator_traits<_InputIterator2>::value_type>)
4850 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
4851 typename iterator_traits<_InputIterator2>::value_type,
4852 typename iterator_traits<_InputIterator1>::value_type>)
4853 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
4854 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
4855 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
4856 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
4857
4858 return _GLIBCXX_STD_Astd::__merge(__first1, __last1,
4859 __first2, __last2, __result,
4860 __gnu_cxx::__ops::__iter_comp_iter(__comp));
4861 }
4862
4863 template<typename _RandomAccessIterator, typename _Compare>
4864 inline void
4865 __stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last,
4866 _Compare __comp)
4867 {
4868 typedef typename iterator_traits<_RandomAccessIterator>::value_type
4869 _ValueType;
4870 typedef typename iterator_traits<_RandomAccessIterator>::difference_type
4871 _DistanceType;
4872
4873 typedef _Temporary_buffer<_RandomAccessIterator, _ValueType> _TmpBuf;
4874 _TmpBuf __buf(__first, __last);
4875
4876 if (__buf.begin() == 0)
4877 std::__inplace_stable_sort(__first, __last, __comp);
4878 else
4879 std::__stable_sort_adaptive(__first, __last, __buf.begin(),
4880 _DistanceType(__buf.size()), __comp);
4881 }
4882
4883 /**
4884 * @brief Sort the elements of a sequence, preserving the relative order
4885 * of equivalent elements.
4886 * @ingroup sorting_algorithms
4887 * @param __first An iterator.
4888 * @param __last Another iterator.
4889 * @return Nothing.
4890 *
4891 * Sorts the elements in the range @p [__first,__last) in ascending order,
4892 * such that for each iterator @p i in the range @p [__first,__last-1),
4893 * @p *(i+1)<*i is false.
4894 *
4895 * The relative ordering of equivalent elements is preserved, so any two
4896 * elements @p x and @p y in the range @p [__first,__last) such that
4897 * @p x<y is false and @p y<x is false will have the same relative
4898 * ordering after calling @p stable_sort().
4899 */
4900 template<typename _RandomAccessIterator>
4901 inline void
4902 stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last)
4903 {
4904 // concept requirements
4905 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4906 _RandomAccessIterator>)
4907 __glibcxx_function_requires(_LessThanComparableConcept<
4908 typename iterator_traits<_RandomAccessIterator>::value_type>)
4909 __glibcxx_requires_valid_range(__first, __last);
4910 __glibcxx_requires_irreflexive(__first, __last);
4911
4912 _GLIBCXX_STD_Astd::__stable_sort(__first, __last,
4913 __gnu_cxx::__ops::__iter_less_iter());
4914 }
4915
4916 /**
4917 * @brief Sort the elements of a sequence using a predicate for comparison,
4918 * preserving the relative order of equivalent elements.
4919 * @ingroup sorting_algorithms
4920 * @param __first An iterator.
4921 * @param __last Another iterator.
4922 * @param __comp A comparison functor.
4923 * @return Nothing.
4924 *
4925 * Sorts the elements in the range @p [__first,__last) in ascending order,
4926 * such that for each iterator @p i in the range @p [__first,__last-1),
4927 * @p __comp(*(i+1),*i) is false.
4928 *
4929 * The relative ordering of equivalent elements is preserved, so any two
4930 * elements @p x and @p y in the range @p [__first,__last) such that
4931 * @p __comp(x,y) is false and @p __comp(y,x) is false will have the same
4932 * relative ordering after calling @p stable_sort().
4933 */
4934 template<typename _RandomAccessIterator, typename _Compare>
4935 inline void
4936 stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last,
4937 _Compare __comp)
4938 {
4939 // concept requirements
4940 __glibcxx_function_requires(_Mutable_RandomAccessIteratorConcept<
4941 _RandomAccessIterator>)
4942 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
4943 typename iterator_traits<_RandomAccessIterator>::value_type,
4944 typename iterator_traits<_RandomAccessIterator>::value_type>)
4945 __glibcxx_requires_valid_range(__first, __last);
4946 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
4947
4948 _GLIBCXX_STD_Astd::__stable_sort(__first, __last,
4949 __gnu_cxx::__ops::__iter_comp_iter(__comp));
4950 }
4951
// Implementation of set_union: a single linear pass over two sorted
// ranges.  Equivalent elements (neither __comp(a,b) nor __comp(b,a))
// are emitted once, taken from the first range, and both inputs
// advance.  Returns the end of the output range after copying
// whichever input tail remains.  __comp compares *iterators*, not
// values (see __gnu_cxx::__ops).
template<typename _InputIterator1, typename _InputIterator2,
         typename _OutputIterator,
         typename _Compare>
  _OutputIterator
  __set_union(_InputIterator1 __first1, _InputIterator1 __last1,
              _InputIterator2 __first2, _InputIterator2 __last2,
              _OutputIterator __result, _Compare __comp)
  {
    while (__first1 != __last1 && __first2 != __last2)
      {
        if (__comp(__first1, __first2))
          {
            *__result = *__first1;
            ++__first1;
          }
        else if (__comp(__first2, __first1))
          {
            *__result = *__first2;
            ++__first2;
          }
        else
          {
            // Equivalent: keep the element from range 1, skip range 2's.
            *__result = *__first1;
            ++__first1;
            ++__first2;
          }
        ++__result;
      }
    // At most one of the two tails is non-empty here.
    return std::copy(__first2, __last2,
                     std::copy(__first1, __last1, __result));
  }
4983
4984 /**
4985 * @brief Return the union of two sorted ranges.
4986 * @ingroup set_algorithms
4987 * @param __first1 Start of first range.
4988 * @param __last1 End of first range.
4989 * @param __first2 Start of second range.
4990 * @param __last2 End of second range.
4991 * @return End of the output range.
4992 * @ingroup set_algorithms
4993 *
4994 * This operation iterates over both ranges, copying elements present in
4995 * each range in order to the output range. Iterators increment for each
4996 * range. When the current element of one range is less than the other,
4997 * that element is copied and the iterator advanced. If an element is
4998 * contained in both ranges, the element from the first range is copied and
4999 * both ranges advance. The output range may not overlap either input
5000 * range.
5001 */
5002 template<typename _InputIterator1, typename _InputIterator2,
5003 typename _OutputIterator>
5004 inline _OutputIterator
5005 set_union(_InputIterator1 __first1, _InputIterator1 __last1,
5006 _InputIterator2 __first2, _InputIterator2 __last2,
5007 _OutputIterator __result)
5008 {
5009 // concept requirements
5010 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5011 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5012 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5013 typename iterator_traits<_InputIterator1>::value_type>)
5014 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5015 typename iterator_traits<_InputIterator2>::value_type>)
5016 __glibcxx_function_requires(_LessThanOpConcept<
5017 typename iterator_traits<_InputIterator1>::value_type,
5018 typename iterator_traits<_InputIterator2>::value_type>)
5019 __glibcxx_function_requires(_LessThanOpConcept<
5020 typename iterator_traits<_InputIterator2>::value_type,
5021 typename iterator_traits<_InputIterator1>::value_type>)
5022 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
5023 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
5024 __glibcxx_requires_irreflexive2(__first1, __last1);
5025 __glibcxx_requires_irreflexive2(__first2, __last2);
5026
5027 return _GLIBCXX_STD_Astd::__set_union(__first1, __last1,
5028 __first2, __last2, __result,
5029 __gnu_cxx::__ops::__iter_less_iter());
5030 }
5031
5032 /**
5033 * @brief Return the union of two sorted ranges using a comparison functor.
5034 * @ingroup set_algorithms
5035 * @param __first1 Start of first range.
5036 * @param __last1 End of first range.
5037 * @param __first2 Start of second range.
5038 * @param __last2 End of second range.
5039 * @param __comp The comparison functor.
5040 * @return End of the output range.
5041 * @ingroup set_algorithms
5042 *
5043 * This operation iterates over both ranges, copying elements present in
5044 * each range in order to the output range. Iterators increment for each
5045 * range. When the current element of one range is less than the other
5046 * according to @p __comp, that element is copied and the iterator advanced.
5047 * If an equivalent element according to @p __comp is contained in both
5048 * ranges, the element from the first range is copied and both ranges
5049 * advance. The output range may not overlap either input range.
5050 */
5051 template<typename _InputIterator1, typename _InputIterator2,
5052 typename _OutputIterator, typename _Compare>
5053 inline _OutputIterator
5054 set_union(_InputIterator1 __first1, _InputIterator1 __last1,
5055 _InputIterator2 __first2, _InputIterator2 __last2,
5056 _OutputIterator __result, _Compare __comp)
5057 {
5058 // concept requirements
5059 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5060 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5061 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5062 typename iterator_traits<_InputIterator1>::value_type>)
5063 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5064 typename iterator_traits<_InputIterator2>::value_type>)
5065 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5066 typename iterator_traits<_InputIterator1>::value_type,
5067 typename iterator_traits<_InputIterator2>::value_type>)
5068 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5069 typename iterator_traits<_InputIterator2>::value_type,
5070 typename iterator_traits<_InputIterator1>::value_type>)
5071 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
5072 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
5073 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
5074 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
5075
5076 return _GLIBCXX_STD_Astd::__set_union(__first1, __last1,
5077 __first2, __last2, __result,
5078 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5079 }
5080
// Implementation of set_intersection: linear pass over two sorted
// ranges, emitting only elements present (equivalent under __comp) in
// both; the copy is taken from the first range.  Returns the end of
// the output range.  __comp compares iterators.
template<typename _InputIterator1, typename _InputIterator2,
         typename _OutputIterator,
         typename _Compare>
  _OutputIterator
  __set_intersection(_InputIterator1 __first1, _InputIterator1 __last1,
                     _InputIterator2 __first2, _InputIterator2 __last2,
                     _OutputIterator __result, _Compare __comp)
  {
    while (__first1 != __last1 && __first2 != __last2)
      if (__comp(__first1, __first2))
        ++__first1;                       // only in range 1: skip
      else if (__comp(__first2, __first1))
        ++__first2;                       // only in range 2: skip
      else
        {
          // In both ranges: emit once and advance both.
          *__result = *__first1;
          ++__first1;
          ++__first2;
          ++__result;
        }
    return __result;
  }
5103
5104 /**
5105 * @brief Return the intersection of two sorted ranges.
5106 * @ingroup set_algorithms
5107 * @param __first1 Start of first range.
5108 * @param __last1 End of first range.
5109 * @param __first2 Start of second range.
5110 * @param __last2 End of second range.
5111 * @return End of the output range.
5112 * @ingroup set_algorithms
5113 *
5114 * This operation iterates over both ranges, copying elements present in
5115 * both ranges in order to the output range. Iterators increment for each
5116 * range. When the current element of one range is less than the other,
5117 * that iterator advances. If an element is contained in both ranges, the
5118 * element from the first range is copied and both ranges advance. The
5119 * output range may not overlap either input range.
5120 */
5121 template<typename _InputIterator1, typename _InputIterator2,
5122 typename _OutputIterator>
5123 inline _OutputIterator
5124 set_intersection(_InputIterator1 __first1, _InputIterator1 __last1,
5125 _InputIterator2 __first2, _InputIterator2 __last2,
5126 _OutputIterator __result)
5127 {
5128 // concept requirements
5129 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5130 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5131 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5132 typename iterator_traits<_InputIterator1>::value_type>)
5133 __glibcxx_function_requires(_LessThanOpConcept<
5134 typename iterator_traits<_InputIterator1>::value_type,
5135 typename iterator_traits<_InputIterator2>::value_type>)
5136 __glibcxx_function_requires(_LessThanOpConcept<
5137 typename iterator_traits<_InputIterator2>::value_type,
5138 typename iterator_traits<_InputIterator1>::value_type>)
5139 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
5140 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
5141 __glibcxx_requires_irreflexive2(__first1, __last1);
5142 __glibcxx_requires_irreflexive2(__first2, __last2);
5143
5144 return _GLIBCXX_STD_Astd::__set_intersection(__first1, __last1,
5145 __first2, __last2, __result,
5146 __gnu_cxx::__ops::__iter_less_iter());
5147 }
5148
5149 /**
5150 * @brief Return the intersection of two sorted ranges using comparison
5151 * functor.
5152 * @ingroup set_algorithms
5153 * @param __first1 Start of first range.
5154 * @param __last1 End of first range.
5155 * @param __first2 Start of second range.
5156 * @param __last2 End of second range.
5157 * @param __comp The comparison functor.
5158 * @return End of the output range.
5159 * @ingroup set_algorithms
5160 *
5161 * This operation iterates over both ranges, copying elements present in
5162 * both ranges in order to the output range. Iterators increment for each
5163 * range. When the current element of one range is less than the other
5164 * according to @p __comp, that iterator advances. If an element is
5165 * contained in both ranges according to @p __comp, the element from the
5166 * first range is copied and both ranges advance. The output range may not
5167 * overlap either input range.
5168 */
5169 template<typename _InputIterator1, typename _InputIterator2,
5170 typename _OutputIterator, typename _Compare>
5171 inline _OutputIterator
5172 set_intersection(_InputIterator1 __first1, _InputIterator1 __last1,
5173 _InputIterator2 __first2, _InputIterator2 __last2,
5174 _OutputIterator __result, _Compare __comp)
5175 {
5176 // concept requirements
5177 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5178 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5179 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5180 typename iterator_traits<_InputIterator1>::value_type>)
5181 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5182 typename iterator_traits<_InputIterator1>::value_type,
5183 typename iterator_traits<_InputIterator2>::value_type>)
5184 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5185 typename iterator_traits<_InputIterator2>::value_type,
5186 typename iterator_traits<_InputIterator1>::value_type>)
5187 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
5188 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
5189 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
5190 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
5191
5192 return _GLIBCXX_STD_Astd::__set_intersection(__first1, __last1,
5193 __first2, __last2, __result,
5194 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5195 }
5196
// Implementation of set_difference: linear pass emitting the elements
// of the first sorted range that have no equivalent (under __comp) in
// the second.  Once range 2 is exhausted the rest of range 1 is copied
// verbatim.  Returns the end of the output range.  __comp compares
// iterators.
template<typename _InputIterator1, typename _InputIterator2,
         typename _OutputIterator,
         typename _Compare>
  _OutputIterator
  __set_difference(_InputIterator1 __first1, _InputIterator1 __last1,
                   _InputIterator2 __first2, _InputIterator2 __last2,
                   _OutputIterator __result, _Compare __comp)
  {
    while (__first1 != __last1 && __first2 != __last2)
      if (__comp(__first1, __first2))
        {
          // Only in range 1: keep it.
          *__result = *__first1;
          ++__first1;
          ++__result;
        }
      else if (__comp(__first2, __first1))
        ++__first2;                       // only in range 2: ignore
      else
        {
          // In both: drop it, advance both.
          ++__first1;
          ++__first2;
        }
    return std::copy(__first1, __last1, __result);
  }
5221
5222 /**
5223 * @brief Return the difference of two sorted ranges.
5224 * @ingroup set_algorithms
5225 * @param __first1 Start of first range.
5226 * @param __last1 End of first range.
5227 * @param __first2 Start of second range.
5228 * @param __last2 End of second range.
5229 * @return End of the output range.
5230 * @ingroup set_algorithms
5231 *
5232 * This operation iterates over both ranges, copying elements present in
5233 * the first range but not the second in order to the output range.
5234 * Iterators increment for each range. When the current element of the
5235 * first range is less than the second, that element is copied and the
5236 * iterator advances. If the current element of the second range is less,
5237 * the iterator advances, but no element is copied. If an element is
5238 * contained in both ranges, no elements are copied and both ranges
5239 * advance. The output range may not overlap either input range.
5240 */
5241 template<typename _InputIterator1, typename _InputIterator2,
5242 typename _OutputIterator>
5243 inline _OutputIterator
5244 set_difference(_InputIterator1 __first1, _InputIterator1 __last1,
5245 _InputIterator2 __first2, _InputIterator2 __last2,
5246 _OutputIterator __result)
5247 {
5248 // concept requirements
5249 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5250 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5251 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5252 typename iterator_traits<_InputIterator1>::value_type>)
5253 __glibcxx_function_requires(_LessThanOpConcept<
5254 typename iterator_traits<_InputIterator1>::value_type,
5255 typename iterator_traits<_InputIterator2>::value_type>)
5256 __glibcxx_function_requires(_LessThanOpConcept<
5257 typename iterator_traits<_InputIterator2>::value_type,
5258 typename iterator_traits<_InputIterator1>::value_type>)
5259 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
5260 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
5261 __glibcxx_requires_irreflexive2(__first1, __last1);
5262 __glibcxx_requires_irreflexive2(__first2, __last2);
5263
5264 return _GLIBCXX_STD_Astd::__set_difference(__first1, __last1,
5265 __first2, __last2, __result,
5266 __gnu_cxx::__ops::__iter_less_iter());
5267 }
5268
5269 /**
5270 * @brief Return the difference of two sorted ranges using comparison
5271 * functor.
5272 * @ingroup set_algorithms
5273 * @param __first1 Start of first range.
5274 * @param __last1 End of first range.
5275 * @param __first2 Start of second range.
5276 * @param __last2 End of second range.
5277 * @param __comp The comparison functor.
5278 * @return End of the output range.
5279 * @ingroup set_algorithms
5280 *
5281 * This operation iterates over both ranges, copying elements present in
5282 * the first range but not the second in order to the output range.
5283 * Iterators increment for each range. When the current element of the
5284 * first range is less than the second according to @p __comp, that element
5285 * is copied and the iterator advances. If the current element of the
5286 * second range is less, no element is copied and the iterator advances.
5287 * If an element is contained in both ranges according to @p __comp, no
5288 * elements are copied and both ranges advance. The output range may not
5289 * overlap either input range.
5290 */
5291 template<typename _InputIterator1, typename _InputIterator2,
5292 typename _OutputIterator, typename _Compare>
5293 inline _OutputIterator
5294 set_difference(_InputIterator1 __first1, _InputIterator1 __last1,
5295 _InputIterator2 __first2, _InputIterator2 __last2,
5296 _OutputIterator __result, _Compare __comp)
5297 {
5298 // concept requirements
5299 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5300 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5301 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5302 typename iterator_traits<_InputIterator1>::value_type>)
5303 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5304 typename iterator_traits<_InputIterator1>::value_type,
5305 typename iterator_traits<_InputIterator2>::value_type>)
5306 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5307 typename iterator_traits<_InputIterator2>::value_type,
5308 typename iterator_traits<_InputIterator1>::value_type>)
5309 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
5310 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
5311 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
5312 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
5313
5314 return _GLIBCXX_STD_Astd::__set_difference(__first1, __last1,
5315 __first2, __last2, __result,
5316 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5317 }
5318
// Implementation of set_symmetric_difference: linear pass emitting the
// elements found in exactly one of the two sorted ranges; elements
// equivalent under __comp in both ranges are dropped.  The surviving
// tail of whichever range is left is copied at the end.  Returns the
// end of the output range.  __comp compares iterators.
template<typename _InputIterator1, typename _InputIterator2,
         typename _OutputIterator,
         typename _Compare>
  _OutputIterator
  __set_symmetric_difference(_InputIterator1 __first1,
                             _InputIterator1 __last1,
                             _InputIterator2 __first2,
                             _InputIterator2 __last2,
                             _OutputIterator __result,
                             _Compare __comp)
  {
    while (__first1 != __last1 && __first2 != __last2)
      if (__comp(__first1, __first2))
        {
          // Only in range 1.
          *__result = *__first1;
          ++__first1;
          ++__result;
        }
      else if (__comp(__first2, __first1))
        {
          // Only in range 2.
          *__result = *__first2;
          ++__first2;
          ++__result;
        }
      else
        {
          // In both ranges: drop from the output.
          ++__first1;
          ++__first2;
        }
    // At most one of the two tails is non-empty here.
    return std::copy(__first2, __last2,
                     std::copy(__first1, __last1, __result));
  }
5351
5352 /**
5353 * @brief Return the symmetric difference of two sorted ranges.
5354 * @ingroup set_algorithms
5355 * @param __first1 Start of first range.
5356 * @param __last1 End of first range.
5357 * @param __first2 Start of second range.
5358 * @param __last2 End of second range.
5359 * @return End of the output range.
5360 * @ingroup set_algorithms
5361 *
5362 * This operation iterates over both ranges, copying elements present in
5363 * one range but not the other in order to the output range. Iterators
5364 * increment for each range. When the current element of one range is less
5365 * than the other, that element is copied and the iterator advances. If an
5366 * element is contained in both ranges, no elements are copied and both
5367 * ranges advance. The output range may not overlap either input range.
5368 */
5369 template<typename _InputIterator1, typename _InputIterator2,
5370 typename _OutputIterator>
5371 inline _OutputIterator
5372 set_symmetric_difference(_InputIterator1 __first1, _InputIterator1 __last1,
5373 _InputIterator2 __first2, _InputIterator2 __last2,
5374 _OutputIterator __result)
5375 {
5376 // concept requirements
5377 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5378 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5379 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5380 typename iterator_traits<_InputIterator1>::value_type>)
5381 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5382 typename iterator_traits<_InputIterator2>::value_type>)
5383 __glibcxx_function_requires(_LessThanOpConcept<
5384 typename iterator_traits<_InputIterator1>::value_type,
5385 typename iterator_traits<_InputIterator2>::value_type>)
5386 __glibcxx_function_requires(_LessThanOpConcept<
5387 typename iterator_traits<_InputIterator2>::value_type,
5388 typename iterator_traits<_InputIterator1>::value_type>)
5389 __glibcxx_requires_sorted_set(__first1, __last1, __first2);
5390 __glibcxx_requires_sorted_set(__first2, __last2, __first1);
5391 __glibcxx_requires_irreflexive2(__first1, __last1);
5392 __glibcxx_requires_irreflexive2(__first2, __last2);
5393
5394 return _GLIBCXX_STD_Astd::__set_symmetric_difference(__first1, __last1,
5395 __first2, __last2, __result,
5396 __gnu_cxx::__ops::__iter_less_iter());
5397 }
5398
5399 /**
5400 * @brief Return the symmetric difference of two sorted ranges using
5401 * comparison functor.
5402 * @ingroup set_algorithms
5403 * @param __first1 Start of first range.
5404 * @param __last1 End of first range.
5405 * @param __first2 Start of second range.
5406 * @param __last2 End of second range.
5407 * @param __comp The comparison functor.
5408 * @return End of the output range.
5409 * @ingroup set_algorithms
5410 *
5411 * This operation iterates over both ranges, copying elements present in
5412 * one range but not the other in order to the output range. Iterators
5413 * increment for each range. When the current element of one range is less
5414 * than the other according to @p comp, that element is copied and the
5415 * iterator advances. If an element is contained in both ranges according
5416 * to @p __comp, no elements are copied and both ranges advance. The output
5417 * range may not overlap either input range.
5418 */
5419 template<typename _InputIterator1, typename _InputIterator2,
5420 typename _OutputIterator, typename _Compare>
5421 inline _OutputIterator
5422 set_symmetric_difference(_InputIterator1 __first1, _InputIterator1 __last1,
5423 _InputIterator2 __first2, _InputIterator2 __last2,
5424 _OutputIterator __result,
5425 _Compare __comp)
5426 {
5427 // concept requirements
5428 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
5429 __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
5430 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5431 typename iterator_traits<_InputIterator1>::value_type>)
5432 __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator,
5433 typename iterator_traits<_InputIterator2>::value_type>)
5434 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5435 typename iterator_traits<_InputIterator1>::value_type,
5436 typename iterator_traits<_InputIterator2>::value_type>)
5437 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5438 typename iterator_traits<_InputIterator2>::value_type,
5439 typename iterator_traits<_InputIterator1>::value_type>)
5440 __glibcxx_requires_sorted_set_pred(__first1, __last1, __first2, __comp);
5441 __glibcxx_requires_sorted_set_pred(__first2, __last2, __first1, __comp);
5442 __glibcxx_requires_irreflexive_pred2(__first1, __last1, __comp);
5443 __glibcxx_requires_irreflexive_pred2(__first2, __last2, __comp);
5444
5445 return _GLIBCXX_STD_Astd::__set_symmetric_difference(__first1, __last1,
5446 __first2, __last2, __result,
5447 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5448 }
5449
// Implementation of min_element: returns an iterator to the first
// smallest element of [__first,__last), or __last... actually __first
// (== __last) for an empty range.  __comp compares iterators.
// ("constexpr" is the dump-visible expansion of _GLIBCXX14_CONSTEXPR.)
template<typename _ForwardIterator, typename _Compare>
  constexpr
  _ForwardIterator
  __min_element(_ForwardIterator __first, _ForwardIterator __last,
                _Compare __comp)
  {
    if (__first == __last)
      return __first;
    _ForwardIterator __result = __first;
    while (++__first != __last)
      if (__comp(__first, __result))
        // Strict "<" keeps the FIRST of equivalent minima.
        __result = __first;
    return __result;
  }
5464
5465 /**
5466 * @brief Return the minimum element in a range.
5467 * @ingroup sorting_algorithms
5468 * @param __first Start of range.
5469 * @param __last End of range.
5470 * @return Iterator referencing the first instance of the smallest value.
5471 */
5472 template<typename _ForwardIterator>
5473 _GLIBCXX14_CONSTEXPRconstexpr
5474 _ForwardIterator
5475 inline min_element(_ForwardIterator __first, _ForwardIterator __last)
5476 {
5477 // concept requirements
5478 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
5479 __glibcxx_function_requires(_LessThanComparableConcept<
5480 typename iterator_traits<_ForwardIterator>::value_type>)
5481 __glibcxx_requires_valid_range(__first, __last);
5482 __glibcxx_requires_irreflexive(__first, __last);
5483
5484 return _GLIBCXX_STD_Astd::__min_element(__first, __last,
5485 __gnu_cxx::__ops::__iter_less_iter());
5486 }
5487
5488 /**
5489 * @brief Return the minimum element in a range using comparison functor.
5490 * @ingroup sorting_algorithms
5491 * @param __first Start of range.
5492 * @param __last End of range.
5493 * @param __comp Comparison functor.
5494 * @return Iterator referencing the first instance of the smallest value
5495 * according to __comp.
5496 */
5497 template<typename _ForwardIterator, typename _Compare>
5498 _GLIBCXX14_CONSTEXPRconstexpr
5499 inline _ForwardIterator
5500 min_element(_ForwardIterator __first, _ForwardIterator __last,
5501 _Compare __comp)
5502 {
5503 // concept requirements
5504 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
5505 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5506 typename iterator_traits<_ForwardIterator>::value_type,
5507 typename iterator_traits<_ForwardIterator>::value_type>)
5508 __glibcxx_requires_valid_range(__first, __last);
5509 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
5510
5511 return _GLIBCXX_STD_Astd::__min_element(__first, __last,
5512 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5513 }
5514
// Implementation of max_element: returns an iterator to the first
// largest element of [__first,__last), or __first (== __last) for an
// empty range.  __comp compares iterators; note the operand order
// __comp(__result, __first) so equivalent later elements do NOT
// replace the current maximum (keeps the FIRST largest).
template<typename _ForwardIterator, typename _Compare>
  constexpr
  _ForwardIterator
  __max_element(_ForwardIterator __first, _ForwardIterator __last,
                _Compare __comp)
  {
    if (__first == __last)
      return __first;
    _ForwardIterator __result = __first;
    while (++__first != __last)
      if (__comp(__result, __first))
        __result = __first;
    return __result;
  }
5528
5529 /**
5530 * @brief Return the maximum element in a range.
5531 * @ingroup sorting_algorithms
5532 * @param __first Start of range.
5533 * @param __last End of range.
5534 * @return Iterator referencing the first instance of the largest value.
5535 */
5536 template<typename _ForwardIterator>
5537 _GLIBCXX14_CONSTEXPRconstexpr
5538 inline _ForwardIterator
5539 max_element(_ForwardIterator __first, _ForwardIterator __last)
5540 {
5541 // concept requirements
5542 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
5543 __glibcxx_function_requires(_LessThanComparableConcept<
5544 typename iterator_traits<_ForwardIterator>::value_type>)
5545 __glibcxx_requires_valid_range(__first, __last);
5546 __glibcxx_requires_irreflexive(__first, __last);
5547
5548 return _GLIBCXX_STD_Astd::__max_element(__first, __last,
5549 __gnu_cxx::__ops::__iter_less_iter());
5550 }
5551
5552 /**
5553 * @brief Return the maximum element in a range using comparison functor.
5554 * @ingroup sorting_algorithms
5555 * @param __first Start of range.
5556 * @param __last End of range.
5557 * @param __comp Comparison functor.
5558 * @return Iterator referencing the first instance of the largest value
5559 * according to __comp.
5560 */
5561 template<typename _ForwardIterator, typename _Compare>
5562 _GLIBCXX14_CONSTEXPRconstexpr
5563 inline _ForwardIterator
5564 max_element(_ForwardIterator __first, _ForwardIterator __last,
5565 _Compare __comp)
5566 {
5567 // concept requirements
5568 __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
5569 __glibcxx_function_requires(_BinaryPredicateConcept<_Compare,
5570 typename iterator_traits<_ForwardIterator>::value_type,
5571 typename iterator_traits<_ForwardIterator>::value_type>)
5572 __glibcxx_requires_valid_range(__first, __last);
5573 __glibcxx_requires_irreflexive_pred(__first, __last, __comp);
5574
5575 return _GLIBCXX_STD_Astd::__max_element(__first, __last,
5576 __gnu_cxx::__ops::__iter_comp_iter(__comp));
5577 }
5578
5579_GLIBCXX_END_NAMESPACE_ALGO
5580} // namespace std
5581
5582#endif /* _STL_ALGO_H */

/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/ilist_iterator.h

1//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_ADT_ILIST_ITERATOR_H
10#define LLVM_ADT_ILIST_ITERATOR_H
11
12#include "llvm/ADT/ilist_node.h"
13#include <cassert>
14#include <cstddef>
15#include <iterator>
16#include <type_traits>
17
18namespace llvm {
19
20namespace ilist_detail {
21
22/// Find const-correct node types.
23template <class OptionsT, bool IsConst> struct IteratorTraits;
24template <class OptionsT> struct IteratorTraits<OptionsT, false> {
25 using value_type = typename OptionsT::value_type;
26 using pointer = typename OptionsT::pointer;
27 using reference = typename OptionsT::reference;
28 using node_pointer = ilist_node_impl<OptionsT> *;
29 using node_reference = ilist_node_impl<OptionsT> &;
30};
31template <class OptionsT> struct IteratorTraits<OptionsT, true> {
32 using value_type = const typename OptionsT::value_type;
33 using pointer = typename OptionsT::const_pointer;
34 using reference = typename OptionsT::const_reference;
35 using node_pointer = const ilist_node_impl<OptionsT> *;
36 using node_reference = const ilist_node_impl<OptionsT> &;
37};
38
39template <bool IsReverse> struct IteratorHelper;
40template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
41 using Access = ilist_detail::NodeAccess;
42
43 template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
44 template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
45};
46template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
47 using Access = ilist_detail::NodeAccess;
48
49 template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
50 template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
51};
52
53} // end namespace ilist_detail
54
55/// Iterator for intrusive lists based on ilist_node.
56template <class OptionsT, bool IsReverse, bool IsConst>
57class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
58 friend ilist_iterator<OptionsT, IsReverse, !IsConst>;
59 friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
60 friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
61
62 using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
63 using Access = ilist_detail::SpecificNodeAccess<OptionsT>;
64
65public:
66 using value_type = typename Traits::value_type;
67 using pointer = typename Traits::pointer;
68 using reference = typename Traits::reference;
69 using difference_type = ptrdiff_t;
70 using iterator_category = std::bidirectional_iterator_tag;
71 using const_pointer = typename OptionsT::const_pointer;
72 using const_reference = typename OptionsT::const_reference;
73
74private:
75 using node_pointer = typename Traits::node_pointer;
76 using node_reference = typename Traits::node_reference;
77
78 node_pointer NodePtr = nullptr;
79
80public:
81 /// Create from an ilist_node.
82 explicit ilist_iterator(node_reference N) : NodePtr(&N) {}
83
84 explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
85 explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
86 ilist_iterator() = default;
87
88 // This is templated so that we can allow constructing a const iterator from
89 // a nonconst iterator...
90 template <bool RHSIsConst>
91 ilist_iterator(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
92 std::enable_if_t<IsConst || !RHSIsConst, void *> = nullptr)
93 : NodePtr(RHS.NodePtr) {}
94
95 // This is templated so that we can allow assigning to a const iterator from
96 // a nonconst iterator...
97 template <bool RHSIsConst>
98 std::enable_if_t<IsConst || !RHSIsConst, ilist_iterator &>
99 operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
100 NodePtr = RHS.NodePtr;
101 return *this;
102 }
103
104 /// Explicit conversion between forward/reverse iterators.
105 ///
106 /// Translate between forward and reverse iterators without changing range
107 /// boundaries. The resulting iterator will dereference (and have a handle)
108 /// to the previous node, which is somewhat unexpected; but converting the
109 /// two endpoints in a range will give the same range in reverse.
110 ///
111 /// This matches std::reverse_iterator conversions.
112 explicit ilist_iterator(
113 const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS)
114 : ilist_iterator(++RHS.getReverse()) {}
115
116 /// Get a reverse iterator to the same node.
117 ///
118 /// Gives a reverse iterator that will dereference (and have a handle) to the
119 /// same node. Converting the endpoint iterators in a range will give a
120 /// different range; for range operations, use the explicit conversions.
121 ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
122 if (NodePtr)
123 return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);
124 return ilist_iterator<OptionsT, !IsReverse, IsConst>();
125 }
126
127 /// Const-cast.
128 ilist_iterator<OptionsT, IsReverse, false> getNonConst() const {
129 if (NodePtr)
130 return ilist_iterator<OptionsT, IsReverse, false>(
131 const_cast<typename ilist_iterator<OptionsT, IsReverse,
132 false>::node_reference>(*NodePtr));
133 return ilist_iterator<OptionsT, IsReverse, false>();
134 }
135
136 // Accessors...
137 reference operator*() const {
138 assert(!NodePtr->isKnownSentinel())((!NodePtr->isKnownSentinel()) ? static_cast<void> (
0) : __assert_fail ("!NodePtr->isKnownSentinel()", "/build/llvm-toolchain-snapshot-12~++20210121111113+bee486851c1a/llvm/include/llvm/ADT/ilist_iterator.h"
, 138, __PRETTY_FUNCTION__))
;
139 return *Access::getValuePtr(NodePtr);
140 }
141 pointer operator->() const { return &operator*(); }
142
143 // Comparison operators
144 friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) {
145 return LHS.NodePtr
24.1
'LHS.NodePtr' is equal to 'RHS.NodePtr'
24.1
'LHS.NodePtr' is equal to 'RHS.NodePtr'
24.1
'LHS.NodePtr' is equal to 'RHS.NodePtr'
24.1
'LHS.NodePtr' is equal to 'RHS.NodePtr'
24.1
'LHS.NodePtr' is equal to 'RHS.NodePtr'
24.1
'LHS.NodePtr' is equal to 'RHS.NodePtr'
== RHS.NodePtr
;
25
Returning the value 1, which participates in a condition later
146 }
147 friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) {
148 return LHS.NodePtr != RHS.NodePtr;
149 }
150
151 // Increment and decrement operators...
152 ilist_iterator &operator--() {
153 NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev();
154 return *this;
155 }
156 ilist_iterator &operator++() {
157 NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext();
158 return *this;
159 }
160 ilist_iterator operator--(int) {
161 ilist_iterator tmp = *this;
162 --*this;
163 return tmp;
164 }
165 ilist_iterator operator++(int) {
166 ilist_iterator tmp = *this;
167 ++*this;
168 return tmp;
169 }
170
171 /// Get the underlying ilist_node.
172 node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); }
173
174 /// Check for end. Only valid if ilist_sentinel_tracking<true>.
175 bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; }
176};
177
178template <typename From> struct simplify_type;
179
180/// Allow ilist_iterators to convert into pointers to a node automatically when
181/// used by the dyn_cast, cast, isa mechanisms...
182///
183/// FIXME: remove this, since there is no implicit conversion to NodeTy.
184template <class OptionsT, bool IsConst>
185struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
186 using iterator = ilist_iterator<OptionsT, false, IsConst>;
187 using SimpleType = typename iterator::pointer;
188
189 static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
190};
191template <class OptionsT, bool IsConst>
192struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>>
193 : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {};
194
195} // end namespace llvm
196
197#endif // LLVM_ADT_ILIST_ITERATOR_H