File: llvm/lib/Transforms/Scalar/JumpThreading.cpp
Warning: line 1453, column 7: Called C++ object pointer is null
1 | //===- JumpThreading.cpp - Thread control through conditional blocks ------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file implements the Jump Threading pass. | |||
10 | // | |||
11 | //===----------------------------------------------------------------------===// | |||
12 | ||||
13 | #include "llvm/Transforms/Scalar/JumpThreading.h" | |||
14 | #include "llvm/ADT/DenseMap.h" | |||
15 | #include "llvm/ADT/DenseSet.h" | |||
16 | #include "llvm/ADT/MapVector.h" | |||
17 | #include "llvm/ADT/Optional.h" | |||
18 | #include "llvm/ADT/STLExtras.h" | |||
19 | #include "llvm/ADT/SmallPtrSet.h" | |||
20 | #include "llvm/ADT/SmallVector.h" | |||
21 | #include "llvm/ADT/Statistic.h" | |||
22 | #include "llvm/Analysis/AliasAnalysis.h" | |||
23 | #include "llvm/Analysis/BlockFrequencyInfo.h" | |||
24 | #include "llvm/Analysis/BranchProbabilityInfo.h" | |||
25 | #include "llvm/Analysis/CFG.h" | |||
26 | #include "llvm/Analysis/ConstantFolding.h" | |||
27 | #include "llvm/Analysis/DomTreeUpdater.h" | |||
28 | #include "llvm/Analysis/GlobalsModRef.h" | |||
29 | #include "llvm/Analysis/GuardUtils.h" | |||
30 | #include "llvm/Analysis/InstructionSimplify.h" | |||
31 | #include "llvm/Analysis/LazyValueInfo.h" | |||
32 | #include "llvm/Analysis/Loads.h" | |||
33 | #include "llvm/Analysis/LoopInfo.h" | |||
34 | #include "llvm/Analysis/TargetLibraryInfo.h" | |||
35 | #include "llvm/Analysis/TargetTransformInfo.h" | |||
36 | #include "llvm/Analysis/ValueTracking.h" | |||
37 | #include "llvm/IR/BasicBlock.h" | |||
38 | #include "llvm/IR/CFG.h" | |||
39 | #include "llvm/IR/Constant.h" | |||
40 | #include "llvm/IR/ConstantRange.h" | |||
41 | #include "llvm/IR/Constants.h" | |||
42 | #include "llvm/IR/DataLayout.h" | |||
43 | #include "llvm/IR/Dominators.h" | |||
44 | #include "llvm/IR/Function.h" | |||
45 | #include "llvm/IR/InstrTypes.h" | |||
46 | #include "llvm/IR/Instruction.h" | |||
47 | #include "llvm/IR/Instructions.h" | |||
48 | #include "llvm/IR/IntrinsicInst.h" | |||
49 | #include "llvm/IR/Intrinsics.h" | |||
50 | #include "llvm/IR/LLVMContext.h" | |||
51 | #include "llvm/IR/MDBuilder.h" | |||
52 | #include "llvm/IR/Metadata.h" | |||
53 | #include "llvm/IR/Module.h" | |||
54 | #include "llvm/IR/PassManager.h" | |||
55 | #include "llvm/IR/PatternMatch.h" | |||
56 | #include "llvm/IR/Type.h" | |||
57 | #include "llvm/IR/Use.h" | |||
58 | #include "llvm/IR/User.h" | |||
59 | #include "llvm/IR/Value.h" | |||
60 | #include "llvm/InitializePasses.h" | |||
61 | #include "llvm/Pass.h" | |||
62 | #include "llvm/Support/BlockFrequency.h" | |||
63 | #include "llvm/Support/BranchProbability.h" | |||
64 | #include "llvm/Support/Casting.h" | |||
65 | #include "llvm/Support/CommandLine.h" | |||
66 | #include "llvm/Support/Debug.h" | |||
67 | #include "llvm/Support/raw_ostream.h" | |||
68 | #include "llvm/Transforms/Scalar.h" | |||
69 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | |||
70 | #include "llvm/Transforms/Utils/Cloning.h" | |||
71 | #include "llvm/Transforms/Utils/Local.h" | |||
72 | #include "llvm/Transforms/Utils/SSAUpdater.h" | |||
73 | #include "llvm/Transforms/Utils/ValueMapper.h" | |||
74 | #include <algorithm> | |||
75 | #include <cassert> | |||
76 | #include <cstddef> | |||
77 | #include <cstdint> | |||
78 | #include <iterator> | |||
79 | #include <memory> | |||
80 | #include <utility> | |||
81 | ||||
82 | using namespace llvm; | |||
83 | using namespace jumpthreading; | |||
84 | ||||
85 | #define DEBUG_TYPE "jump-threading" | |||
86 | ||||
87 | STATISTIC(NumThreads, "Number of jumps threaded"); | |||
88 | STATISTIC(NumFolds,   "Number of terminators folded"); | |||
89 | STATISTIC(NumDupes,   "Number of branch blocks duplicated to eliminate phi"); | |||
90 | ||||
91 | static cl::opt<unsigned> | |||
92 | BBDuplicateThreshold("jump-threading-threshold", | |||
93 | cl::desc("Max block size to duplicate for jump threading"), | |||
94 | cl::init(6), cl::Hidden); | |||
95 | ||||
96 | static cl::opt<unsigned> | |||
97 | ImplicationSearchThreshold( | |||
98 | "jump-threading-implication-search-threshold", | |||
99 | cl::desc("The number of predecessors to search for a stronger " | |||
100 | "condition to use to thread over a weaker condition"), | |||
101 | cl::init(3), cl::Hidden); | |||
102 | ||||
103 | static cl::opt<bool> PrintLVIAfterJumpThreading( | |||
104 | "print-lvi-after-jump-threading", | |||
105 | cl::desc("Print the LazyValueInfo cache after JumpThreading"), cl::init(false), | |||
106 | cl::Hidden); | |||
107 | ||||
108 | static cl::opt<bool> JumpThreadingFreezeSelectCond( | |||
109 | "jump-threading-freeze-select-cond", | |||
110 | cl::desc("Freeze the condition when unfolding select"), cl::init(false), | |||
111 | cl::Hidden); | |||
112 | ||||
113 | static cl::opt<bool> ThreadAcrossLoopHeaders( | |||
114 | "jump-threading-across-loop-headers", | |||
115 | cl::desc("Allow JumpThreading to thread across loop headers, for testing"), | |||
116 | cl::init(false), cl::Hidden); | |||
117 | ||||
118 | ||||
119 | namespace { | |||
120 | ||||
121 | /// This pass performs 'jump threading', which looks at blocks that have | |||
122 | /// multiple predecessors and multiple successors. If one or more of the | |||
123 | /// predecessors of the block can be proven to always jump to one of the | |||
124 | /// successors, we forward the edge from the predecessor to the successor by | |||
125 | /// duplicating the contents of this block. | |||
126 | /// | |||
127 | /// An example of when this can occur is code like this: | |||
128 | /// | |||
129 | /// if () { ... | |||
130 | /// X = 4; | |||
131 | /// } | |||
132 | /// if (X < 3) { | |||
133 | /// | |||
134 | /// In this case, the unconditional branch at the end of the first if can be | |||
135 | /// revectored to the false side of the second if. | |||
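/// A minimal sketch of that example (hypothetical IR names, not from any
/// actual test case): the block where X = 4 ends in `br label %merge`, and
/// %merge branches on `icmp slt i32 %X, 3`.  Along that edge the compare is
/// known false (4 < 3), so the predecessor can branch directly to %merge's
/// false successor, with %merge's contents duplicated for that edge if %merge
/// still has other predecessors.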
136 | class JumpThreading : public FunctionPass { | |||
137 | JumpThreadingPass Impl; | |||
138 | ||||
139 | public: | |||
140 | static char ID; // Pass identification | |||
141 | ||||
142 | JumpThreading(bool InsertFreezeWhenUnfoldingSelect = false, int T = -1) | |||
143 | : FunctionPass(ID), Impl(InsertFreezeWhenUnfoldingSelect, T) { | |||
144 | initializeJumpThreadingPass(*PassRegistry::getPassRegistry()); | |||
145 | } | |||
146 | ||||
147 | bool runOnFunction(Function &F) override; | |||
148 | ||||
149 | void getAnalysisUsage(AnalysisUsage &AU) const override { | |||
150 | AU.addRequired<DominatorTreeWrapperPass>(); | |||
151 | AU.addPreserved<DominatorTreeWrapperPass>(); | |||
152 | AU.addRequired<AAResultsWrapperPass>(); | |||
153 | AU.addRequired<LazyValueInfoWrapperPass>(); | |||
154 | AU.addPreserved<LazyValueInfoWrapperPass>(); | |||
155 | AU.addPreserved<GlobalsAAWrapperPass>(); | |||
156 | AU.addRequired<TargetLibraryInfoWrapperPass>(); | |||
157 | AU.addRequired<TargetTransformInfoWrapperPass>(); | |||
158 | } | |||
159 | ||||
160 | void releaseMemory() override { Impl.releaseMemory(); } | |||
161 | }; | |||
162 | ||||
163 | } // end anonymous namespace | |||
164 | ||||
165 | char JumpThreading::ID = 0; | |||
166 | ||||
167 | INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading", | |||
168 | "Jump Threading", false, false) | |||
169 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) | |||
170 | INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass) | |||
171 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) | |||
172 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | |||
173 | INITIALIZE_PASS_END(JumpThreading, "jump-threading", | |||
174 | "Jump Threading", false, false) | |||
175 | ||||
176 | // Public interface to the Jump Threading pass | |||
177 | FunctionPass *llvm::createJumpThreadingPass(bool InsertFr, int Threshold) { | |||
178 | return new JumpThreading(InsertFr, Threshold); | |||
179 | } | |||
180 | ||||
181 | JumpThreadingPass::JumpThreadingPass(bool InsertFr, int T) { | |||
182 | InsertFreezeWhenUnfoldingSelect = JumpThreadingFreezeSelectCond | InsertFr; | |||
183 | DefaultBBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T); | |||
184 | } | |||
185 | ||||
186 | // Update branch probability information according to conditional | |||
187 | // branch probability. This is usually made possible for cloned branches | |||
188 | // in inline instances by the context specific profile in the caller. | |||
189 | // For instance, | |||
190 | // | |||
191 | // [Block PredBB] | |||
192 | // [Branch PredBr] | |||
193 | // if (t) { | |||
194 | // Block A; | |||
195 | // } else { | |||
196 | // Block B; | |||
197 | // } | |||
198 | // | |||
199 | // [Block BB] | |||
200 | // cond = PN([true, %A], [..., %B]); // PHI node | |||
201 | // [Branch CondBr] | |||
202 | // if (cond) { | |||
203 | // ... // P(cond == true) = 1% | |||
204 | // } | |||
205 | // | |||
206 | // Here we know that when block A is taken, cond must be true, which means | |||
207 | // P(cond == true | A) = 1 | |||
208 | // | |||
209 | // Given that P(cond == true) = P(cond == true | A) * P(A) + | |||
210 | // P(cond == true | B) * P(B) | |||
211 | // we get: | |||
212 | // P(cond == true ) = P(A) + P(cond == true | B) * P(B) | |||
213 | // | |||
214 | // which gives us: | |||
215 | //    P(A) <= P(cond == true), and since P(t == true) = P(A), | |||
216 | //    P(t == true) <= P(cond == true) | |||
217 | // | |||
218 | // In other words, if we know P(cond == true) is unlikely, we know | |||
219 | // that P(t == true) is also unlikely. | |||
220 | // | |||
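// A worked example with purely illustrative numbers (not from any real
// profile): if CondBr carries branch_weights {1, 99}, then P(cond == true) is
// 1%.  For a PHI operand that is the constant true, BP = 1/100 < 50%, so the
// edge arriving through block A is taken at most 1% of the time; if PredBr
// has no profile data of its own yet, it receives branch weights in roughly a
// 1:99 ratio, oriented toward whichever of its successors leads to A.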
221 | static void updatePredecessorProfileMetadata(PHINode *PN, BasicBlock *BB) { | |||
222 | BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); | |||
223 | if (!CondBr) | |||
224 | return; | |||
225 | ||||
226 | uint64_t TrueWeight, FalseWeight; | |||
227 | if (!CondBr->extractProfMetadata(TrueWeight, FalseWeight)) | |||
228 | return; | |||
229 | ||||
230 | if (TrueWeight + FalseWeight == 0) | |||
231 | // Zero branch_weights do not give a hint for getting branch probabilities. | |||
232 | // Technically, using them would result in division by the zero denominator | |||
233 | // TrueWeight + FalseWeight. | |||
234 | return; | |||
235 | ||||
236 | // Returns the outgoing edge of the dominating predecessor block | |||
237 | // that leads to the PhiNode's incoming block: | |||
238 | auto GetPredOutEdge = | |||
239 | [](BasicBlock *IncomingBB, | |||
240 | BasicBlock *PhiBB) -> std::pair<BasicBlock *, BasicBlock *> { | |||
241 | auto *PredBB = IncomingBB; | |||
242 | auto *SuccBB = PhiBB; | |||
243 | SmallPtrSet<BasicBlock *, 16> Visited; | |||
244 | while (true) { | |||
245 | BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()); | |||
246 | if (PredBr && PredBr->isConditional()) | |||
247 | return {PredBB, SuccBB}; | |||
248 | Visited.insert(PredBB); | |||
249 | auto *SinglePredBB = PredBB->getSinglePredecessor(); | |||
250 | if (!SinglePredBB) | |||
251 | return {nullptr, nullptr}; | |||
252 | ||||
253 | // Stop searching when SinglePredBB has been visited. It means we see | |||
254 | // an unreachable loop. | |||
255 | if (Visited.count(SinglePredBB)) | |||
256 | return {nullptr, nullptr}; | |||
257 | ||||
258 | SuccBB = PredBB; | |||
259 | PredBB = SinglePredBB; | |||
260 | } | |||
261 | }; | |||
262 | ||||
263 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
264 | Value *PhiOpnd = PN->getIncomingValue(i); | |||
265 | ConstantInt *CI = dyn_cast<ConstantInt>(PhiOpnd); | |||
266 | ||||
267 | if (!CI || !CI->getType()->isIntegerTy(1)) | |||
268 | continue; | |||
269 | ||||
270 | BranchProbability BP = | |||
271 | (CI->isOne() ? BranchProbability::getBranchProbability( | |||
272 | TrueWeight, TrueWeight + FalseWeight) | |||
273 | : BranchProbability::getBranchProbability( | |||
274 | FalseWeight, TrueWeight + FalseWeight)); | |||
275 | ||||
276 | auto PredOutEdge = GetPredOutEdge(PN->getIncomingBlock(i), BB); | |||
277 | if (!PredOutEdge.first) | |||
278 | return; | |||
279 | ||||
280 | BasicBlock *PredBB = PredOutEdge.first; | |||
281 | BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()); | |||
282 | if (!PredBr) | |||
283 | return; | |||
284 | ||||
285 | uint64_t PredTrueWeight, PredFalseWeight; | |||
286 | // FIXME: We currently only set the profile data when it is missing. | |||
287 | // With PGO, this can be used to refine even existing profile data with | |||
288 | // context information. This needs to be done after more performance | |||
289 | // testing. | |||
290 | if (PredBr->extractProfMetadata(PredTrueWeight, PredFalseWeight)) | |||
291 | continue; | |||
292 | ||||
293 | // We cannot infer anything useful when BP >= 50%, because BP is the | |||
294 | // upper bound probability value. | |||
295 | if (BP >= BranchProbability(50, 100)) | |||
296 | continue; | |||
297 | ||||
298 | SmallVector<uint32_t, 2> Weights; | |||
299 | if (PredBr->getSuccessor(0) == PredOutEdge.second) { | |||
300 | Weights.push_back(BP.getNumerator()); | |||
301 | Weights.push_back(BP.getCompl().getNumerator()); | |||
302 | } else { | |||
303 | Weights.push_back(BP.getCompl().getNumerator()); | |||
304 | Weights.push_back(BP.getNumerator()); | |||
305 | } | |||
306 | PredBr->setMetadata(LLVMContext::MD_prof, | |||
307 | MDBuilder(PredBr->getParent()->getContext()) | |||
308 | .createBranchWeights(Weights)); | |||
309 | } | |||
310 | } | |||
311 | ||||
312 | /// runOnFunction - Toplevel algorithm. | |||
313 | bool JumpThreading::runOnFunction(Function &F) { | |||
314 | if (skipFunction(F)) | |||
315 | return false; | |||
316 | auto TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); | |||
317 | // Jump threading makes no sense for targets with divergent control flow. | |||
318 | if (TTI->hasBranchDivergence()) | |||
319 | return false; | |||
320 | auto TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); | |||
321 | auto DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | |||
322 | auto LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI(); | |||
323 | auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | |||
324 | DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Lazy); | |||
325 | std::unique_ptr<BlockFrequencyInfo> BFI; | |||
326 | std::unique_ptr<BranchProbabilityInfo> BPI; | |||
327 | if (F.hasProfileData()) { | |||
328 | LoopInfo LI{DominatorTree(F)}; | |||
329 | BPI.reset(new BranchProbabilityInfo(F, LI, TLI)); | |||
330 | BFI.reset(new BlockFrequencyInfo(F, *BPI, LI)); | |||
331 | } | |||
332 | ||||
333 | bool Changed = Impl.runImpl(F, TLI, LVI, AA, &DTU, F.hasProfileData(), | |||
334 | std::move(BFI), std::move(BPI)); | |||
335 | if (PrintLVIAfterJumpThreading) { | |||
336 | dbgs() << "LVI for function '" << F.getName() << "':\n"; | |||
337 | LVI->printLVI(F, DTU.getDomTree(), dbgs()); | |||
338 | } | |||
339 | return Changed; | |||
340 | } | |||
341 | ||||
342 | PreservedAnalyses JumpThreadingPass::run(Function &F, | |||
343 | FunctionAnalysisManager &AM) { | |||
344 | auto &TTI = AM.getResult<TargetIRAnalysis>(F); | |||
345 | // Jump threading makes no sense for targets with divergent control flow. | |||
346 | if (TTI.hasBranchDivergence()) | |||
347 | return PreservedAnalyses::all(); | |||
348 | auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); | |||
349 | auto &DT = AM.getResult<DominatorTreeAnalysis>(F); | |||
350 | auto &LVI = AM.getResult<LazyValueAnalysis>(F); | |||
351 | auto &AA = AM.getResult<AAManager>(F); | |||
352 | DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy); | |||
353 | ||||
354 | std::unique_ptr<BlockFrequencyInfo> BFI; | |||
355 | std::unique_ptr<BranchProbabilityInfo> BPI; | |||
356 | if (F.hasProfileData()) { | |||
357 | LoopInfo LI{DominatorTree(F)}; | |||
358 | BPI.reset(new BranchProbabilityInfo(F, LI, &TLI)); | |||
359 | BFI.reset(new BlockFrequencyInfo(F, *BPI, LI)); | |||
360 | } | |||
361 | ||||
362 | bool Changed = runImpl(F, &TLI, &LVI, &AA, &DTU, F.hasProfileData(), | |||
363 | std::move(BFI), std::move(BPI)); | |||
364 | ||||
365 | if (PrintLVIAfterJumpThreading) { | |||
366 | dbgs() << "LVI for function '" << F.getName() << "':\n"; | |||
367 | LVI.printLVI(F, DTU.getDomTree(), dbgs()); | |||
368 | } | |||
369 | ||||
370 | if (!Changed) | |||
371 | return PreservedAnalyses::all(); | |||
372 | PreservedAnalyses PA; | |||
373 | PA.preserve<GlobalsAA>(); | |||
374 | PA.preserve<DominatorTreeAnalysis>(); | |||
375 | PA.preserve<LazyValueAnalysis>(); | |||
376 | return PA; | |||
377 | } | |||
378 | ||||
379 | bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_, | |||
380 | LazyValueInfo *LVI_, AliasAnalysis *AA_, | |||
381 | DomTreeUpdater *DTU_, bool HasProfileData_, | |||
382 | std::unique_ptr<BlockFrequencyInfo> BFI_, | |||
383 | std::unique_ptr<BranchProbabilityInfo> BPI_) { | |||
384 | LLVM_DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n"); | |||
385 | TLI = TLI_; | |||
386 | LVI = LVI_; | |||
387 | AA = AA_; | |||
388 | DTU = DTU_; | |||
389 | BFI.reset(); | |||
390 | BPI.reset(); | |||
391 | // When profile data is available, we need to update edge weights after | |||
392 | // successful jump threading, which requires both BPI and BFI being available. | |||
393 | HasProfileData = HasProfileData_; | |||
394 | auto *GuardDecl = F.getParent()->getFunction( | |||
395 | Intrinsic::getName(Intrinsic::experimental_guard)); | |||
396 | HasGuards = GuardDecl && !GuardDecl->use_empty(); | |||
397 | if (HasProfileData) { | |||
398 | BPI = std::move(BPI_); | |||
399 | BFI = std::move(BFI_); | |||
400 | } | |||
401 | ||||
402 | // Reduce the number of instructions duplicated when optimizing strictly for | |||
403 | // size. | |||
404 | if (BBDuplicateThreshold.getNumOccurrences()) | |||
405 | BBDupThreshold = BBDuplicateThreshold; | |||
406 | else if (F.hasFnAttribute(Attribute::MinSize)) | |||
407 | BBDupThreshold = 3; | |||
408 | else | |||
409 | BBDupThreshold = DefaultBBDupThreshold; | |||
410 | ||||
411 | // JumpThreading must not process blocks unreachable from entry. It's a | |||
412 | // waste of compute time and can potentially lead to hangs. | |||
413 | SmallPtrSet<BasicBlock *, 16> Unreachable; | |||
414 | assert(DTU && "DTU isn't passed into JumpThreading before using it."); | |||
415 | assert(DTU->hasDomTree() && "JumpThreading relies on DomTree to proceed."); | |||
416 | DominatorTree &DT = DTU->getDomTree(); | |||
417 | for (auto &BB : F) | |||
418 | if (!DT.isReachableFromEntry(&BB)) | |||
419 | Unreachable.insert(&BB); | |||
420 | ||||
421 | if (!ThreadAcrossLoopHeaders) | |||
422 | findLoopHeaders(F); | |||
423 | ||||
424 | bool EverChanged = false; | |||
425 | bool Changed; | |||
426 | do { | |||
427 | Changed = false; | |||
428 | for (auto &BB : F) { | |||
429 | if (Unreachable.count(&BB)) | |||
430 | continue; | |||
431 | while (processBlock(&BB)) // Thread all of the branches we can over BB. | |||
432 | Changed = true; | |||
433 | ||||
434 | // Jump threading may have introduced redundant debug values into BB | |||
435 | // which should be removed. | |||
436 | // Remove redundant pseudo probes as well. | |||
437 | if (Changed) | |||
438 | RemoveRedundantDbgInstrs(&BB, true); | |||
439 | ||||
440 | // Stop processing BB if it's the entry or is now deleted. The following | |||
441 | // routines attempt to eliminate BB, and locating a suitable replacement | |||
442 | // for the entry is non-trivial. | |||
443 | if (&BB == &F.getEntryBlock() || DTU->isBBPendingDeletion(&BB)) | |||
444 | continue; | |||
445 | ||||
446 | if (pred_empty(&BB)) { | |||
447 | // When processBlock makes BB unreachable it doesn't bother to fix up | |||
448 | // the instructions in it. We must remove BB to prevent invalid IR. | |||
449 | LLVM_DEBUG(dbgs() << "  JT: Deleting dead block '" << BB.getName() | |||
450 | << "' with terminator: " << *BB.getTerminator() | |||
451 | << '\n'); | |||
452 | LoopHeaders.erase(&BB); | |||
453 | LVI->eraseBlock(&BB); | |||
454 | DeleteDeadBlock(&BB, DTU); | |||
455 | Changed = true; | |||
456 | continue; | |||
457 | } | |||
458 | ||||
459 | // processBlock doesn't thread BBs with unconditional TIs. However, if BB | |||
460 | // is "almost empty", we attempt to merge BB with its sole successor. | |||
461 | auto *BI = dyn_cast<BranchInst>(BB.getTerminator()); | |||
462 | if (BI && BI->isUnconditional()) { | |||
463 | BasicBlock *Succ = BI->getSuccessor(0); | |||
464 | if ( | |||
465 | // The terminator must be the only non-phi instruction in BB. | |||
466 | BB.getFirstNonPHIOrDbg(true)->isTerminator() && | |||
467 | // Don't alter Loop headers and latches to ensure another pass can | |||
468 | // detect and transform nested loops later. | |||
469 | !LoopHeaders.count(&BB) && !LoopHeaders.count(Succ) && | |||
470 | TryToSimplifyUncondBranchFromEmptyBlock(&BB, DTU)) { | |||
471 | RemoveRedundantDbgInstrs(Succ, true); | |||
472 | // BB is valid for cleanup here because we passed in DTU. F remains | |||
473 | // BB's parent until a DTU->getDomTree() event. | |||
474 | LVI->eraseBlock(&BB); | |||
475 | Changed = true; | |||
476 | } | |||
477 | } | |||
478 | } | |||
479 | EverChanged |= Changed; | |||
480 | } while (Changed); | |||
481 | ||||
482 | LoopHeaders.clear(); | |||
483 | return EverChanged; | |||
484 | } | |||
485 | ||||
486 | // Replace uses of Cond with ToVal when safe to do so. If all uses are | |||
487 | // replaced, we can remove Cond. We cannot blindly replace all uses of Cond | |||
488 | // because we may incorrectly replace uses when guards/assumes are uses of | |||
489 | // `Cond` and we used the guards/assume to reason about the `Cond` value | |||
490 | // at the end of block. RAUW unconditionally replaces all uses | |||
491 | // including the guards/assumes themselves and the uses before the | |||
492 | // guard/assume. | |||
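// For illustration only (a hypothetical snippet, not taken from this file):
//   %c = icmp eq i32 %x, 0
//   call void (i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]
//   br label %next
// LVI may know that %c is true *after* the guard executes, but blindly
// RAUW'ing %c with true would also rewrite the guard's own operand (and any
// use that precedes the guard), deleting the very check the conclusion was
// based on.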
493 | static void replaceFoldableUses(Instruction *Cond, Value *ToVal) { | |||
494 | assert(Cond->getType() == ToVal->getType()); | |||
495 | auto *BB = Cond->getParent(); | |||
496 | // We can unconditionally replace all uses in non-local blocks (i.e. uses | |||
497 | // strictly dominated by BB), since LVI information is true from the | |||
498 | // terminator of BB. | |||
499 | replaceNonLocalUsesWith(Cond, ToVal); | |||
500 | for (Instruction &I : reverse(*BB)) { | |||
501 | // Reached the Cond whose uses we are trying to replace, so there are no | |||
502 | // more uses. | |||
503 | if (&I == Cond) | |||
504 | break; | |||
505 | // We only replace uses in instructions that are guaranteed to reach the end | |||
506 | // of BB, where we know Cond is ToVal. | |||
507 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) | |||
508 | break; | |||
509 | I.replaceUsesOfWith(Cond, ToVal); | |||
510 | } | |||
511 | if (Cond->use_empty() && !Cond->mayHaveSideEffects()) | |||
512 | Cond->eraseFromParent(); | |||
513 | } | |||
514 | ||||
515 | /// Return the cost of duplicating a piece of this block from first non-phi | |||
516 | /// and before StopAt instruction to thread across it. Stop scanning the block | |||
517 | /// when exceeding the threshold. If duplication is impossible, returns ~0U. | |||
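/// As a rough worked example (purely illustrative): a block containing two
/// adds and one non-intrinsic call, ending in a switch that is also StopAt,
/// accumulates Size = 1 + 1 + (1 + 3) = 6 and earns the switch Bonus of 6,
/// so the reported duplication cost is max(6 - 6, 0) = 0.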
518 | static unsigned getJumpThreadDuplicationCost(BasicBlock *BB, | |||
519 | Instruction *StopAt, | |||
520 | unsigned Threshold) { | |||
521 | assert(StopAt->getParent() == BB && "Not an instruction from proper BB?"); | |||
522 | /// Ignore PHI nodes, these will be flattened when duplication happens. | |||
523 | BasicBlock::const_iterator I(BB->getFirstNonPHI()); | |||
524 | ||||
525 | // FIXME: THREADING will delete values that are just used to compute the | |||
526 | // branch, so they shouldn't count against the duplication cost. | |||
527 | ||||
528 | unsigned Bonus = 0; | |||
529 | if (BB->getTerminator() == StopAt) { | |||
530 | // Threading through a switch statement is particularly profitable. If this | |||
531 | // block ends in a switch, decrease its cost to make it more likely to | |||
532 | // happen. | |||
533 | if (isa<SwitchInst>(StopAt)) | |||
534 | Bonus = 6; | |||
535 | ||||
536 | // The same holds for indirect branches, but slightly more so. | |||
537 | if (isa<IndirectBrInst>(StopAt)) | |||
538 | Bonus = 8; | |||
539 | } | |||
540 | ||||
541 | // Bump the threshold up so the early exit from the loop doesn't skip the | |||
542 | // terminator-based Size adjustment at the end. | |||
543 | Threshold += Bonus; | |||
544 | ||||
545 | // Sum up the cost of each instruction until we get to the terminator. Don't | |||
546 | // include the terminator because the copy won't include it. | |||
547 | unsigned Size = 0; | |||
548 | for (; &*I != StopAt; ++I) { | |||
549 | ||||
550 | // Stop scanning the block if we've reached the threshold. | |||
551 | if (Size > Threshold) | |||
552 | return Size; | |||
553 | ||||
554 | // Debugger intrinsics don't incur code size. | |||
555 | if (isa<DbgInfoIntrinsic>(I)) continue; | |||
556 | ||||
557 | // Pseudo-probes don't incur code size. | |||
558 | if (isa<PseudoProbeInst>(I)) | |||
559 | continue; | |||
560 | ||||
561 | // If this is a pointer->pointer bitcast, it is free. | |||
562 | if (isa<BitCastInst>(I) && I->getType()->isPointerTy()) | |||
563 | continue; | |||
564 | ||||
565 | // Freeze instruction is free, too. | |||
566 | if (isa<FreezeInst>(I)) | |||
567 | continue; | |||
568 | ||||
569 | // Bail out if this instruction gives back a token type, it is not possible | |||
570 | // to duplicate it if it is used outside this BB. | |||
571 | if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB)) | |||
572 | return ~0U; | |||
573 | ||||
574 | // All other instructions count for at least one unit. | |||
575 | ++Size; | |||
576 | ||||
577 | // Calls are more expensive. If they are non-intrinsic calls, we model them | |||
578 | // as having cost of 4. If they are a non-vector intrinsic, we model them | |||
579 | // as having cost of 2 total, and if they are a vector intrinsic, we model | |||
580 | // them as having cost 1. | |||
581 | if (const CallInst *CI = dyn_cast<CallInst>(I)) { | |||
582 | if (CI->cannotDuplicate() || CI->isConvergent()) | |||
583 | // Blocks with NoDuplicate are modelled as having infinite cost, so they | |||
584 | // are never duplicated. | |||
585 | return ~0U; | |||
586 | else if (!isa<IntrinsicInst>(CI)) | |||
587 | Size += 3; | |||
588 | else if (!CI->getType()->isVectorTy()) | |||
589 | Size += 1; | |||
590 | } | |||
591 | } | |||
592 | ||||
593 | return Size > Bonus ? Size - Bonus : 0; | |||
594 | } | |||
595 | ||||
596 | /// findLoopHeaders - We do not want jump threading to turn proper loop | |||
597 | /// structures into irreducible loops. Doing this breaks up the loop nesting | |||
598 | /// hierarchy and pessimizes later transformations. To prevent this from | |||
599 | /// happening, we first have to find the loop headers. Here we approximate this | |||
600 | /// by finding targets of backedges in the CFG. | |||
601 | /// | |||
602 | /// Note that there definitely are cases when we want to allow threading of | |||
603 | /// edges across a loop header. For example, threading a jump from outside the | |||
604 | /// loop (the preheader) to an exit block of the loop is definitely profitable. | |||
605 | /// It is also almost always profitable to thread backedges from within the loop | |||
606 | /// to exit blocks, and is often profitable to thread backedges to other blocks | |||
607 | /// within the loop (forming a nested loop). This simple analysis is not rich | |||
608 | /// enough to track all of these properties and keep it up-to-date as the CFG | |||
609 | /// mutates, so we don't allow any of these transformations. | |||
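/// As a small illustration (hypothetical CFG, not from this file): given
///   entry -> header -> body -> header (backedge), body -> exit
/// FindFunctionBackedges reports the edge body -> header, so `header` is
/// recorded in LoopHeaders and threading across it is suppressed by default.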
610 | void JumpThreadingPass::findLoopHeaders(Function &F) { | |||
611 | SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges; | |||
612 | FindFunctionBackedges(F, Edges); | |||
613 | ||||
614 | for (const auto &Edge : Edges) | |||
615 | LoopHeaders.insert(Edge.second); | |||
616 | } | |||
617 | ||||
618 | /// getKnownConstant - Helper method to determine if we can thread over a | |||
619 | /// terminator with the given value as its condition, and if so what value to | |||
620 | /// use for that. What kind of value this is depends on whether we want an | |||
621 | /// integer or a block address, but an undef is always accepted. | |||
622 | /// Returns null if Val is null or not an appropriate constant. | |||
623 | static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) { | |||
624 | if (!Val) | |||
625 | return nullptr; | |||
626 | ||||
627 | // Undef is "known" enough. | |||
628 | if (UndefValue *U = dyn_cast<UndefValue>(Val)) | |||
629 | return U; | |||
630 | ||||
631 | if (Preference == WantBlockAddress) | |||
632 | return dyn_cast<BlockAddress>(Val->stripPointerCasts()); | |||
633 | ||||
634 | return dyn_cast<ConstantInt>(Val); | |||
635 | } | |||
636 | ||||
637 | /// computeValueKnownInPredecessors - Given a basic block BB and a value V, see | |||
638 | /// if we can infer that the value is a known ConstantInt/BlockAddress or undef | |||
639 | /// in any of our predecessors. If so, return the known list of value and pred | |||
640 | /// BB in the result vector. | |||
641 | /// | |||
642 | /// This returns true if there were any known values. | |||
643 | bool JumpThreadingPass::computeValueKnownInPredecessorsImpl( | |||
644 | Value *V, BasicBlock *BB, PredValueInfo &Result, | |||
645 | ConstantPreference Preference, DenseSet<Value *> &RecursionSet, | |||
646 | Instruction *CxtI) { | |||
647 | // This method walks up use-def chains recursively. Because of this, we could | |||
648 | // get into an infinite loop going around loops in the use-def chain. To | |||
649 | // prevent this, keep track of what (value, block) pairs we've already visited | |||
650 | // and terminate the search if we loop back to them | |||
651 | if (!RecursionSet.insert(V).second) | |||
652 | return false; | |||
653 | ||||
654 | // If V is a constant, then it is known in all predecessors. | |||
655 | if (Constant *KC = getKnownConstant(V, Preference)) { | |||
656 | for (BasicBlock *Pred : predecessors(BB)) | |||
657 | Result.emplace_back(KC, Pred); | |||
658 | ||||
659 | return !Result.empty(); | |||
660 | } | |||
661 | ||||
662 | // If V is a non-instruction value, or an instruction in a different block, | |||
663 | // then it can't be derived from a PHI. | |||
664 | Instruction *I = dyn_cast<Instruction>(V); | |||
665 | if (!I || I->getParent() != BB) { | |||
666 | ||||
667 | // Okay, if this is a live-in value, see if it has a known value at the end | |||
668 | // of any of our predecessors. | |||
669 | // | |||
670 | // FIXME: This should be an edge property, not a block end property. | |||
671 | /// TODO: Per PR2563, we could infer value range information about a | |||
672 | /// predecessor based on its terminator. | |||
673 | // | |||
674 | // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if | |||
675 | // "I" is a non-local compare-with-a-constant instruction. This would be | |||
676 | // able to handle value inequalities better, for example if the compare is | |||
677 | // "X < 4" and "X < 3" is known true but "X < 4" itself is not available. | |||
678 | // Perhaps getConstantOnEdge should be smart enough to do this? | |||
679 | for (BasicBlock *P : predecessors(BB)) { | |||
680 | // If the value is known by LazyValueInfo to be a constant in a | |||
681 | // predecessor, use that information to try to thread this block. | |||
682 | Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI); | |||
683 | if (Constant *KC = getKnownConstant(PredCst, Preference)) | |||
684 | Result.emplace_back(KC, P); | |||
685 | } | |||
686 | ||||
687 | return !Result.empty(); | |||
688 | } | |||
689 | ||||
690 | /// If I is a PHI node, then we know the incoming values for any constants. | |||
691 | if (PHINode *PN = dyn_cast<PHINode>(I)) { | |||
692 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
693 | Value *InVal = PN->getIncomingValue(i); | |||
694 | if (Constant *KC = getKnownConstant(InVal, Preference)) { | |||
695 | Result.emplace_back(KC, PN->getIncomingBlock(i)); | |||
696 | } else { | |||
697 | Constant *CI = LVI->getConstantOnEdge(InVal, | |||
698 | PN->getIncomingBlock(i), | |||
699 | BB, CxtI); | |||
700 | if (Constant *KC = getKnownConstant(CI, Preference)) | |||
701 | Result.emplace_back(KC, PN->getIncomingBlock(i)); | |||
702 | } | |||
703 | } | |||
704 | ||||
705 | return !Result.empty(); | |||
706 | } | |||
707 | ||||
708 | // Handle Cast instructions. | |||
709 | if (CastInst *CI = dyn_cast<CastInst>(I)) { | |||
710 | Value *Source = CI->getOperand(0); | |||
711 | computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference, | |||
712 | RecursionSet, CxtI); | |||
713 | if (Result.empty()) | |||
714 | return false; | |||
715 | ||||
716 | // Convert the known values. | |||
717 | for (auto &R : Result) | |||
718 | R.first = ConstantExpr::getCast(CI->getOpcode(), R.first, CI->getType()); | |||
719 | ||||
720 | return true; | |||
721 | } | |||
722 | ||||
723 | if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) { | |||
724 | Value *Source = FI->getOperand(0); | |||
725 | computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference, | |||
726 | RecursionSet, CxtI); | |||
727 | ||||
728 | erase_if(Result, [](auto &Pair) { | |||
729 | return !isGuaranteedNotToBeUndefOrPoison(Pair.first); | |||
730 | }); | |||
731 | ||||
732 | return !Result.empty(); | |||
733 | } | |||
734 | ||||
735 | // Handle some boolean conditions. | |||
736 | if (I->getType()->getPrimitiveSizeInBits() == 1) { | |||
737 | using namespace PatternMatch; | |||
738 | ||||
739 | assert(Preference == WantInteger && "One-bit non-integer type?"); | |||
740 | // X | true -> true | |||
741 | // X & false -> false | |||
742 | Value *Op0, *Op1; | |||
743 | if (match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))) || | |||
744 | match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) { | |||
745 | PredValueInfoTy LHSVals, RHSVals; | |||
746 | ||||
747 | computeValueKnownInPredecessorsImpl(Op0, BB, LHSVals, WantInteger, | |||
748 | RecursionSet, CxtI); | |||
749 | computeValueKnownInPredecessorsImpl(Op1, BB, RHSVals, WantInteger, | |||
750 | RecursionSet, CxtI); | |||
751 | ||||
752 | if (LHSVals.empty() && RHSVals.empty()) | |||
753 | return false; | |||
754 | ||||
755 | ConstantInt *InterestingVal; | |||
756 | if (match(I, m_LogicalOr())) | |||
757 | InterestingVal = ConstantInt::getTrue(I->getContext()); | |||
758 | else | |||
759 | InterestingVal = ConstantInt::getFalse(I->getContext()); | |||
760 | ||||
761 | SmallPtrSet<BasicBlock*, 4> LHSKnownBBs; | |||
762 | ||||
763 | // Scan for the sentinel. If we find an undef, force it to the | |||
764 | // interesting value: x|undef -> true and x&undef -> false. | |||
765 | for (const auto &LHSVal : LHSVals) | |||
766 | if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) { | |||
767 | Result.emplace_back(InterestingVal, LHSVal.second); | |||
768 | LHSKnownBBs.insert(LHSVal.second); | |||
769 | } | |||
770 | for (const auto &RHSVal : RHSVals) | |||
771 | if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) { | |||
772 | // If we already inferred a value for this block on the LHS, don't | |||
773 | // re-add it. | |||
774 | if (!LHSKnownBBs.count(RHSVal.second)) | |||
775 | Result.emplace_back(InterestingVal, RHSVal.second); | |||
776 | } | |||
777 | ||||
778 | return !Result.empty(); | |||
779 | } | |||
780 | ||||
781 | // Handle the NOT form of XOR. | |||
782 | if (I->getOpcode() == Instruction::Xor && | |||
783 | isa<ConstantInt>(I->getOperand(1)) && | |||
784 | cast<ConstantInt>(I->getOperand(1))->isOne()) { | |||
785 | computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, Result, | |||
786 | WantInteger, RecursionSet, CxtI); | |||
787 | if (Result.empty()) | |||
788 | return false; | |||
789 | ||||
790 | // Invert the known values. | |||
791 | for (auto &R : Result) | |||
792 | R.first = ConstantExpr::getNot(R.first); | |||
793 | ||||
794 | return true; | |||
795 | } | |||
796 | ||||
797 | // Try to simplify some other binary operator values. | |||
798 | } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { | |||
799 | assert(Preference != WantBlockAddress | |||
800 | && "A binary operator creating a block address?"); | |||
801 | if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) { | |||
802 | PredValueInfoTy LHSVals; | |||
803 | computeValueKnownInPredecessorsImpl(BO->getOperand(0), BB, LHSVals, | |||
804 | WantInteger, RecursionSet, CxtI); | |||
805 | ||||
806 | // Try to use constant folding to simplify the binary operator. | |||
807 | for (const auto &LHSVal : LHSVals) { | |||
808 | Constant *V = LHSVal.first; | |||
809 | Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI); | |||
810 | ||||
811 | if (Constant *KC = getKnownConstant(Folded, WantInteger)) | |||
812 | Result.emplace_back(KC, LHSVal.second); | |||
813 | } | |||
814 | } | |||
815 | ||||
816 | return !Result.empty(); | |||
817 | } | |||
818 | ||||
819 | // Handle compare with phi operand, where the PHI is defined in this block. | |||
820 | if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) { | |||
821 | assert(Preference == WantInteger && "Compares only produce integers"); | |||
822 | Type *CmpType = Cmp->getType(); | |||
823 | Value *CmpLHS = Cmp->getOperand(0); | |||
824 | Value *CmpRHS = Cmp->getOperand(1); | |||
825 | CmpInst::Predicate Pred = Cmp->getPredicate(); | |||
826 | ||||
827 | PHINode *PN = dyn_cast<PHINode>(CmpLHS); | |||
828 | if (!PN) | |||
829 | PN = dyn_cast<PHINode>(CmpRHS); | |||
830 | if (PN && PN->getParent() == BB) { | |||
831 | const DataLayout &DL = PN->getModule()->getDataLayout(); | |||
832 | // We can do this simplification if any comparisons fold to true or false. | |||
833 | // See if any do. | |||
834 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
835 | BasicBlock *PredBB = PN->getIncomingBlock(i); | |||
836 | Value *LHS, *RHS; | |||
837 | if (PN == CmpLHS) { | |||
838 | LHS = PN->getIncomingValue(i); | |||
839 | RHS = CmpRHS->DoPHITranslation(BB, PredBB); | |||
840 | } else { | |||
841 | LHS = CmpLHS->DoPHITranslation(BB, PredBB); | |||
842 | RHS = PN->getIncomingValue(i); | |||
843 | } | |||
844 | Value *Res = SimplifyCmpInst(Pred, LHS, RHS, {DL}); | |||
845 | if (!Res) { | |||
846 | if (!isa<Constant>(RHS)) | |||
847 | continue; | |||
848 | ||||
849 | // getPredicateOnEdge call will make no sense if LHS is defined in BB. | |||
850 | auto LHSInst = dyn_cast<Instruction>(LHS); | |||
851 | if (LHSInst && LHSInst->getParent() == BB) | |||
852 | continue; | |||
853 | ||||
854 | LazyValueInfo::Tristate | |||
855 | ResT = LVI->getPredicateOnEdge(Pred, LHS, | |||
856 | cast<Constant>(RHS), PredBB, BB, | |||
857 | CxtI ? CxtI : Cmp); | |||
858 | if (ResT == LazyValueInfo::Unknown) | |||
859 | continue; | |||
860 | Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT); | |||
861 | } | |||
862 | ||||
863 | if (Constant *KC = getKnownConstant(Res, WantInteger)) | |||
864 | Result.emplace_back(KC, PredBB); | |||
865 | } | |||
866 | ||||
867 | return !Result.empty(); | |||
868 | } | |||
869 | ||||
870 | // If comparing a live-in value against a constant, see if we know the | |||
871 | // live-in value on any predecessors. | |||
872 | if (isa<Constant>(CmpRHS) && !CmpType->isVectorTy()) { | |||
873 | Constant *CmpConst = cast<Constant>(CmpRHS); | |||
874 | ||||
875 | if (!isa<Instruction>(CmpLHS) || | |||
876 | cast<Instruction>(CmpLHS)->getParent() != BB) { | |||
877 | for (BasicBlock *P : predecessors(BB)) { | |||
878 | // If the value is known by LazyValueInfo to be a constant in a | |||
879 | // predecessor, use that information to try to thread this block. | |||
880 | LazyValueInfo::Tristate Res = | |||
881 | LVI->getPredicateOnEdge(Pred, CmpLHS, | |||
882 | CmpConst, P, BB, CxtI ? CxtI : Cmp); | |||
883 | if (Res == LazyValueInfo::Unknown) | |||
884 | continue; | |||
885 | ||||
886 | Constant *ResC = ConstantInt::get(CmpType, Res); | |||
887 | Result.emplace_back(ResC, P); | |||
888 | } | |||
889 | ||||
890 | return !Result.empty(); | |||
891 | } | |||
892 | ||||
893 | // InstCombine can fold some forms of constant range checks into | |||
894 | // (icmp (add (x, C1)), C2). See if we have such a thing with | |||
895 | // x as a live-in. | |||
896 | { | |||
897 | using namespace PatternMatch; | |||
898 | ||||
899 | Value *AddLHS; | |||
900 | ConstantInt *AddConst; | |||
901 | if (isa<ConstantInt>(CmpConst) && | |||
902 | match(CmpLHS, m_Add(m_Value(AddLHS), m_ConstantInt(AddConst)))) { | |||
903 | if (!isa<Instruction>(AddLHS) || | |||
904 | cast<Instruction>(AddLHS)->getParent() != BB) { | |||
905 | for (BasicBlock *P : predecessors(BB)) { | |||
906 | // If the value is known by LazyValueInfo to be a ConstantRange in | |||
907 | // a predecessor, use that information to try to thread this | |||
908 | // block. | |||
909 | ConstantRange CR = LVI->getConstantRangeOnEdge( | |||
910 | AddLHS, P, BB, CxtI ? CxtI : cast<Instruction>(CmpLHS)); | |||
911 | // Propagate the range through the addition. | |||
912 | CR = CR.add(AddConst->getValue()); | |||
913 | ||||
914 | // Get the range where the compare returns true. | |||
915 | ConstantRange CmpRange = ConstantRange::makeExactICmpRegion( | |||
916 | Pred, cast<ConstantInt>(CmpConst)->getValue()); | |||
917 | ||||
918 | Constant *ResC; | |||
919 | if (CmpRange.contains(CR)) | |||
920 | ResC = ConstantInt::getTrue(CmpType); | |||
921 | else if (CmpRange.inverse().contains(CR)) | |||
922 | ResC = ConstantInt::getFalse(CmpType); | |||
923 | else | |||
924 | continue; | |||
925 | ||||
926 | Result.emplace_back(ResC, P); | |||
927 | } | |||
928 | ||||
929 | return !Result.empty(); | |||
930 | } | |||
931 | } | |||
932 | } | |||
933 | ||||
934 | // Try to find a constant value for the LHS of a comparison, | |||
935 | // and evaluate it statically if we can. | |||
936 | PredValueInfoTy LHSVals; | |||
937 | computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, LHSVals, | |||
938 | WantInteger, RecursionSet, CxtI); | |||
939 | ||||
940 | for (const auto &LHSVal : LHSVals) { | |||
941 | Constant *V = LHSVal.first; | |||
942 | Constant *Folded = ConstantExpr::getCompare(Pred, V, CmpConst); | |||
943 | if (Constant *KC = getKnownConstant(Folded, WantInteger)) | |||
944 | Result.emplace_back(KC, LHSVal.second); | |||
945 | } | |||
946 | ||||
947 | return !Result.empty(); | |||
948 | } | |||
949 | } | |||
950 | ||||
951 | if (SelectInst *SI = dyn_cast<SelectInst>(I)) { | |||
952 | // Handle select instructions where at least one operand is a known constant | |||
953 | // and we can figure out the condition value for any predecessor block. | |||
954 | Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference); | |||
955 | Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference); | |||
956 | PredValueInfoTy Conds; | |||
957 | if ((TrueVal || FalseVal) && | |||
958 | computeValueKnownInPredecessorsImpl(SI->getCondition(), BB, Conds, | |||
959 | WantInteger, RecursionSet, CxtI)) { | |||
960 | for (auto &C : Conds) { | |||
961 | Constant *Cond = C.first; | |||
962 | ||||
963 | // Figure out what value to use for the condition. | |||
964 | bool KnownCond; | |||
965 | if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) { | |||
966 | // A known boolean. | |||
967 | KnownCond = CI->isOne(); | |||
968 | } else { | |||
969 | assert(isa<UndefValue>(Cond) && "Unexpected condition value"); | |||
970 | // Either operand will do, so be sure to pick the one that's a known | |||
971 | // constant. | |||
972 | // FIXME: Do this more cleverly if both values are known constants? | |||
973 | KnownCond = (TrueVal != nullptr); | |||
974 | } | |||
975 | ||||
976 | // See if the select has a known constant value for this predecessor. | |||
977 | if (Constant *Val = KnownCond ? TrueVal : FalseVal) | |||
978 | Result.emplace_back(Val, C.second); | |||
979 | } | |||
980 | ||||
981 | return !Result.empty(); | |||
982 | } | |||
983 | } | |||
984 | ||||
985 | // If all else fails, see if LVI can figure out a constant value for us. | |||
986 | assert(CxtI->getParent() == BB && "CxtI should be in BB"); | |||
987 | Constant *CI = LVI->getConstant(V, CxtI); | |||
988 | if (Constant *KC = getKnownConstant(CI, Preference)) { | |||
989 | for (BasicBlock *Pred : predecessors(BB)) | |||
990 | Result.emplace_back(KC, Pred); | |||
991 | } | |||
992 | ||||
993 | return !Result.empty(); | |||
994 | } | |||
995 | ||||
996 | /// GetBestDestForBranchOnUndef - If we determine that the specified block ends | |||
997 | /// in an undefined jump, decide which block is best to revector to. | |||
998 | /// | |||
999 | /// Since we can pick an arbitrary destination, we pick the successor with the | |||
1000 | /// fewest predecessors. This should reduce the in-degree of the others. | |||
1001 | static unsigned getBestDestForJumpOnUndef(BasicBlock *BB) { | |||
1002 | Instruction *BBTerm = BB->getTerminator(); | |||
1003 | unsigned MinSucc = 0; | |||
1004 | BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc); | |||
1005 | // Compute the successor with the minimum number of predecessors. | |||
1006 | unsigned MinNumPreds = pred_size(TestBB); | |||
1007 | for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) { | |||
1008 | TestBB = BBTerm->getSuccessor(i); | |||
1009 | unsigned NumPreds = pred_size(TestBB); | |||
1010 | if (NumPreds < MinNumPreds) { | |||
1011 | MinSucc = i; | |||
1012 | MinNumPreds = NumPreds; | |||
1013 | } | |||
1014 | } | |||
1015 | ||||
1016 | return MinSucc; | |||
1017 | } | |||
1018 | ||||
1019 | static bool hasAddressTakenAndUsed(BasicBlock *BB) { | |||
1020 | if (!BB->hasAddressTaken()) return false; | |||
1021 | ||||
1022 | // If the block has its address taken, it may be a tree of dead constants | |||
1023 | // hanging off of it. These shouldn't keep the block alive. | |||
1024 | BlockAddress *BA = BlockAddress::get(BB); | |||
1025 | BA->removeDeadConstantUsers(); | |||
1026 | return !BA->use_empty(); | |||
1027 | } | |||
1028 | ||||
1029 | /// processBlock - If there are any predecessors whose control can be threaded | |||
1030 | /// through to a successor, transform them now. | |||
1031 | bool JumpThreadingPass::processBlock(BasicBlock *BB) { | |||
1032 | // If the block is trivially dead, just return and let the caller nuke it. | |||
1033 | // This simplifies other transformations. | |||
1034 | if (DTU->isBBPendingDeletion(BB) || | |||
1035 | (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock())) | |||
1036 | return false; | |||
1037 | ||||
1038 | // If this block has a single predecessor, and if that pred has a single | |||
1039 | // successor, merge the blocks. This encourages recursive jump threading | |||
1040 | // because now the condition in this block can be threaded through | |||
1041 | // predecessors of our predecessor block. | |||
1042 | if (maybeMergeBasicBlockIntoOnlyPred(BB)) | |||
1043 | return true; | |||
1044 | ||||
1045 | if (tryToUnfoldSelectInCurrBB(BB)) | |||
1046 | return true; | |||
1047 | ||||
1048 | // Look if we can propagate guards to predecessors. | |||
1049 | if (HasGuards && processGuards(BB)) | |||
1050 | return true; | |||
1051 | ||||
1052 | // What kind of constant we're looking for. | |||
1053 | ConstantPreference Preference = WantInteger; | |||
1054 | ||||
1055 | // Look to see if the terminator is a conditional branch, switch or indirect | |||
1056 | // branch, if not we can't thread it. | |||
1057 | Value *Condition; | |||
1058 | Instruction *Terminator = BB->getTerminator(); | |||
1059 | if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) { | |||
1060 | // Can't thread an unconditional jump. | |||
1061 | if (BI->isUnconditional()) return false; | |||
1062 | Condition = BI->getCondition(); | |||
1063 | } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) { | |||
1064 | Condition = SI->getCondition(); | |||
1065 | } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) { | |||
1066 | // Can't thread indirect branch with no successors. | |||
1067 | if (IB->getNumSuccessors() == 0) return false; | |||
1068 | Condition = IB->getAddress()->stripPointerCasts(); | |||
1069 | Preference = WantBlockAddress; | |||
1070 | } else { | |||
1071 | return false; // Must be an invoke or callbr. | |||
1072 | } | |||
1073 | ||||
1074 | // Keep track if we constant folded the condition in this invocation. | |||
1075 | bool ConstantFolded = false; | |||
1076 | ||||
1077 | // Run constant folding to see if we can reduce the condition to a simple | |||
1078 | // constant. | |||
1079 | if (Instruction *I = dyn_cast<Instruction>(Condition)) { | |||
1080 | Value *SimpleVal = | |||
1081 | ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI); | |||
1082 | if (SimpleVal) { | |||
1083 | I->replaceAllUsesWith(SimpleVal); | |||
1084 | if (isInstructionTriviallyDead(I, TLI)) | |||
1085 | I->eraseFromParent(); | |||
1086 | Condition = SimpleVal; | |||
1087 | ConstantFolded = true; | |||
1088 | } | |||
1089 | } | |||
1090 | ||||
1091 | // If the terminator is branching on an undef or freeze undef, we can pick any | |||
1092 | // of the successors to branch to. Let getBestDestForJumpOnUndef decide. | |||
1093 | auto *FI = dyn_cast<FreezeInst>(Condition); | |||
1094 | if (isa<UndefValue>(Condition) || | |||
1095 | (FI && isa<UndefValue>(FI->getOperand(0)) && FI->hasOneUse())) { | |||
1096 | unsigned BestSucc = getBestDestForJumpOnUndef(BB); | |||
1097 | std::vector<DominatorTree::UpdateType> Updates; | |||
1098 | ||||
1099 | // Fold the branch/switch. | |||
1100 | Instruction *BBTerm = BB->getTerminator(); | |||
1101 | Updates.reserve(BBTerm->getNumSuccessors()); | |||
1102 | for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) { | |||
1103 | if (i == BestSucc) continue; | |||
1104 | BasicBlock *Succ = BBTerm->getSuccessor(i); | |||
1105 | Succ->removePredecessor(BB, true); | |||
1106 | Updates.push_back({DominatorTree::Delete, BB, Succ}); | |||
1107 | } | |||
1108 | ||||
1109 | LLVM_DEBUG(dbgs() << "  In block '" << BB->getName() | |||
1110 | << "' folding undef terminator: " << *BBTerm << '\n'); | |||
1111 | BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm); | |||
1112 | BBTerm->eraseFromParent(); | |||
1113 | DTU->applyUpdatesPermissive(Updates); | |||
1114 | if (FI) | |||
1115 | FI->eraseFromParent(); | |||
1116 | return true; | |||
1117 | } | |||
1118 | ||||
1119 | // If the terminator of this block is branching on a constant, simplify the | |||
1120 | // terminator to an unconditional branch. This can occur due to threading in | |||
1121 | // other blocks. | |||
1122 | if (getKnownConstant(Condition, Preference)) { | |||
1123 | LLVM_DEBUG(dbgs() << " In block '" << BB->getName() | |||
1124 | << "' folding terminator: " << *BB->getTerminator() | |||
1125 | << '\n'); | |||
1126 | ++NumFolds; | |||
1127 | ConstantFoldTerminator(BB, true, nullptr, DTU); | |||
1128 | if (HasProfileData) | |||
1129 | BPI->eraseBlock(BB); | |||
1130 | return true; | |||
1131 | } | |||
1132 | ||||
1133 | Instruction *CondInst = dyn_cast<Instruction>(Condition); | |||
1134 | ||||
1135 | // All the rest of our checks depend on the condition being an instruction. | |||
1136 | if (!CondInst) { | |||
1137 | // FIXME: Unify this with code below. | |||
1138 | if (processThreadableEdges(Condition, BB, Preference, Terminator)) | |||
1139 | return true; | |||
1140 | return ConstantFolded; | |||
1141 | } | |||
1142 | ||||
1143 | if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) { | |||
1144 | // If we're branching on a conditional, LVI might be able to determine | |||
1145 | // its value at the branch instruction. We only handle comparisons | |||
1146 | // against a constant at this time. | |||
1147 | // TODO: This should be extended to handle switches as well. | |||
1148 | BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); | |||
1149 | Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1)); | |||
1150 | if (CondBr && CondConst) { | |||
1151 | // We should have returned as soon as we turned a conditional branch to | |||
1152 | // unconditional, because it's no longer interesting as far as jump | |||
1153 | // threading is concerned. | |||
1154 | assert(CondBr->isConditional() && "Threading on unconditional terminator"); | |||
1155 | ||||
1156 | LazyValueInfo::Tristate Ret = | |||
1157 | LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0), | |||
1158 | CondConst, CondBr); | |||
1159 | if (Ret != LazyValueInfo::Unknown) { | |||
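| // LVI proved the comparison on this path: successor 0 is taken when the | |||
| // condition is true and successor 1 when it is false, so remove the edge | |||
| // to the successor that can never be reached. | |||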
1160 | unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0; | |||
1161 | unsigned ToKeep = Ret == LazyValueInfo::True ? 0 : 1; | |||
1162 | BasicBlock *ToRemoveSucc = CondBr->getSuccessor(ToRemove); | |||
1163 | ToRemoveSucc->removePredecessor(BB, true); | |||
1164 | BranchInst *UncondBr = | |||
1165 | BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr); | |||
1166 | UncondBr->setDebugLoc(CondBr->getDebugLoc()); | |||
1167 | CondBr->eraseFromParent(); | |||
1168 | if (CondCmp->use_empty()) | |||
1169 | CondCmp->eraseFromParent(); | |||
1170 | // We can safely replace *some* uses of the CondInst if it has | |||
1171 | // exactly one value as returned by LVI. RAUW is incorrect in the | |||
1172 | // presence of guards and assumes that have `Cond` as a use. This | |||
1173 | // is because we use the guards/assumes to reason about the `Cond` value | |||
1174 | // at the end of the block, but RAUW unconditionally replaces all uses, | |||
1175 | // including the guards/assumes themselves and the uses before the | |||
1176 | // guard/assume. | |||
1177 | else if (CondCmp->getParent() == BB) { | |||
1178 | auto *CI = Ret == LazyValueInfo::True ? | |||
1179 | ConstantInt::getTrue(CondCmp->getType()) : | |||
1180 | ConstantInt::getFalse(CondCmp->getType()); | |||
1181 | replaceFoldableUses(CondCmp, CI); | |||
1182 | } | |||
1183 | DTU->applyUpdatesPermissive( | |||
1184 | {{DominatorTree::Delete, BB, ToRemoveSucc}}); | |||
1185 | if (HasProfileData) | |||
1186 | BPI->eraseBlock(BB); | |||
1187 | return true; | |||
1188 | } | |||
1189 | ||||
1190 | // We did not manage to simplify this branch, try to see whether | |||
1191 | // CondCmp depends on a known phi-select pattern. | |||
1192 | if (tryToUnfoldSelect(CondCmp, BB)) | |||
1193 | return true; | |||
1194 | } | |||
1195 | } | |||
1196 | ||||
1197 | if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) | |||
1198 | if (tryToUnfoldSelect(SI, BB)) | |||
1199 | return true; | |||
1200 | ||||
1201 | // Check for some cases that are worth simplifying. Right now we want to look | |||
1202 | // for loads that are used by a switch or by the condition for the branch. If | |||
1203 | // we see one, check to see if it's partially redundant. If so, insert a PHI | |||
1204 | // which can then be used to thread the values. | |||
1205 | Value *SimplifyValue = CondInst; | |||
1206 | ||||
1207 | if (auto *FI = dyn_cast<FreezeInst>(SimplifyValue)) | |||
1208 | // Look into freeze's operand | |||
1209 | SimplifyValue = FI->getOperand(0); | |||
1210 | ||||
1211 | if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue)) | |||
1212 | if (isa<Constant>(CondCmp->getOperand(1))) | |||
1213 | SimplifyValue = CondCmp->getOperand(0); | |||
1214 | ||||
1215 | // TODO: There are other places where load PRE would be profitable, such as | |||
1216 | // more complex comparisons. | |||
1217 | if (LoadInst *LoadI = dyn_cast<LoadInst>(SimplifyValue)) | |||
1218 | if (simplifyPartiallyRedundantLoad(LoadI)) | |||
1219 | return true; | |||
1220 | ||||
1221 | // Before threading, try to propagate profile data backwards: | |||
1222 | if (PHINode *PN = dyn_cast<PHINode>(CondInst)) | |||
1223 | if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator())) | |||
1224 | updatePredecessorProfileMetadata(PN, BB); | |||
1225 | ||||
1226 | // Handle a variety of cases where we are branching on something derived from | |||
1227 | // a PHI node in the current block. If we can prove that any predecessors | |||
1228 | // compute a predictable value based on a PHI node, thread those predecessors. | |||
1229 | if (processThreadableEdges(CondInst, BB, Preference, Terminator)) | |||
1230 | return true; | |||
1231 | ||||
1232 | // If this is an otherwise-unfoldable branch on a phi node or freeze(phi) in | |||
1233 | // the current block, see if we can simplify. | |||
1234 | PHINode *PN = dyn_cast<PHINode>( | |||
1235 | isa<FreezeInst>(CondInst) ? cast<FreezeInst>(CondInst)->getOperand(0) | |||
1236 | : CondInst); | |||
1237 | ||||
1238 | if (PN && PN->getParent() == BB && isa<BranchInst>(BB->getTerminator())) | |||
1239 | return processBranchOnPHI(PN); | |||
1240 | ||||
1241 | // If this is an otherwise-unfoldable branch on a XOR, see if we can simplify. | |||
1242 | if (CondInst->getOpcode() == Instruction::Xor && | |||
1243 | CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator())) | |||
1244 | return processBranchOnXOR(cast<BinaryOperator>(CondInst)); | |||
1245 | ||||
1246 | // Search for a stronger dominating condition that can be used to simplify a | |||
1247 | // conditional branch leaving BB. | |||
1248 | if (processImpliedCondition(BB)) | |||
1249 | return true; | |||
1250 | ||||
1251 | return false; | |||
1252 | } | |||
1253 | ||||
1254 | bool JumpThreadingPass::processImpliedCondition(BasicBlock *BB) { | |||
1255 | auto *BI = dyn_cast<BranchInst>(BB->getTerminator()); | |||
1256 | if (!BI || !BI->isConditional()) | |||
1257 | return false; | |||
1258 | ||||
1259 | Value *Cond = BI->getCondition(); | |||
1260 | BasicBlock *CurrentBB = BB; | |||
1261 | BasicBlock *CurrentPred = BB->getSinglePredecessor(); | |||
1262 | unsigned Iter = 0; | |||
1263 | ||||
1264 | auto &DL = BB->getModule()->getDataLayout(); | |||
1265 | ||||
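| // Walk up the chain of single predecessors, looking for a conditional | |||
| // branch whose taken edge into this chain implies the value of Cond. | |||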
1266 | while (CurrentPred && Iter++ < ImplicationSearchThreshold) { | |||
1267 | auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator()); | |||
1268 | if (!PBI || !PBI->isConditional()) | |||
1269 | return false; | |||
1270 | if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB) | |||
1271 | return false; | |||
1272 | ||||
1273 | bool CondIsTrue = PBI->getSuccessor(0) == CurrentBB; | |||
1274 | Optional<bool> Implication = | |||
1275 | isImpliedCondition(PBI->getCondition(), Cond, DL, CondIsTrue); | |||
1276 | if (Implication) { | |||
1277 | BasicBlock *KeepSucc = BI->getSuccessor(*Implication ? 0 : 1); | |||
1278 | BasicBlock *RemoveSucc = BI->getSuccessor(*Implication ? 1 : 0); | |||
1279 | RemoveSucc->removePredecessor(BB); | |||
1280 | BranchInst *UncondBI = BranchInst::Create(KeepSucc, BI); | |||
1281 | UncondBI->setDebugLoc(BI->getDebugLoc()); | |||
1282 | BI->eraseFromParent(); | |||
1283 | DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, RemoveSucc}}); | |||
1284 | if (HasProfileData) | |||
1285 | BPI->eraseBlock(BB); | |||
1286 | return true; | |||
1287 | } | |||
1288 | CurrentBB = CurrentPred; | |||
1289 | CurrentPred = CurrentBB->getSinglePredecessor(); | |||
1290 | } | |||
1291 | ||||
1292 | return false; | |||
1293 | } | |||
1294 | ||||
1295 | /// Return true if Op is an instruction defined in the given block. | |||
1296 | static bool isOpDefinedInBlock(Value *Op, BasicBlock *BB) { | |||
1297 | if (Instruction *OpInst = dyn_cast<Instruction>(Op)) | |||
1298 | if (OpInst->getParent() == BB) | |||
1299 | return true; | |||
1300 | return false; | |||
1301 | } | |||
1302 | ||||
1303 | /// simplifyPartiallyRedundantLoad - If LoadI is an obviously partially | |||
1304 | /// redundant load instruction, eliminate it by replacing it with a PHI node. | |||
1305 | /// This is an important optimization that encourages jump threading, and needs | |||
1306 | /// to be run interlaced with other jump threading tasks. | |||
1307 | bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) { | |||
1308 | // Don't hack volatile and ordered loads. | |||
1309 | if (!LoadI->isUnordered()) return false; | |||
1310 | ||||
1311 | // If the load is defined in a block with exactly one predecessor, it can't be | |||
1312 | // partially redundant. | |||
1313 | BasicBlock *LoadBB = LoadI->getParent(); | |||
1314 | if (LoadBB->getSinglePredecessor()) | |||
1315 | return false; | |||
1316 | ||||
1317 | // If the load is defined in an EH pad, it can't be partially redundant, | |||
1318 | // because the edges between the invoke and the EH pad cannot have other | |||
1319 | // instructions between them. | |||
1320 | if (LoadBB->isEHPad()) | |||
1321 | return false; | |||
1322 | ||||
1323 | Value *LoadedPtr = LoadI->getOperand(0); | |||
1324 | ||||
1325 | // If the loaded operand is defined in the LoadBB and it's not a phi, | |||
1326 | // it can't be available in predecessors. | |||
1327 | if (isOpDefinedInBlock(LoadedPtr, LoadBB) && !isa<PHINode>(LoadedPtr)) | |||
1328 | return false; | |||
1329 | ||||
1330 | // Scan a few instructions up from the load, to see if it is obviously live at | |||
1331 | // the entry to its block. | |||
1332 | BasicBlock::iterator BBIt(LoadI); | |||
1333 | bool IsLoadCSE; | |||
1334 | if (Value *AvailableVal = FindAvailableLoadedValue( | |||
1335 | LoadI, LoadBB, BBIt, DefMaxInstsToScan, AA, &IsLoadCSE)) { | |||
1336 | // If the value of the load is locally available within the block, just use | |||
1337 | // it. This frequently occurs for reg2mem'd allocas. | |||
1338 | ||||
1339 | if (IsLoadCSE) { | |||
1340 | LoadInst *NLoadI = cast<LoadInst>(AvailableVal); | |||
1341 | combineMetadataForCSE(NLoadI, LoadI, false); | |||
1342 | } | |||
1343 | ||||
1344 | // If the returned value is the load itself, replace with an undef. This can | |||
1345 | // only happen in dead loops. | |||
1346 | if (AvailableVal == LoadI) | |||
1347 | AvailableVal = UndefValue::get(LoadI->getType()); | |||
1348 | if (AvailableVal->getType() != LoadI->getType()) | |||
1349 | AvailableVal = CastInst::CreateBitOrPointerCast( | |||
1350 | AvailableVal, LoadI->getType(), "", LoadI); | |||
1351 | LoadI->replaceAllUsesWith(AvailableVal); | |||
1352 | LoadI->eraseFromParent(); | |||
1353 | return true; | |||
1354 | } | |||
1355 | ||||
1356 | // Otherwise, if we scanned the whole block and got to the top of the block, | |||
1357 | // we know the block is locally transparent to the load. If not, something | |||
1358 | // might clobber its value. | |||
1359 | if (BBIt != LoadBB->begin()) | |||
1360 | return false; | |||
1361 | ||||
1362 | // If all of the loads and stores that feed the value have the same AA tags, | |||
1363 | // then we can propagate them onto any newly inserted loads. | |||
1364 | AAMDNodes AATags; | |||
1365 | LoadI->getAAMetadata(AATags); | |||
1366 | ||||
1367 | SmallPtrSet<BasicBlock*, 8> PredsScanned; | |||
1368 | ||||
1369 | using AvailablePredsTy = SmallVector<std::pair<BasicBlock *, Value *>, 8>; | |||
1370 | ||||
1371 | AvailablePredsTy AvailablePreds; | |||
1372 | BasicBlock *OneUnavailablePred = nullptr; | |||
1373 | SmallVector<LoadInst*, 8> CSELoads; | |||
1374 | ||||
1375 | // If we got here, the loaded value is transparent through to the start of the | |||
1376 | // block. Check to see if it is available in any of the predecessor blocks. | |||
1377 | for (BasicBlock *PredBB : predecessors(LoadBB)) { | |||
1378 | // If we already scanned this predecessor, skip it. | |||
1379 | if (!PredsScanned.insert(PredBB).second) | |||
1380 | continue; | |||
1381 | ||||
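| // Scan backwards from the end of this predecessor for a load or store | |||
| // that makes the value available on that incoming edge. | |||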
1382 | BBIt = PredBB->end(); | |||
1383 | unsigned NumScanedInst = 0; | |||
1384 | Value *PredAvailable = nullptr; | |||
1385 | // NOTE: We don't CSE loads that are volatile or anything stronger than | |||
1386 | // unordered; that should have been checked when we entered the function. | |||
1387 | assert(LoadI->isUnordered() && | |||
1388 | "Attempting to CSE volatile or atomic loads"); | |||
1389 | // If this is a load on a phi pointer, phi-translate it and search | |||
1390 | // for available load/store to the pointer in predecessors. | |||
1391 | Value *Ptr = LoadedPtr->DoPHITranslation(LoadBB, PredBB); | |||
1392 | PredAvailable = FindAvailablePtrLoadStore( | |||
1393 | Ptr, LoadI->getType(), LoadI->isAtomic(), PredBB, BBIt, | |||
1394 | DefMaxInstsToScan, AA, &IsLoadCSE, &NumScanedInst); | |||
1395 | ||||
1396 | // If PredBB has a single predecessor, continue scanning through the | |||
1397 | // single predecessor. | |||
1398 | BasicBlock *SinglePredBB = PredBB; | |||
1399 | while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() && | |||
1400 | NumScanedInst < DefMaxInstsToScan) { | |||
1401 | SinglePredBB = SinglePredBB->getSinglePredecessor(); | |||
1402 | if (SinglePredBB) { | |||
1403 | BBIt = SinglePredBB->end(); | |||
1404 | PredAvailable = FindAvailablePtrLoadStore( | |||
1405 | Ptr, LoadI->getType(), LoadI->isAtomic(), SinglePredBB, BBIt, | |||
1406 | (DefMaxInstsToScan - NumScanedInst), AA, &IsLoadCSE, | |||
1407 | &NumScanedInst); | |||
1408 | } | |||
1409 | } | |||
1410 | ||||
1411 | if (!PredAvailable) { | |||
1412 | OneUnavailablePred = PredBB; | |||
1413 | continue; | |||
1414 | } | |||
1415 | ||||
1416 | if (IsLoadCSE) | |||
1417 | CSELoads.push_back(cast<LoadInst>(PredAvailable)); | |||
1418 | ||||
1419 | // If so, this load is partially redundant. Remember this info so that we | |||
1420 | // can create a PHI node. | |||
1421 | AvailablePreds.emplace_back(PredBB, PredAvailable); | |||
1422 | } | |||
1423 | ||||
1424 | // If the loaded value isn't available in any predecessor, it isn't partially | |||
1425 | // redundant. | |||
1426 | if (AvailablePreds.empty()) return false; | |||
1427 | ||||
1428 | // Okay, the loaded value is available in at least one (and maybe all!) | |||
1429 | // predecessors. If the value is unavailable in more than one unique | |||
1430 | // predecessor, we want to insert a merge block for those common predecessors. | |||
1431 | // This ensures that we only have to insert one reload, thus not increasing | |||
1432 | // code size. | |||
1433 | BasicBlock *UnavailablePred = nullptr; | |||
1434 | ||||
1435 | // If the value is unavailable in any of the predecessors, we will end up | |||
1436 | // inserting a new instruction into them. It is only valid if all the | |||
1437 | // instructions before LoadI are guaranteed to pass execution to its | |||
1438 | // successor, or if LoadI is safe to speculate. | |||
1439 | // TODO: If this logic becomes more complex, and we perform PRE insertion | |||
1440 | // farther than to a predecessor, we need to reuse the code from GVN's PRE. | |||
1441 | // It requires dominator tree analysis, so for this simple case it is | |||
1442 | // overkill. | |||
1443 | if (PredsScanned.size() != AvailablePreds.size() && | |||
1444 | !isSafeToSpeculativelyExecute(LoadI)) | |||
1445 | for (auto I = LoadBB->begin(); &*I != LoadI; ++I) | |||
1446 | if (!isGuaranteedToTransferExecutionToSuccessor(&*I)) | |||
1447 | return false; | |||
1448 | ||||
1449 | // If there is exactly one predecessor where the value is unavailable, the | |||
1450 | // already computed 'OneUnavailablePred' block is it. If it ends in an | |||
1451 | // unconditional branch, we know that it isn't a critical edge. | |||
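| // Note: when the sizes differ by exactly one, the scan loop above recorded | |||
| // the single unavailable predecessor in OneUnavailablePred, so it is | |||
| // expected to be non-null when it is dereferenced below. | |||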
1452 | if (PredsScanned.size() == AvailablePreds.size()+1 && | |||
1453 | OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) { | |||
1454 | UnavailablePred = OneUnavailablePred; | |||
1455 | } else if (PredsScanned.size() != AvailablePreds.size()) { | |||
1456 | // Otherwise, we had multiple unavailable predecessors, or the one | |||
1457 | // unavailable predecessor ended in a critical edge. | |||
1458 | SmallVector<BasicBlock*, 8> PredsToSplit; | |||
1459 | SmallPtrSet<BasicBlock*, 8> AvailablePredSet; | |||
1460 | ||||
1461 | for (const auto &AvailablePred : AvailablePreds) | |||
1462 | AvailablePredSet.insert(AvailablePred.first); | |||
1463 | ||||
1464 | // Add all the unavailable predecessors to the PredsToSplit list. | |||
1465 | for (BasicBlock *P : predecessors(LoadBB)) { | |||
1466 | // If the predecessor is an indirect goto, we can't split the edge. | |||
1467 | // Same for CallBr. | |||
1468 | if (isa<IndirectBrInst>(P->getTerminator()) || | |||
1469 | isa<CallBrInst>(P->getTerminator())) | |||
1470 | return false; | |||
1471 | ||||
1472 | if (!AvailablePredSet.count(P)) | |||
1473 | PredsToSplit.push_back(P); | |||
1474 | } | |||
1475 | ||||
1476 | // Split them out to their own block. | |||
1477 | UnavailablePred = splitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split"); | |||
1478 | } | |||
1479 | ||||
1480 | // If the value isn't available in all predecessors, then there will be | |||
1481 | // exactly one where it isn't available. Insert a load on that edge and add | |||
1482 | // it to the AvailablePreds list. | |||
1483 | if (UnavailablePred) { | |||
1484 | assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 && | |||
1485 | "Can't handle critical edge here!"); | |||
1486 | LoadInst *NewVal = new LoadInst( | |||
1487 | LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred), | |||
1488 | LoadI->getName() + ".pr", false, LoadI->getAlign(), | |||
1489 | LoadI->getOrdering(), LoadI->getSyncScopeID(), | |||
1490 | UnavailablePred->getTerminator()); | |||
1491 | NewVal->setDebugLoc(LoadI->getDebugLoc()); | |||
1492 | if (AATags) | |||
1493 | NewVal->setAAMetadata(AATags); | |||
1494 | ||||
1495 | AvailablePreds.emplace_back(UnavailablePred, NewVal); | |||
1496 | } | |||
1497 | ||||
1498 | // Now we know that each predecessor of this block has a value in | |||
1499 | // AvailablePreds; sort them for efficient access as we walk the preds. | |||
1500 | array_pod_sort(AvailablePreds.begin(), AvailablePreds.end()); | |||
1501 | ||||
1502 | // Create a PHI node at the start of the block for the PRE'd load value. | |||
1503 | pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB); | |||
1504 | PHINode *PN = PHINode::Create(LoadI->getType(), std::distance(PB, PE), "", | |||
1505 | &LoadBB->front()); | |||
1506 | PN->takeName(LoadI); | |||
1507 | PN->setDebugLoc(LoadI->getDebugLoc()); | |||
1508 | ||||
1509 | // Insert new entries into the PHI for each predecessor. A single block may | |||
1510 | // have multiple entries here. | |||
1511 | for (pred_iterator PI = PB; PI != PE; ++PI) { | |||
1512 | BasicBlock *P = *PI; | |||
1513 | AvailablePredsTy::iterator I = | |||
1514 | llvm::lower_bound(AvailablePreds, std::make_pair(P, (Value *)nullptr)); | |||
1515 | ||||
1516 | assert(I != AvailablePreds.end() && I->first == P && | |||
1517 | "Didn't find entry for predecessor!"); | |||
1518 | ||||
1519 | // If we have an available predecessor but it requires casting, insert the | |||
1520 | // cast in the predecessor and use the cast. Note that we have to update the | |||
1521 | // AvailablePreds vector as we go so that all of the PHI entries for this | |||
1522 | // predecessor use the same bitcast. | |||
1523 | Value *&PredV = I->second; | |||
1524 | if (PredV->getType() != LoadI->getType()) | |||
1525 | PredV = CastInst::CreateBitOrPointerCast(PredV, LoadI->getType(), "", | |||
1526 | P->getTerminator()); | |||
1527 | ||||
1528 | PN->addIncoming(PredV, I->first); | |||
1529 | } | |||
1530 | ||||
1531 | for (LoadInst *PredLoadI : CSELoads) { | |||
1532 | combineMetadataForCSE(PredLoadI, LoadI, true); | |||
1533 | } | |||
1534 | ||||
1535 | LoadI->replaceAllUsesWith(PN); | |||
1536 | LoadI->eraseFromParent(); | |||
1537 | ||||
1538 | return true; | |||
1539 | } | |||
1540 | ||||
1541 | /// findMostPopularDest - The specified list contains multiple possible | |||
1542 | /// threadable destinations. Pick the one that occurs the most frequently in | |||
1543 | /// the list. | |||
1544 | static BasicBlock * | |||
1545 | findMostPopularDest(BasicBlock *BB, | |||
1546 | const SmallVectorImpl<std::pair<BasicBlock *, | |||
1547 | BasicBlock *>> &PredToDestList) { | |||
1548 | assert(!PredToDestList.empty()); | |||
1549 | ||||
1550 | // Determine popularity. If there are multiple possible destinations, we | |||
1551 | // explicitly choose to ignore 'undef' destinations. We prefer to thread | |||
1552 | // blocks with known and real destinations to threading undef. We'll handle | |||
1553 | // them later if interesting. | |||
1554 | MapVector<BasicBlock *, unsigned> DestPopularity; | |||
1555 | ||||
1556 | // Populate DestPopularity with the successors in the order they appear in the | |||
1557 | // successor list. This way, we ensure determinism by iterating it in the | |||
1558 | // same order in std::max_element below. We map nullptr to 0 so that we can | |||
1559 | // return nullptr when PredToDestList contains nullptr only. | |||
1560 | DestPopularity[nullptr] = 0; | |||
1561 | for (auto *SuccBB : successors(BB)) | |||
1562 | DestPopularity[SuccBB] = 0; | |||
1563 | ||||
1564 | for (const auto &PredToDest : PredToDestList) | |||
1565 | if (PredToDest.second) | |||
1566 | DestPopularity[PredToDest.second]++; | |||
1567 | ||||
1568 | // Find the most popular dest. | |||
1569 | using VT = decltype(DestPopularity)::value_type; | |||
1570 | auto MostPopular = std::max_element( | |||
1571 | DestPopularity.begin(), DestPopularity.end(), | |||
1572 | [](const VT &L, const VT &R) { return L.second < R.second; }); | |||
1573 | ||||
1574 | // Okay, we have finally picked the most popular destination. | |||
1575 | return MostPopular->first; | |||
1576 | } | |||
1577 | ||||
1578 | // Try to evaluate the value of V when the control flows from PredPredBB to | |||
1579 | // BB->getSinglePredecessor() and then on to BB. | |||
1580 | Constant *JumpThreadingPass::evaluateOnPredecessorEdge(BasicBlock *BB, | |||
1581 | BasicBlock *PredPredBB, | |||
1582 | Value *V) { | |||
1583 | BasicBlock *PredBB = BB->getSinglePredecessor(); | |||
1584 | assert(PredBB && "Expected a single predecessor"); | |||
1585 | ||||
1586 | if (Constant *Cst = dyn_cast<Constant>(V)) { | |||
1587 | return Cst; | |||
1588 | } | |||
1589 | ||||
1590 | // Consult LVI if V is not an instruction in BB or PredBB. | |||
1591 | Instruction *I = dyn_cast<Instruction>(V); | |||
1592 | if (!I || (I->getParent() != BB && I->getParent() != PredBB)) { | |||
1593 | return LVI->getConstantOnEdge(V, PredPredBB, PredBB, nullptr); | |||
1594 | } | |||
1595 | ||||
1596 | // Look into a PHI argument. | |||
1597 | if (PHINode *PHI = dyn_cast<PHINode>(V)) { | |||
1598 | if (PHI->getParent() == PredBB) | |||
1599 | return dyn_cast<Constant>(PHI->getIncomingValueForBlock(PredPredBB)); | |||
1600 | return nullptr; | |||
1601 | } | |||
1602 | ||||
1603 | // If we have a CmpInst, try to fold it for each incoming edge into PredBB. | |||
1604 | if (CmpInst *CondCmp = dyn_cast<CmpInst>(V)) { | |||
1605 | if (CondCmp->getParent() == BB) { | |||
1606 | Constant *Op0 = | |||
1607 | evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(0)); | |||
1608 | Constant *Op1 = | |||
1609 | evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(1)); | |||
1610 | if (Op0 && Op1) { | |||
1611 | return ConstantExpr::getCompare(CondCmp->getPredicate(), Op0, Op1); | |||
1612 | } | |||
1613 | } | |||
1614 | return nullptr; | |||
1615 | } | |||
1616 | ||||
1617 | return nullptr; | |||
1618 | } | |||
1619 | ||||
1620 | bool JumpThreadingPass::processThreadableEdges(Value *Cond, BasicBlock *BB, | |||
1621 | ConstantPreference Preference, | |||
1622 | Instruction *CxtI) { | |||
1623 | // If threading this would thread across a loop header, don't even try to | |||
1624 | // thread the edge. | |||
1625 | if (LoopHeaders.count(BB)) | |||
1626 | return false; | |||
1627 | ||||
1628 | PredValueInfoTy PredValues; | |||
1629 | if (!computeValueKnownInPredecessors(Cond, BB, PredValues, Preference, | |||
1630 | CxtI)) { | |||
1631 | // We don't have known values in predecessors. See if we can thread through | |||
1632 | // BB and its sole predecessor. | |||
1633 | return maybethreadThroughTwoBasicBlocks(BB, Cond); | |||
1634 | } | |||
1635 | ||||
1636 | assert(!PredValues.empty() && | |||
1637 | "computeValueKnownInPredecessors returned true with no values"); | |||
1638 | ||||
1639 | LLVM_DEBUG(dbgs() << "IN BB: " << *BB; | |||
1640 | for (const auto &PredValue : PredValues) { | |||
1641 | dbgs() << " BB '" << BB->getName() | |||
1642 | << "': FOUND condition = " << *PredValue.first | |||
1643 | << " for pred '" << PredValue.second->getName() << "'.\n"; | |||
1644 | }); | |||
1645 | ||||
1646 | // Decide what we want to thread through. Convert our list of known values to | |||
1647 | // a list of known destinations for each pred. This also discards duplicate | |||
1648 | // predecessors and keeps track of the undefined inputs (which are represented | |||
1649 | // as a null dest in the PredToDestList). | |||
1650 | SmallPtrSet<BasicBlock*, 16> SeenPreds; | |||
1651 | SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList; | |||
1652 | ||||
1653 | BasicBlock *OnlyDest = nullptr; | |||
1654 | BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL; | |||
1655 | Constant *OnlyVal = nullptr; | |||
1656 | Constant *MultipleVal = (Constant *)(intptr_t)~0ULL; | |||
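| // OnlyDest/OnlyVal track whether every predecessor agrees on a single | |||
| // destination/value; the sentinel values mark the case where more than one | |||
| // distinct destination or value has been seen. | |||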
1657 | ||||
1658 | for (const auto &PredValue : PredValues) { | |||
1659 | BasicBlock *Pred = PredValue.second; | |||
1660 | if (!SeenPreds.insert(Pred).second) | |||
1661 | continue; // Duplicate predecessor entry. | |||
1662 | ||||
1663 | Constant *Val = PredValue.first; | |||
1664 | ||||
1665 | BasicBlock *DestBB; | |||
1666 | if (isa<UndefValue>(Val)) | |||
1667 | DestBB = nullptr; | |||
1668 | else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) { | |||
1669 | assert(isa<ConstantInt>(Val) && "Expecting a constant integer"); | |||
1670 | DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero()); | |||
1671 | } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) { | |||
1672 | assert(isa<ConstantInt>(Val) && "Expecting a constant integer"); | |||
1673 | DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor(); | |||
1674 | } else { | |||
1675 | assert(isa<IndirectBrInst>(BB->getTerminator()) | |||
1676 | && "Unexpected terminator"); | |||
1677 | assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress"); | |||
1678 | DestBB = cast<BlockAddress>(Val)->getBasicBlock(); | |||
1679 | } | |||
1680 | ||||
1681 | // If we have exactly one destination, remember it for efficiency below. | |||
1682 | if (PredToDestList.empty()) { | |||
1683 | OnlyDest = DestBB; | |||
1684 | OnlyVal = Val; | |||
1685 | } else { | |||
1686 | if (OnlyDest != DestBB) | |||
1687 | OnlyDest = MultipleDestSentinel; | |||
1688 | // It's possible we have the same destination but a different value, e.g. | |||
1689 | // the default case in a switchinst. | |||
1690 | if (Val != OnlyVal) | |||
1691 | OnlyVal = MultipleVal; | |||
1692 | } | |||
1693 | ||||
1694 | // If the predecessor ends with an indirect goto, we can't change its | |||
1695 | // destination. Same for CallBr. | |||
1696 | if (isa<IndirectBrInst>(Pred->getTerminator()) || | |||
1697 | isa<CallBrInst>(Pred->getTerminator())) | |||
1698 | continue; | |||
1699 | ||||
1700 | PredToDestList.emplace_back(Pred, DestBB); | |||
1701 | } | |||
1702 | ||||
1703 | // If all edges were unthreadable, we fail. | |||
1704 | if (PredToDestList.empty()) | |||
1705 | return false; | |||
1706 | ||||
1707 | // If all the predecessors go to a single known successor, we want to fold, | |||
1708 | // not thread. By doing so, we do not need to duplicate the current block, | |||
1709 | // and we avoid missing opportunities in case we don't/can't duplicate it. | |||
1710 | if (OnlyDest && OnlyDest != MultipleDestSentinel) { | |||
1711 | if (BB->hasNPredecessors(PredToDestList.size())) { | |||
1712 | bool SeenFirstBranchToOnlyDest = false; | |||
1713 | std::vector <DominatorTree::UpdateType> Updates; | |||
1714 | Updates.reserve(BB->getTerminator()->getNumSuccessors() - 1); | |||
1715 | for (BasicBlock *SuccBB : successors(BB)) { | |||
1716 | if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest) { | |||
1717 | SeenFirstBranchToOnlyDest = true; // Don't modify the first branch. | |||
1718 | } else { | |||
1719 | SuccBB->removePredecessor(BB, true); // This is unreachable successor. | |||
1720 | Updates.push_back({DominatorTree::Delete, BB, SuccBB}); | |||
1721 | } | |||
1722 | } | |||
1723 | ||||
1724 | // Finally update the terminator. | |||
1725 | Instruction *Term = BB->getTerminator(); | |||
1726 | BranchInst::Create(OnlyDest, Term); | |||
1727 | Term->eraseFromParent(); | |||
1728 | DTU->applyUpdatesPermissive(Updates); | |||
1729 | if (HasProfileData) | |||
1730 | BPI->eraseBlock(BB); | |||
1731 | ||||
1732 | // If the condition is now dead due to the removal of the old terminator, | |||
1733 | // erase it. | |||
1734 | if (auto *CondInst = dyn_cast<Instruction>(Cond)) { | |||
1735 | if (CondInst->use_empty() && !CondInst->mayHaveSideEffects()) | |||
1736 | CondInst->eraseFromParent(); | |||
1737 | // We can safely replace *some* uses of the CondInst if it has | |||
1738 | // exactly one value as returned by LVI. RAUW is incorrect in the | |||
1739 | // presence of guards and assumes that have `Cond` as a use. This | |||
1740 | // is because we use the guards/assumes to reason about the `Cond` value | |||
1741 | // at the end of the block, but RAUW unconditionally replaces all uses, | |||
1742 | // including the guards/assumes themselves and the uses before the | |||
1743 | // guard/assume. | |||
1744 | else if (OnlyVal && OnlyVal != MultipleVal && | |||
1745 | CondInst->getParent() == BB) | |||
1746 | replaceFoldableUses(CondInst, OnlyVal); | |||
1747 | } | |||
1748 | return true; | |||
1749 | } | |||
1750 | } | |||
1751 | ||||
1752 | // Determine which is the most common successor. If we have many inputs and | |||
1753 | // this block is a switch, we want to start by threading the batch that goes | |||
1754 | // to the most popular destination first. If we only know about one | |||
1755 | // threadable destination (the common case) we can avoid this. | |||
1756 | BasicBlock *MostPopularDest = OnlyDest; | |||
1757 | ||||
1758 | if (MostPopularDest == MultipleDestSentinel) { | |||
1759 | // Remove any loop headers from the Dest list; threadEdge conservatively | |||
1760 | // won't process them, but we might have other destinations that are | |||
1761 | // eligible and that we still want to process. | |||
1762 | erase_if(PredToDestList, | |||
1763 | [&](const std::pair<BasicBlock *, BasicBlock *> &PredToDest) { | |||
1764 | return LoopHeaders.contains(PredToDest.second); | |||
1765 | }); | |||
1766 | ||||
1767 | if (PredToDestList.empty()) | |||
1768 | return false; | |||
1769 | ||||
1770 | MostPopularDest = findMostPopularDest(BB, PredToDestList); | |||
1771 | } | |||
1772 | ||||
1773 | // Now that we know what the most popular destination is, factor all | |||
1774 | // predecessors that will jump to it into a single predecessor. | |||
1775 | SmallVector<BasicBlock*, 16> PredsToFactor; | |||
1776 | for (const auto &PredToDest : PredToDestList) | |||
1777 | if (PredToDest.second == MostPopularDest) { | |||
1778 | BasicBlock *Pred = PredToDest.first; | |||
1779 | ||||
1780 | // This predecessor may be a switch or something else that has multiple | |||
1781 | // edges to the block. Factor each of these edges by listing them | |||
1782 | // according to # occurrences in PredsToFactor. | |||
1783 | for (BasicBlock *Succ : successors(Pred)) | |||
1784 | if (Succ == BB) | |||
1785 | PredsToFactor.push_back(Pred); | |||
1786 | } | |||
1787 | ||||
1788 | // If the threadable edges are branching on an undefined value, we get to pick | |||
1789 | // the destination that these predecessors should get to. | |||
1790 | if (!MostPopularDest) | |||
1791 | MostPopularDest = BB->getTerminator()-> | |||
1792 | getSuccessor(getBestDestForJumpOnUndef(BB)); | |||
1793 | ||||
1794 | // Ok, try to thread it! | |||
1795 | return tryThreadEdge(BB, PredsToFactor, MostPopularDest); | |||
1796 | } | |||
1797 | ||||
1798 | /// processBranchOnPHI - We have an otherwise unthreadable conditional branch on | |||
1799 | /// a PHI node (or freeze PHI) in the current block. See if there are any | |||
1800 | /// simplifications we can do based on inputs to the phi node. | |||
1801 | bool JumpThreadingPass::processBranchOnPHI(PHINode *PN) { | |||
1802 | BasicBlock *BB = PN->getParent(); | |||
1803 | ||||
1804 | // TODO: We could make use of this to do it once for blocks with common PHI | |||
1805 | // values. | |||
1806 | SmallVector<BasicBlock*, 1> PredBBs; | |||
1807 | PredBBs.resize(1); | |||
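| // We duplicate into a single predecessor at a time; this one-element | |||
| // vector is reused for each candidate predecessor in the loop below. | |||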
1808 | ||||
1809 | // If any of the predecessor blocks end in an unconditional branch, we can | |||
1810 | // *duplicate* the conditional branch into that block in order to further | |||
1811 | // encourage jump threading and to eliminate cases where we have branch on a | |||
1812 | // phi of an icmp (branch on icmp is much better). | |||
1813 | // This is still beneficial when a frozen phi is used as the branch condition | |||
1814 | // because it allows CodeGenPrepare to further canonicalize br(freeze(icmp)) | |||
1815 | // to br(icmp(freeze ...)). | |||
1816 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
1817 | BasicBlock *PredBB = PN->getIncomingBlock(i); | |||
1818 | if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator())) | |||
1819 | if (PredBr->isUnconditional()) { | |||
1820 | PredBBs[0] = PredBB; | |||
1821 | // Try to duplicate BB into PredBB. | |||
1822 | if (duplicateCondBranchOnPHIIntoPred(BB, PredBBs)) | |||
1823 | return true; | |||
1824 | } | |||
1825 | } | |||
1826 | ||||
1827 | return false; | |||
1828 | } | |||
1829 | ||||
1830 | /// processBranchOnXOR - We have an otherwise unthreadable conditional branch on | |||
1831 | /// a xor instruction in the current block. See if there are any | |||
1832 | /// simplifications we can do based on inputs to the xor. | |||
1833 | bool JumpThreadingPass::processBranchOnXOR(BinaryOperator *BO) { | |||
1834 | BasicBlock *BB = BO->getParent(); | |||
1835 | ||||
1836 | // If either the LHS or RHS of the xor is a constant, don't do this | |||
1837 | // optimization. | |||
1838 | if (isa<ConstantInt>(BO->getOperand(0)) || | |||
1839 | isa<ConstantInt>(BO->getOperand(1))) | |||
1840 | return false; | |||
1841 | ||||
1842 | // If the first instruction in BB isn't a phi, we won't be able to infer | |||
1843 | // anything special about any particular predecessor. | |||
1844 | if (!isa<PHINode>(BB->front())) | |||
1845 | return false; | |||
1846 | ||||
1847 | // If this BB is a landing pad, we won't be able to split the edge into it. | |||
1848 | if (BB->isEHPad()) | |||
1849 | return false; | |||
1850 | ||||
1851 | // If we have a xor as the branch input to this block, and we know that the | |||
1852 | // LHS or RHS of the xor in any predecessor is true/false, then we can clone | |||
1853 | // the condition into the predecessor and fix that value to true, saving some | |||
1854 | // logical ops on that path and encouraging other paths to simplify. | |||
1855 | // | |||
1856 | // This copies something like this: | |||
1857 | // | |||
1858 | // BB: | |||
1859 | // %X = phi i1 [1], [%X'] | |||
1860 | // %Y = icmp eq i32 %A, %B | |||
1861 | // %Z = xor i1 %X, %Y | |||
1862 | // br i1 %Z, ... | |||
1863 | // | |||
1864 | // Into: | |||
1865 | // BB': | |||
1866 | // %Y = icmp ne i32 %A, %B | |||
1867 | // br i1 %Y, ... | |||
1868 | ||||
1869 | PredValueInfoTy XorOpValues; | |||
1870 | bool isLHS = true; | |||
1871 | if (!computeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues, | |||
1872 | WantInteger, BO)) { | |||
1873 | assert(XorOpValues.empty()); | |||
1874 | if (!computeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues, | |||
1875 | WantInteger, BO)) | |||
1876 | return false; | |||
1877 | isLHS = false; | |||
1878 | } | |||
1879 | ||||
1880 | assert(!XorOpValues.empty() && | |||
1881 | "computeValueKnownInPredecessors returned true with no values"); | |||
1882 | ||||
1883 | // Scan the information to see which is most popular: true or false. The | |||
1884 | // predecessors can be of the set true, false, or undef. | |||
1885 | unsigned NumTrue = 0, NumFalse = 0; | |||
1886 | for (const auto &XorOpValue : XorOpValues) { | |||
1887 | if (isa<UndefValue>(XorOpValue.first)) | |||
1888 | // Ignore undefs for the count. | |||
1889 | continue; | |||
1890 | if (cast<ConstantInt>(XorOpValue.first)->isZero()) | |||
1891 | ++NumFalse; | |||
1892 | else | |||
1893 | ++NumTrue; | |||
1894 | } | |||
1895 | ||||
1896 | // Determine which value to split on, true, false, or undef if neither. | |||
1897 | ConstantInt *SplitVal = nullptr; | |||
1898 | if (NumTrue > NumFalse) | |||
1899 | SplitVal = ConstantInt::getTrue(BB->getContext()); | |||
1900 | else if (NumTrue != 0 || NumFalse != 0) | |||
1901 | SplitVal = ConstantInt::getFalse(BB->getContext()); | |||
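| // SplitVal stays null only when every known input was undef. | |||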
1902 | ||||
1903 | // Collect all of the blocks that this can be folded into so that we can | |||
1904 | // factor this once and clone it once. | |||
1905 | SmallVector<BasicBlock*, 8> BlocksToFoldInto; | |||
1906 | for (const auto &XorOpValue : XorOpValues) { | |||
1907 | if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first)) | |||
1908 | continue; | |||
1909 | ||||
1910 | BlocksToFoldInto.push_back(XorOpValue.second); | |||
1911 | } | |||
1912 | ||||
1913 | // If we inferred a value for all of the predecessors, then duplication won't | |||
1914 | // help us. However, we can just replace the LHS or RHS with the constant. | |||
1915 | if (BlocksToFoldInto.size() == | |||
1916 | cast<PHINode>(BB->front()).getNumIncomingValues()) { | |||
1917 | if (!SplitVal) { | |||
1918 | // If all preds provide undef, just nuke the xor, because it is undef too. | |||
1919 | BO->replaceAllUsesWith(UndefValue::get(BO->getType())); | |||
1920 | BO->eraseFromParent(); | |||
1921 | } else if (SplitVal->isZero()) { | |||
1922 | // If all preds provide 0, replace the xor with the other input. | |||
1923 | BO->replaceAllUsesWith(BO->getOperand(isLHS)); | |||
1924 | BO->eraseFromParent(); | |||
1925 | } else { | |||
1926 | // If all preds provide 1, set the computed value to 1. | |||
1927 | BO->setOperand(!isLHS, SplitVal); | |||
1928 | } | |||
1929 | ||||
1930 | return true; | |||
1931 | } | |||
1932 | ||||
1933 | // If any of predecessors end with an indirect goto, we can't change its | |||
1934 | // destination. Same for CallBr. | |||
1935 | if (any_of(BlocksToFoldInto, [](BasicBlock *Pred) { | |||
1936 | return isa<IndirectBrInst>(Pred->getTerminator()) || | |||
1937 | isa<CallBrInst>(Pred->getTerminator()); | |||
1938 | })) | |||
1939 | return false; | |||
1940 | ||||
1941 | // Try to duplicate BB into PredBB. | |||
1942 | return duplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto); | |||
1943 | } | |||
1944 | ||||
1945 | /// addPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new | |||
1946 | /// predecessor to the PHIBB block. If it has PHI nodes, add entries for | |||
1947 | /// NewPred using the entries from OldPred (suitably mapped). | |||
1948 | static void addPHINodeEntriesForMappedBlock(BasicBlock *PHIBB, | |||
1949 | BasicBlock *OldPred, | |||
1950 | BasicBlock *NewPred, | |||
1951 | DenseMap<Instruction*, Value*> &ValueMap) { | |||
1952 | for (PHINode &PN : PHIBB->phis()) { | |||
1953 | // Ok, we have a PHI node. Figure out what the incoming value was for the | |||
1954 | // DestBlock. | |||
1955 | Value *IV = PN.getIncomingValueForBlock(OldPred); | |||
1956 | ||||
1957 | // Remap the value if necessary. | |||
1958 | if (Instruction *Inst = dyn_cast<Instruction>(IV)) { | |||
1959 | DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst); | |||
1960 | if (I != ValueMap.end()) | |||
1961 | IV = I->second; | |||
1962 | } | |||
1963 | ||||
1964 | PN.addIncoming(IV, NewPred); | |||
1965 | } | |||
1966 | } | |||
1967 | ||||
1968 | /// Merge basic block BB into its sole predecessor if possible. | |||
1969 | bool JumpThreadingPass::maybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB) { | |||
1970 | BasicBlock *SinglePred = BB->getSinglePredecessor(); | |||
1971 | if (!SinglePred) | |||
1972 | return false; | |||
1973 | ||||
1974 | const Instruction *TI = SinglePred->getTerminator(); | |||
1975 | if (TI->isExceptionalTerminator() || TI->getNumSuccessors() != 1 || | |||
1976 | SinglePred == BB || hasAddressTakenAndUsed(BB)) | |||
1977 | return false; | |||
1978 | ||||
1979 | // If SinglePred was a loop header, BB becomes one. | |||
1980 | if (LoopHeaders.erase(SinglePred)) | |||
1981 | LoopHeaders.insert(BB); | |||
1982 | ||||
1983 | LVI->eraseBlock(SinglePred); | |||
1984 | MergeBasicBlockIntoOnlyPred(BB, DTU); | |||
1985 | ||||
1986 | // Now that BB is merged into SinglePred (i.e. SinglePred code followed by | |||
1987 | // BB code within one basic block `BB`), we need to invalidate the LVI | |||
1988 | // information associated with BB, because the LVI information need not be | |||
1989 | // true for all of BB after the merge. For example, | |||
1990 | // Before the merge, LVI info and code is as follows: | |||
1991 | // SinglePred: <LVI info1 for %p val> | |||
1992 | // %y = use of %p | |||
1993 | // call @exit() // need not transfer execution to successor. | |||
1994 | // assume(%p) // from this point on %p is true | |||
1995 | // br label %BB | |||
1996 | // BB: <LVI info2 for %p val, i.e. %p is true> | |||
1997 | // %x = use of %p | |||
1998 | // br label exit | |||
1999 | // | |||
2000 | // Note that this LVI info for blocks BB and SinglePred is correct for %p | |||
2001 | // (info2 and info1 respectively). After the merge and the deletion of the | |||
2002 | // LVI info1 for SinglePred, we have the following code: | |||
2003 | // BB: <LVI info2 for %p val> | |||
2004 | // %y = use of %p | |||
2005 | // call @exit() | |||
2006 | // assume(%p) | |||
2007 | // %x = use of %p <-- LVI info2 is correct from here onwards. | |||
2008 | // br label exit | |||
2009 | // LVI info2 for BB is incorrect at the beginning of BB. | |||
2010 | ||||
2011 | // Invalidate LVI information for BB if the LVI is not provably true for | |||
2012 | // all of BB. | |||
2013 | if (!isGuaranteedToTransferExecutionToSuccessor(BB)) | |||
2014 | LVI->eraseBlock(BB); | |||
2015 | return true; | |||
2016 | } | |||
2017 | ||||
2018 | /// Update the SSA form. NewBB contains instructions that are copied from BB. | |||
2019 | /// ValueMapping maps old values in BB to new ones in NewBB. | |||
2020 | void JumpThreadingPass::updateSSA( | |||
2021 | BasicBlock *BB, BasicBlock *NewBB, | |||
2022 | DenseMap<Instruction *, Value *> &ValueMapping) { | |||
2023 | // If there were values defined in BB that are used outside the block, then we | |||
2024 | // now have to update all uses of the value to use either the original value, | |||
2025 | // the cloned value, or some PHI derived value. This can require arbitrary | |||
2026 | // PHI insertion, which we are prepared to do; clean these up now. | |||
2027 | SSAUpdater SSAUpdate; | |||
2028 | SmallVector<Use *, 16> UsesToRename; | |||
2029 | ||||
2030 | for (Instruction &I : *BB) { | |||
2031 | // Scan all uses of this instruction to see if it is used outside of its | |||
2032 | // block, and if so, record them in UsesToRename. | |||
2033 | for (Use &U : I.uses()) { | |||
2034 | Instruction *User = cast<Instruction>(U.getUser()); | |||
2035 | if (PHINode *UserPN = dyn_cast<PHINode>(User)) { | |||
2036 | if (UserPN->getIncomingBlock(U) == BB) | |||
2037 | continue; | |||
2038 | } else if (User->getParent() == BB) | |||
2039 | continue; | |||
2040 | ||||
2041 | UsesToRename.push_back(&U); | |||
2042 | } | |||
2043 | ||||
2044 | // If there are no uses outside the block, we're done with this instruction. | |||
2045 | if (UsesToRename.empty()) | |||
2046 | continue; | |||
2047 | LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n"); | |||
2048 | ||||
2049 | // We found a use of I outside of BB. Rename all uses of I that are outside | |||
2050 | // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks | |||
2051 | // with the two values we know. | |||
2052 | SSAUpdate.Initialize(I.getType(), I.getName()); | |||
2053 | SSAUpdate.AddAvailableValue(BB, &I); | |||
2054 | SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]); | |||
2055 | ||||
2056 | while (!UsesToRename.empty()) | |||
2057 | SSAUpdate.RewriteUse(*UsesToRename.pop_back_val()); | |||
2058 | LLVM_DEBUG(dbgs() << "\n"); | |||
2059 | } | |||
2060 | } | |||
2061 | ||||
2062 | /// Clone instructions in range [BI, BE) to NewBB. For PHI nodes, we only clone | |||
2063 | /// arguments that come from PredBB. Return the map from the variables in the | |||
2064 | /// source basic block to the variables in the newly created basic block. | |||
2065 | DenseMap<Instruction *, Value *> | |||
2066 | JumpThreadingPass::cloneInstructions(BasicBlock::iterator BI, | |||
2067 | BasicBlock::iterator BE, BasicBlock *NewBB, | |||
2068 | BasicBlock *PredBB) { | |||
2069 | // We are going to have to map operands from the source basic block to the new | |||
2070 | // copy of the block 'NewBB'. If there are PHI nodes in the source basic | |||
2071 | // block, evaluate them to account for entry from PredBB. | |||
2072 | DenseMap<Instruction *, Value *> ValueMapping; | |||
2073 | ||||
2074 | // Clone the phi nodes of the source basic block into NewBB. The resulting | |||
2075 | // phi nodes are trivial since NewBB only has one predecessor, but SSAUpdater | |||
2076 | // might need to rewrite the operand of the cloned phi. | |||
2077 | for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) { | |||
2078 | PHINode *NewPN = PHINode::Create(PN->getType(), 1, PN->getName(), NewBB); | |||
2079 | NewPN->addIncoming(PN->getIncomingValueForBlock(PredBB), PredBB); | |||
2080 | ValueMapping[PN] = NewPN; | |||
2081 | } | |||
2082 | ||||
2083 | // Clone noalias scope declarations in the threaded block. When threading a | |||
2084 | // loop exit, we would otherwise end up with two identical scope declarations | |||
2085 | // visible at the same time. | |||
2086 | SmallVector<MDNode *> NoAliasScopes; | |||
2087 | DenseMap<MDNode *, MDNode *> ClonedScopes; | |||
2088 | LLVMContext &Context = PredBB->getContext(); | |||
2089 | identifyNoAliasScopesToClone(BI, BE, NoAliasScopes); | |||
2090 | cloneNoAliasScopes(NoAliasScopes, ClonedScopes, "thread", Context); | |||
2091 | ||||
2092 | // Clone the non-phi instructions of the source basic block into NewBB, | |||
2093 | // keeping track of the mapping and using it to remap operands in the cloned | |||
2094 | // instructions. | |||
2095 | for (; BI != BE; ++BI) { | |||
2096 | Instruction *New = BI->clone(); | |||
2097 | New->setName(BI->getName()); | |||
2098 | NewBB->getInstList().push_back(New); | |||
2099 | ValueMapping[&*BI] = New; | |||
2100 | adaptNoAliasScopes(New, ClonedScopes, Context); | |||
2101 | ||||
2102 | // Remap operands to patch up intra-block references. | |||
2103 | for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) | |||
2104 | if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { | |||
2105 | DenseMap<Instruction *, Value *>::iterator I = ValueMapping.find(Inst); | |||
2106 | if (I != ValueMapping.end()) | |||
2107 | New->setOperand(i, I->second); | |||
2108 | } | |||
2109 | } | |||
2110 | ||||
2111 | return ValueMapping; | |||
2112 | } | |||
2113 | ||||
2114 | /// Attempt to thread through two successive basic blocks. | |||
2115 | bool JumpThreadingPass::maybethreadThroughTwoBasicBlocks(BasicBlock *BB, | |||
2116 | Value *Cond) { | |||
2117 | // Consider: | |||
2118 | // | |||
2119 | // PredBB: | |||
2120 | // %var = phi i32* [ null, %bb1 ], [ @a, %bb2 ] | |||
2121 | // %tobool = icmp eq i32 %cond, 0 | |||
2122 | // br i1 %tobool, label %BB, label ... | |||
2123 | // | |||
2124 | // BB: | |||
2125 | // %cmp = icmp eq i32* %var, null | |||
2126 | // br i1 %cmp, label ..., label ... | |||
2127 | // | |||
2128 | // We don't know the value of %var at BB even if we know which incoming edge | |||
2129 | // we take to BB. However, once we duplicate PredBB for each of its incoming | |||
2130 | // edges (say, PredBB1 and PredBB2), we know the value of %var in each copy of | |||
2131 | // PredBB. Then we can thread edges PredBB1->BB and PredBB2->BB through BB. | |||
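  //
  // As a rough sketch of the intent (block names are illustrative): the copy of
  // PredBB made for the edge from %bb1 looks approximately like
  //
  //   PredBB.thread:                       ; reached only from %bb1
  //     %var1 = phi i32* [ null, %bb1 ]
  //     %tobool1 = icmp eq i32 %cond, 0
  //     br i1 %tobool1, label %BB, label ...
  //
  // so %var is known to be null on that path, and the branch on %cmp in BB can
  // then be threaded for it.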
2132 | ||||
2133 | // Require that BB end with a Branch for simplicity. | |||
2134 | BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); | |||
2135 | if (!CondBr) | |||
2136 | return false; | |||
2137 | ||||
2138 | // BB must have exactly one predecessor. | |||
2139 | BasicBlock *PredBB = BB->getSinglePredecessor(); | |||
2140 | if (!PredBB) | |||
2141 | return false; | |||
2142 | ||||
2143 | // Require that PredBB end with a conditional Branch. If PredBB ends with an | |||
2144 | // unconditional branch, we should be merging PredBB and BB instead. For | |||
2145 | // simplicity, we don't deal with a switch. | |||
2146 | BranchInst *PredBBBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); | |||
2147 | if (!PredBBBranch || PredBBBranch->isUnconditional()) | |||
2148 | return false; | |||
2149 | ||||
2150 | // If PredBB has exactly one incoming edge, we don't gain anything by copying | |||
2151 | // PredBB. | |||
2152 | if (PredBB->getSinglePredecessor()) | |||
2153 | return false; | |||
2154 | ||||
2155 | // Don't thread through PredBB if it contains a successor edge to itself, in | |||
2156 | // which case we would infinite loop. Suppose we are threading an edge from | |||
2157 | // PredPredBB through PredBB and BB to SuccBB with PredBB containing a | |||
2158 | // successor edge to itself. If we allowed jump threading in this case, we | |||
2159 | // could duplicate PredBB and BB as, say, PredBB.thread and BB.thread. Since | |||
2160 | // PredBB.thread has a successor edge to PredBB, we would immediately come up | |||
2161 | // with another jump threading opportunity from PredBB.thread through PredBB | |||
2162 | // and BB to SuccBB. This jump threading would repeatedly occur. That is, we | |||
2163 | // would keep peeling one iteration from PredBB. | |||
2164 | if (llvm::is_contained(successors(PredBB), PredBB)) | |||
2165 | return false; | |||
2166 | ||||
2167 | // Don't thread across a loop header. | |||
2168 | if (LoopHeaders.count(PredBB)) | |||
2169 | return false; | |||
2170 | ||||
2171 | // Avoid complication with duplicating EH pads. | |||
2172 | if (PredBB->isEHPad()) | |||
2173 | return false; | |||
2174 | ||||
2175 | // Find a predecessor that we can thread. For simplicity, we only consider a | |||
2176 | // successor edge out of BB to which we thread exactly one incoming edge into | |||
2177 | // PredBB. | |||
2178 | unsigned ZeroCount = 0; | |||
2179 | unsigned OneCount = 0; | |||
2180 | BasicBlock *ZeroPred = nullptr; | |||
2181 | BasicBlock *OnePred = nullptr; | |||
2182 | for (BasicBlock *P : predecessors(PredBB)) { | |||
2183 | if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>( | |||
2184 | evaluateOnPredecessorEdge(BB, P, Cond))) { | |||
2185 | if (CI->isZero()) { | |||
2186 | ZeroCount++; | |||
2187 | ZeroPred = P; | |||
2188 | } else if (CI->isOne()) { | |||
2189 | OneCount++; | |||
2190 | OnePred = P; | |||
2191 | } | |||
2192 | } | |||
2193 | } | |||
2194 | ||||
2195 | // Disregard complicated cases where we have to thread multiple edges. | |||
2196 | BasicBlock *PredPredBB; | |||
2197 | if (ZeroCount == 1) { | |||
2198 | PredPredBB = ZeroPred; | |||
2199 | } else if (OneCount == 1) { | |||
2200 | PredPredBB = OnePred; | |||
2201 | } else { | |||
2202 | return false; | |||
2203 | } | |||
2204 | ||||
2205 | BasicBlock *SuccBB = CondBr->getSuccessor(PredPredBB == ZeroPred); | |||
2206 | ||||
2207 | // If threading to the same block as we come from, we would infinite loop. | |||
2208 | if (SuccBB == BB) { | |||
2209 | LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName()
2210 |                   << "' - would thread to self!\n");
2211 | return false; | |||
2212 | } | |||
2213 | ||||
2214 | // If threading this would thread across a loop header, don't thread the edge. | |||
2215 | // See the comments above findLoopHeaders for justifications and caveats. | |||
2216 | if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) { | |||
2217 | LLVM_DEBUG({
2218 |   bool BBIsHeader = LoopHeaders.count(BB);
2219 |   bool SuccIsHeader = LoopHeaders.count(SuccBB);
2220 |   dbgs() << " Not threading across "
2221 |          << (BBIsHeader ? "loop header BB '" : "block BB '")
2222 |          << BB->getName() << "' to dest "
2223 |          << (SuccIsHeader ? "loop header BB '" : "block BB '")
2224 |          << SuccBB->getName()
2225 |          << "' - it might create an irreducible loop!\n";
2226 | });
2227 | return false; | |||
2228 | } | |||
2229 | ||||
2230 | // Compute the cost of duplicating BB and PredBB. | |||
2231 | unsigned BBCost = | |||
2232 | getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold); | |||
2233 | unsigned PredBBCost = getJumpThreadDuplicationCost( | |||
2234 | PredBB, PredBB->getTerminator(), BBDupThreshold); | |||
2235 | ||||
2236 | // Give up if costs are too high. We need to check BBCost and PredBBCost | |||
2237 | // individually before checking their sum because getJumpThreadDuplicationCost | |||
2238 | // returns (unsigned)~0 for those basic blocks that cannot be duplicated.
2239 | if (BBCost > BBDupThreshold || PredBBCost > BBDupThreshold || | |||
2240 | BBCost + PredBBCost > BBDupThreshold) { | |||
2241 | LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName()
2242 |                   << "' - Cost is too high: " << PredBBCost
2243 |                   << " for PredBB, " << BBCost << " for BB\n");
2244 | return false; | |||
2245 | } | |||
2246 | ||||
2247 | // Now we are ready to duplicate PredBB. | |||
2248 | threadThroughTwoBasicBlocks(PredPredBB, PredBB, BB, SuccBB); | |||
2249 | return true; | |||
2250 | } | |||
2251 | ||||
2252 | void JumpThreadingPass::threadThroughTwoBasicBlocks(BasicBlock *PredPredBB, | |||
2253 | BasicBlock *PredBB, | |||
2254 | BasicBlock *BB, | |||
2255 | BasicBlock *SuccBB) { | |||
2256 | LLVM_DEBUG(dbgs() << " Threading through '" << PredBB->getName() << "' and '"
2257 |                   << BB->getName() << "'\n");
2258 | ||||
2259 | BranchInst *CondBr = cast<BranchInst>(BB->getTerminator()); | |||
2260 | BranchInst *PredBBBranch = cast<BranchInst>(PredBB->getTerminator()); | |||
2261 | ||||
2262 | BasicBlock *NewBB = | |||
2263 | BasicBlock::Create(PredBB->getContext(), PredBB->getName() + ".thread", | |||
2264 | PredBB->getParent(), PredBB); | |||
2265 | NewBB->moveAfter(PredBB); | |||
2266 | ||||
2267 | // Set the block frequency of NewBB. | |||
2268 | if (HasProfileData) { | |||
2269 | auto NewBBFreq = BFI->getBlockFreq(PredPredBB) * | |||
2270 | BPI->getEdgeProbability(PredPredBB, PredBB); | |||
2271 | BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency()); | |||
2272 | } | |||
2273 | ||||
2274 | // We are going to have to map operands from PredBB to its new copy 'NewBB'.
2275 | // If there are PHI nodes in PredBB, evaluate them to account for entry from
2276 | // PredPredBB.
2277 | DenseMap<Instruction *, Value *> ValueMapping = | |||
2278 | cloneInstructions(PredBB->begin(), PredBB->end(), NewBB, PredPredBB); | |||
2279 | ||||
2280 | // Copy the edge probabilities from PredBB to NewBB. | |||
2281 | if (HasProfileData) | |||
2282 | BPI->copyEdgeProbabilities(PredBB, NewBB); | |||
2283 | ||||
2284 | // Update the terminator of PredPredBB to jump to NewBB instead of PredBB. | |||
2285 | // This eliminates a predecessor from PredBB, which requires us to simplify
2286 | // any PHI nodes in PredBB.
2287 | Instruction *PredPredTerm = PredPredBB->getTerminator(); | |||
2288 | for (unsigned i = 0, e = PredPredTerm->getNumSuccessors(); i != e; ++i) | |||
2289 | if (PredPredTerm->getSuccessor(i) == PredBB) { | |||
2290 | PredBB->removePredecessor(PredPredBB, true); | |||
2291 | PredPredTerm->setSuccessor(i, NewBB); | |||
2292 | } | |||
2293 | ||||
2294 | addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(0), PredBB, NewBB, | |||
2295 | ValueMapping); | |||
2296 | addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(1), PredBB, NewBB, | |||
2297 | ValueMapping); | |||
2298 | ||||
2299 | DTU->applyUpdatesPermissive( | |||
2300 | {{DominatorTree::Insert, NewBB, CondBr->getSuccessor(0)}, | |||
2301 | {DominatorTree::Insert, NewBB, CondBr->getSuccessor(1)}, | |||
2302 | {DominatorTree::Insert, PredPredBB, NewBB}, | |||
2303 | {DominatorTree::Delete, PredPredBB, PredBB}}); | |||
2304 | ||||
2305 | updateSSA(PredBB, NewBB, ValueMapping); | |||
2306 | ||||
2307 | // Clean up things like PHI nodes with single operands, dead instructions, | |||
2308 | // etc. | |||
2309 | SimplifyInstructionsInBlock(NewBB, TLI); | |||
2310 | SimplifyInstructionsInBlock(PredBB, TLI); | |||
2311 | ||||
2312 | SmallVector<BasicBlock *, 1> PredsToFactor; | |||
2313 | PredsToFactor.push_back(NewBB); | |||
2314 | threadEdge(BB, PredsToFactor, SuccBB); | |||
2315 | } | |||
2316 | ||||
2317 | /// tryThreadEdge - Thread an edge if it's safe and profitable to do so. | |||
2318 | bool JumpThreadingPass::tryThreadEdge( | |||
2319 | BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs, | |||
2320 | BasicBlock *SuccBB) { | |||
2321 | // If threading to the same block as we come from, we would infinite loop. | |||
2322 | if (SuccBB == BB) { | |||
2323 | LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName()
2324 |                   << "' - would thread to self!\n");
2325 | return false; | |||
2326 | } | |||
2327 | ||||
2328 | // If threading this would thread across a loop header, don't thread the edge. | |||
2329 | // See the comments above findLoopHeaders for justifications and caveats. | |||
2330 | if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) { | |||
2331 | LLVM_DEBUG({
2332 |   bool BBIsHeader = LoopHeaders.count(BB);
2333 |   bool SuccIsHeader = LoopHeaders.count(SuccBB);
2334 |   dbgs() << " Not threading across "
2335 |          << (BBIsHeader ? "loop header BB '" : "block BB '") << BB->getName()
2336 |          << "' to dest " << (SuccIsHeader ? "loop header BB '" : "block BB '")
2337 |          << SuccBB->getName() << "' - it might create an irreducible loop!\n";
2338 | });
2339 | return false; | |||
2340 | } | |||
2341 | ||||
2342 | unsigned JumpThreadCost = | |||
2343 | getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold); | |||
2344 | if (JumpThreadCost > BBDupThreshold) { | |||
2345 | LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName()
2346 |                   << "' - Cost is too high: " << JumpThreadCost << "\n");
2347 | return false; | |||
2348 | } | |||
2349 | ||||
2350 | threadEdge(BB, PredBBs, SuccBB); | |||
2351 | return true; | |||
2352 | } | |||
2353 | ||||
2354 | /// threadEdge - We have decided that it is safe and profitable to factor the | |||
2355 | /// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB | |||
2356 | /// across BB. Transform the IR to reflect this change. | |||
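///
/// A minimal sketch of the shape of the transformation (names are
/// illustrative): with PredBBs = {P1, P2} and BB branching to SuccBB among
/// others, the predecessors are first factored into a single block P, and a
/// copy BB.thread of BB (without its terminator) is created so that
///
///   P1, P2 -> P -> BB.thread -> SuccBB      (unconditionally)
///
/// while BB keeps serving its remaining predecessors.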
2357 | void JumpThreadingPass::threadEdge(BasicBlock *BB, | |||
2358 | const SmallVectorImpl<BasicBlock *> &PredBBs, | |||
2359 | BasicBlock *SuccBB) { | |||
2360 | assert(SuccBB != BB && "Don't create an infinite loop");
2361 | ||||
2362 | assert(!LoopHeaders.count(BB) && !LoopHeaders.count(SuccBB) &&
2363 |        "Don't thread across loop headers");
2364 | ||||
2365 | // And finally, do it! Start by factoring the predecessors if needed. | |||
2366 | BasicBlock *PredBB; | |||
2367 | if (PredBBs.size() == 1) | |||
2368 | PredBB = PredBBs[0]; | |||
2369 | else { | |||
2370 | LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size()
2371 |                   << " common predecessors.\n");
2372 | PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm"); | |||
2373 | } | |||
2374 | ||||
2375 | // And finally, do it! | |||
2376 | LLVM_DEBUG(dbgs() << " Threading edge from '" << PredBB->getName()
2377 |                   << "' to '" << SuccBB->getName()
2378 |                   << "', across block:\n " << *BB << "\n");
2379 | ||||
2380 | LVI->threadEdge(PredBB, BB, SuccBB); | |||
2381 | ||||
2382 | BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), | |||
2383 | BB->getName()+".thread", | |||
2384 | BB->getParent(), BB); | |||
2385 | NewBB->moveAfter(PredBB); | |||
2386 | ||||
2387 | // Set the block frequency of NewBB. | |||
2388 | if (HasProfileData) { | |||
2389 | auto NewBBFreq = | |||
2390 | BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB); | |||
2391 | BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency()); | |||
2392 | } | |||
2393 | ||||
2394 | // Copy all the instructions from BB to NewBB except the terminator. | |||
2395 | DenseMap<Instruction *, Value *> ValueMapping = | |||
2396 | cloneInstructions(BB->begin(), std::prev(BB->end()), NewBB, PredBB); | |||
2397 | ||||
2398 | // We didn't copy the terminator from BB over to NewBB, because there is now | |||
2399 | // an unconditional jump to SuccBB. Insert the unconditional jump. | |||
2400 | BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB); | |||
2401 | NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc()); | |||
2402 | ||||
2403 | // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the | |||
2404 | // PHI nodes for NewBB now. | |||
2405 | addPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping); | |||
2406 | ||||
2407 | // Update the terminator of PredBB to jump to NewBB instead of BB. This | |||
2408 | // eliminates predecessors from BB, which requires us to simplify any PHI | |||
2409 | // nodes in BB. | |||
2410 | Instruction *PredTerm = PredBB->getTerminator(); | |||
2411 | for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) | |||
2412 | if (PredTerm->getSuccessor(i) == BB) { | |||
2413 | BB->removePredecessor(PredBB, true); | |||
2414 | PredTerm->setSuccessor(i, NewBB); | |||
2415 | } | |||
2416 | ||||
2417 | // Enqueue required DT updates. | |||
2418 | DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, SuccBB}, | |||
2419 | {DominatorTree::Insert, PredBB, NewBB}, | |||
2420 | {DominatorTree::Delete, PredBB, BB}}); | |||
2421 | ||||
2422 | updateSSA(BB, NewBB, ValueMapping); | |||
2423 | ||||
2424 | // At this point, the IR is fully up to date and consistent. Do a quick scan | |||
2425 | // over the new instructions and zap any that are constants or dead. This | |||
2426 | // frequently happens because of phi translation. | |||
2427 | SimplifyInstructionsInBlock(NewBB, TLI); | |||
2428 | ||||
2429 | // Update the edge weight from BB to SuccBB, which should be less than before. | |||
2430 | updateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB); | |||
2431 | ||||
2432 | // Threaded an edge! | |||
2433 | ++NumThreads; | |||
2434 | } | |||
2435 | ||||
2436 | /// Create a new basic block that will be the predecessor of BB and successor of | |||
2437 | /// all blocks in Preds. When profile data is available, update the frequency of | |||
2438 | /// this new block. | |||
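///
/// As a hedged example with made-up numbers: for Preds = {P1, P2} with
/// Freq(P1->BB) = 30 and Freq(P2->BB) = 50, the new block receives a frequency
/// of 30 + 50 = 80, and the DomTree updates replace the edges P1->BB and
/// P2->BB with P1->NewBB, P2->NewBB and NewBB->BB.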
2439 | BasicBlock *JumpThreadingPass::splitBlockPreds(BasicBlock *BB, | |||
2440 | ArrayRef<BasicBlock *> Preds, | |||
2441 | const char *Suffix) { | |||
2442 | SmallVector<BasicBlock *, 2> NewBBs; | |||
2443 | ||||
2444 | // Collect the frequencies of all predecessors of BB, which will be used to | |||
2445 | // update the edge weight of the result of splitting predecessors. | |||
2446 | DenseMap<BasicBlock *, BlockFrequency> FreqMap; | |||
2447 | if (HasProfileData) | |||
2448 | for (auto Pred : Preds) | |||
2449 | FreqMap.insert(std::make_pair( | |||
2450 | Pred, BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB))); | |||
2451 | ||||
2452 | // In the case when BB is a LandingPad block we create 2 new predecessors | |||
2453 | // instead of just one. | |||
2454 | if (BB->isLandingPad()) { | |||
2455 | std::string NewName = std::string(Suffix) + ".split-lp"; | |||
2456 | SplitLandingPadPredecessors(BB, Preds, Suffix, NewName.c_str(), NewBBs); | |||
2457 | } else { | |||
2458 | NewBBs.push_back(SplitBlockPredecessors(BB, Preds, Suffix)); | |||
2459 | } | |||
2460 | ||||
2461 | std::vector<DominatorTree::UpdateType> Updates; | |||
2462 | Updates.reserve((2 * Preds.size()) + NewBBs.size()); | |||
2463 | for (auto NewBB : NewBBs) { | |||
2464 | BlockFrequency NewBBFreq(0); | |||
2465 | Updates.push_back({DominatorTree::Insert, NewBB, BB}); | |||
2466 | for (auto Pred : predecessors(NewBB)) { | |||
2467 | Updates.push_back({DominatorTree::Delete, Pred, BB}); | |||
2468 | Updates.push_back({DominatorTree::Insert, Pred, NewBB}); | |||
2469 | if (HasProfileData) // Update frequencies between Pred -> NewBB. | |||
2470 | NewBBFreq += FreqMap.lookup(Pred); | |||
2471 | } | |||
2472 | if (HasProfileData) // Apply the summed frequency to NewBB. | |||
2473 | BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency()); | |||
2474 | } | |||
2475 | ||||
2476 | DTU->applyUpdatesPermissive(Updates); | |||
2477 | return NewBBs[0]; | |||
2478 | } | |||
2479 | ||||
2480 | bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) { | |||
2481 | const Instruction *TI = BB->getTerminator(); | |||
2482 | assert(TI->getNumSuccessors() > 1 && "not a split");
2483 | ||||
2484 | MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof); | |||
2485 | if (!WeightsNode) | |||
2486 | return false; | |||
2487 | ||||
2488 | MDString *MDName = cast<MDString>(WeightsNode->getOperand(0)); | |||
2489 | if (MDName->getString() != "branch_weights") | |||
2490 | return false; | |||
2491 | ||||
2492 | // Ensure there are weights for all of the successors. Note that the first | |||
2493 | // operand to the metadata node is a name, not a weight. | |||
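  // For example, a conditional branch carrying profile data typically looks
  // like
  //   br i1 %cmp, label %a, label %b, !prof !0
  //   !0 = !{!"branch_weights", i32 2000, i32 16}
  // i.e. one name operand followed by one weight per successor.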
2494 | return WeightsNode->getNumOperands() == TI->getNumSuccessors() + 1; | |||
2495 | } | |||
2496 | ||||
2497 | /// Update the block frequency of BB and the branch weight metadata on the
2498 | /// edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by
2499 | /// 1 - Freq(PredBB->BB) / Freq(BB->SuccBB).
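///
/// A worked example with made-up numbers: if Freq(BB) = 100,
/// Freq(PredBB->BB) = 30 (now redirected through NewBB) and the edge
/// probability BB->SuccBB is 0.6, then Freq(BB->SuccBB) was 60. After
/// threading, Freq(BB) becomes 100 - 30 = 70 and Freq(BB->SuccBB) becomes
/// 60 - 30 = 30, i.e. it is scaled by 1 - 30/60 = 0.5.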
2500 | void JumpThreadingPass::updateBlockFreqAndEdgeWeight(BasicBlock *PredBB, | |||
2501 | BasicBlock *BB, | |||
2502 | BasicBlock *NewBB, | |||
2503 | BasicBlock *SuccBB) { | |||
2504 | if (!HasProfileData) | |||
2505 | return; | |||
2506 | ||||
2507 | assert(BFI && BPI && "BFI & BPI should have been created here");
2508 | ||||
2509 | // As the edge from PredBB to BB is deleted, we have to update the block | |||
2510 | // frequency of BB. | |||
2511 | auto BBOrigFreq = BFI->getBlockFreq(BB); | |||
2512 | auto NewBBFreq = BFI->getBlockFreq(NewBB); | |||
2513 | auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB); | |||
2514 | auto BBNewFreq = BBOrigFreq - NewBBFreq; | |||
2515 | BFI->setBlockFreq(BB, BBNewFreq.getFrequency()); | |||
2516 | ||||
2517 | // Collect updated outgoing edges' frequencies from BB and use them to update | |||
2518 | // edge probabilities. | |||
2519 | SmallVector<uint64_t, 4> BBSuccFreq; | |||
2520 | for (BasicBlock *Succ : successors(BB)) { | |||
2521 | auto SuccFreq = (Succ == SuccBB) | |||
2522 | ? BB2SuccBBFreq - NewBBFreq | |||
2523 | : BBOrigFreq * BPI->getEdgeProbability(BB, Succ); | |||
2524 | BBSuccFreq.push_back(SuccFreq.getFrequency()); | |||
2525 | } | |||
2526 | ||||
2527 | uint64_t MaxBBSuccFreq = | |||
2528 | *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end()); | |||
2529 | ||||
2530 | SmallVector<BranchProbability, 4> BBSuccProbs; | |||
2531 | if (MaxBBSuccFreq == 0) | |||
2532 | BBSuccProbs.assign(BBSuccFreq.size(), | |||
2533 | {1, static_cast<unsigned>(BBSuccFreq.size())}); | |||
2534 | else { | |||
2535 | for (uint64_t Freq : BBSuccFreq) | |||
2536 | BBSuccProbs.push_back( | |||
2537 | BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq)); | |||
2538 | // Normalize edge probabilities so that they sum up to one. | |||
2539 | BranchProbability::normalizeProbabilities(BBSuccProbs.begin(), | |||
2540 | BBSuccProbs.end()); | |||
2541 | } | |||
2542 | ||||
2543 | // Update edge probabilities in BPI. | |||
2544 | BPI->setEdgeProbability(BB, BBSuccProbs); | |||
2545 | ||||
2546 | // Update the profile metadata as well. | |||
2547 | // | |||
2548 | // Don't do this if the profile of the transformed blocks was statically | |||
2549 | // estimated. (This could occur despite the function having an entry | |||
2550 | // frequency in completely cold parts of the CFG.) | |||
2551 | // | |||
2552 | // In this case we don't want to suggest to subsequent passes that the | |||
2553 | // calculated weights are fully consistent. Consider this graph: | |||
2554 | // | |||
2555 | // check_1 | |||
2556 | // 50% / | | |||
2557 | // eq_1 | 50% | |||
2558 | // \ | | |||
2559 | // check_2 | |||
2560 | // 50% / | | |||
2561 | // eq_2 | 50% | |||
2562 | // \ | | |||
2563 | // check_3 | |||
2564 | // 50% / | | |||
2565 | // eq_3 | 50% | |||
2566 | // \ | | |||
2567 | // | |||
2568 | // Assuming the blocks check_* all compare the same value against 1, 2 and 3, | |||
2569 | // the overall probabilities are inconsistent; the total probability that the | |||
2570 | // value is either 1, 2 or 3 is 150%. | |||
2571 | // | |||
2572 | // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3 | |||
2573 | // becomes 0%. This is even worse if the edge whose probability becomes 0% is | |||
2574 | // the loop exit edge. Then based solely on static estimation we would assume | |||
2575 | // the loop was extremely hot. | |||
2576 | // | |||
2577 | // FIXME: Fix this locally as well so that BPI and BFI stay consistent. We
2578 | // shouldn't make edges extremely likely or unlikely based solely on static
2579 | // estimation.
2580 | if (BBSuccProbs.size() >= 2 && doesBlockHaveProfileData(BB)) { | |||
2581 | SmallVector<uint32_t, 4> Weights; | |||
2582 | for (auto Prob : BBSuccProbs) | |||
2583 | Weights.push_back(Prob.getNumerator()); | |||
2584 | ||||
2585 | auto TI = BB->getTerminator(); | |||
2586 | TI->setMetadata( | |||
2587 | LLVMContext::MD_prof, | |||
2588 | MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights)); | |||
2589 | } | |||
2590 | } | |||
2591 | ||||
2592 | /// duplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch | |||
2593 | /// to BB which contains an i1 PHI node and a conditional branch on that PHI. | |||
2594 | /// If we can duplicate the contents of BB up into PredBB, do so now; this
2595 | /// improves the odds that the branch will be on an analyzable instruction like
2596 | /// a compare.
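///
/// A rough before/after sketch (names are illustrative). Before:
///
///   PredBB:
///     %cond = icmp eq i32 %x, 0
///     br label %BB
///   BB:
///     %p = phi i1 [ %cond, %PredBB ], [ %other, %OtherPred ]
///     br i1 %p, label %T, label %F
///
/// After duplication, PredBB ends in "br i1 %cond, label %T, label %F", which
/// is analyzable, while BB keeps handling its remaining predecessors.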
2597 | bool JumpThreadingPass::duplicateCondBranchOnPHIIntoPred( | |||
2598 | BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) { | |||
2599 | assert(!PredBBs.empty() && "Can't handle an empty set");
2600 | ||||
2601 | // If BB is a loop header, then duplicating this block outside the loop would | |||
2602 | // cause us to transform this into an irreducible loop, don't do this. | |||
2603 | // See the comments above findLoopHeaders for justifications and caveats. | |||
2604 | if (LoopHeaders.count(BB)) { | |||
2605 | LLVM_DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName()
2606 |                   << "' into predecessor block '" << PredBBs[0]->getName()
2607 |                   << "' - it might create an irreducible loop!\n");
2608 | return false; | |||
2609 | } | |||
2610 | ||||
2611 | unsigned DuplicationCost = | |||
2612 | getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold); | |||
2613 | if (DuplicationCost > BBDupThreshold) { | |||
2614 | LLVM_DEBUG(dbgs() << " Not duplicating BB '" << BB->getName()
2615 |                   << "' - Cost is too high: " << DuplicationCost << "\n");
2616 | return false; | |||
2617 | } | |||
2618 | ||||
2619 | // And finally, do it! Start by factoring the predecessors if needed. | |||
2620 | std::vector<DominatorTree::UpdateType> Updates; | |||
2621 | BasicBlock *PredBB; | |||
2622 | if (PredBBs.size() == 1) | |||
2623 | PredBB = PredBBs[0]; | |||
2624 | else { | |||
2625 | LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size()
2626 |                   << " common predecessors.\n");
2627 | PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm"); | |||
2628 | } | |||
2629 | Updates.push_back({DominatorTree::Delete, PredBB, BB}); | |||
2630 | ||||
2631 | // Okay, we decided to do this! Clone all the instructions in BB onto the end | |||
2632 | // of PredBB. | |||
2633 | LLVM_DEBUG(dbgs() << " Duplicating block '" << BB->getName()
2634 |                   << "' into end of '" << PredBB->getName()
2635 |                   << "' to eliminate branch on phi. Cost: "
2636 |                   << DuplicationCost << " block is:" << *BB << "\n");
2637 | ||||
2638 | // Unless PredBB ends with an unconditional branch, split the edge so that we | |||
2639 | // can just clone the bits from BB into the end of the new PredBB. | |||
2640 | BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); | |||
2641 | ||||
2642 | if (!OldPredBranch || !OldPredBranch->isUnconditional()) { | |||
2643 | BasicBlock *OldPredBB = PredBB; | |||
2644 | PredBB = SplitEdge(OldPredBB, BB); | |||
2645 | Updates.push_back({DominatorTree::Insert, OldPredBB, PredBB}); | |||
2646 | Updates.push_back({DominatorTree::Insert, PredBB, BB}); | |||
2647 | Updates.push_back({DominatorTree::Delete, OldPredBB, BB}); | |||
2648 | OldPredBranch = cast<BranchInst>(PredBB->getTerminator()); | |||
2649 | } | |||
2650 | ||||
2651 | // We are going to have to map operands from the original BB block into the | |||
2652 | // PredBB block. Evaluate PHI nodes in BB. | |||
2653 | DenseMap<Instruction*, Value*> ValueMapping; | |||
2654 | ||||
2655 | BasicBlock::iterator BI = BB->begin(); | |||
2656 | for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) | |||
2657 | ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB); | |||
2658 | // Clone the non-phi instructions of BB into PredBB, keeping track of the | |||
2659 | // mapping and using it to remap operands in the cloned instructions. | |||
2660 | for (; BI != BB->end(); ++BI) { | |||
2661 | Instruction *New = BI->clone(); | |||
2662 | ||||
2663 | // Remap operands to patch up intra-block references. | |||
2664 | for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) | |||
2665 | if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { | |||
2666 | DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst); | |||
2667 | if (I != ValueMapping.end()) | |||
2668 | New->setOperand(i, I->second); | |||
2669 | } | |||
2670 | ||||
2671 | // If this instruction can be simplified after the operands are updated, | |||
2672 | // just use the simplified value instead. This frequently happens due to | |||
2673 | // phi translation. | |||
2674 | if (Value *IV = SimplifyInstruction( | |||
2675 | New, | |||
2676 | {BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) { | |||
2677 | ValueMapping[&*BI] = IV; | |||
2678 | if (!New->mayHaveSideEffects()) { | |||
2679 | New->deleteValue(); | |||
2680 | New = nullptr; | |||
2681 | } | |||
2682 | } else { | |||
2683 | ValueMapping[&*BI] = New; | |||
2684 | } | |||
2685 | if (New) { | |||
2686 | // Otherwise, insert the new instruction into the block. | |||
2687 | New->setName(BI->getName()); | |||
2688 | PredBB->getInstList().insert(OldPredBranch->getIterator(), New); | |||
2689 | // Update Dominance from simplified New instruction operands. | |||
2690 | for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) | |||
2691 | if (BasicBlock *SuccBB = dyn_cast<BasicBlock>(New->getOperand(i))) | |||
2692 | Updates.push_back({DominatorTree::Insert, PredBB, SuccBB}); | |||
2693 | } | |||
2694 | } | |||
2695 | ||||
2696 | // Check to see if the targets of the branch had PHI nodes. If so, we need to | |||
2697 | // add entries to the PHI nodes for branch from PredBB now. | |||
2698 | BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator()); | |||
2699 | addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB, | |||
2700 | ValueMapping); | |||
2701 | addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB, | |||
2702 | ValueMapping); | |||
2703 | ||||
2704 | updateSSA(BB, PredBB, ValueMapping); | |||
2705 | ||||
2706 | // PredBB no longer jumps to BB, remove entries in the PHI node for the edge | |||
2707 | // that we nuked. | |||
2708 | BB->removePredecessor(PredBB, true); | |||
2709 | ||||
2710 | // Remove the unconditional branch at the end of the PredBB block. | |||
2711 | OldPredBranch->eraseFromParent(); | |||
2712 | if (HasProfileData) | |||
2713 | BPI->copyEdgeProbabilities(BB, PredBB); | |||
2714 | DTU->applyUpdatesPermissive(Updates); | |||
2715 | ||||
2716 | ++NumDupes; | |||
2717 | return true; | |||
2718 | } | |||
2719 | ||||
2720 | // Pred is a predecessor of BB with an unconditional branch to BB. SI is | |||
2721 | // a Select instruction in Pred. BB has other predecessors and SI is used in | |||
2722 | // a PHI node in BB. SI has no other use. | |||
2723 | // A new basic block, NewBB, is created and SI is converted into a compare and
2724 | // a conditional branch. SI is then erased from its parent.
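//
// A minimal sketch in IR terms (names other than "select.unfold" are
// illustrative). Before:
//
//   Pred:
//     %s = select i1 %c, i32 %tv, i32 %fv
//     br label %BB
//   BB:
//     %phi = phi i32 [ %s, %Pred ], ...
//
// After:
//
//   Pred:
//     br i1 %c, label %select.unfold, label %BB
//   select.unfold:
//     br label %BB
//   BB:
//     %phi = phi i32 [ %fv, %Pred ], [ %tv, %select.unfold ], ...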
2725 | void JumpThreadingPass::unfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, | |||
2726 | SelectInst *SI, PHINode *SIUse, | |||
2727 | unsigned Idx) { | |||
2728 | // Expand the select. | |||
2729 | // | |||
2730 | // Pred -- | |||
2731 | // | v | |||
2732 | // | NewBB | |||
2733 | // | | | |||
2734 | // |----- | |||
2735 | // v | |||
2736 | // BB | |||
2737 | BranchInst *PredTerm = cast<BranchInst>(Pred->getTerminator()); | |||
2738 | BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold", | |||
2739 | BB->getParent(), BB); | |||
2740 | // Move the unconditional branch to NewBB. | |||
2741 | PredTerm->removeFromParent(); | |||
2742 | NewBB->getInstList().insert(NewBB->end(), PredTerm); | |||
2743 | // Create a conditional branch and update PHI nodes. | |||
2744 | BranchInst::Create(NewBB, BB, SI->getCondition(), Pred); | |||
2745 | SIUse->setIncomingValue(Idx, SI->getFalseValue()); | |||
2746 | SIUse->addIncoming(SI->getTrueValue(), NewBB); | |||
2747 | ||||
2748 | // The select is now dead. | |||
2749 | SI->eraseFromParent(); | |||
2750 | DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, BB}, | |||
2751 | {DominatorTree::Insert, Pred, NewBB}}); | |||
2752 | ||||
2753 | // Update any other PHI nodes in BB. | |||
2754 | for (BasicBlock::iterator BI = BB->begin(); | |||
2755 | PHINode *Phi = dyn_cast<PHINode>(BI); ++BI) | |||
2756 | if (Phi != SIUse) | |||
2757 | Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB); | |||
2758 | } | |||
2759 | ||||
2760 | bool JumpThreadingPass::tryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB) { | |||
2761 | PHINode *CondPHI = dyn_cast<PHINode>(SI->getCondition()); | |||
2762 | ||||
2763 | if (!CondPHI || CondPHI->getParent() != BB) | |||
2764 | return false; | |||
2765 | ||||
2766 | for (unsigned I = 0, E = CondPHI->getNumIncomingValues(); I != E; ++I) { | |||
2767 | BasicBlock *Pred = CondPHI->getIncomingBlock(I); | |||
2768 | SelectInst *PredSI = dyn_cast<SelectInst>(CondPHI->getIncomingValue(I)); | |||
2769 | ||||
2770 | // The second and third conditions could potentially be relaxed. Currently they
2771 | // help to simplify the code and allow us to reuse existing code, developed
2772 | // for tryToUnfoldSelect(CmpInst *, BasicBlock *).
2773 | if (!PredSI || PredSI->getParent() != Pred || !PredSI->hasOneUse()) | |||
2774 | continue; | |||
2775 | ||||
2776 | BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); | |||
2777 | if (!PredTerm || !PredTerm->isUnconditional()) | |||
2778 | continue; | |||
2779 | ||||
2780 | unfoldSelectInstr(Pred, BB, PredSI, CondPHI, I); | |||
2781 | return true; | |||
2782 | } | |||
2783 | return false; | |||
2784 | } | |||
2785 | ||||
2786 | /// tryToUnfoldSelect - Look for blocks of the form | |||
2787 | /// bb1: | |||
2788 | /// %a = select | |||
2789 | /// br bb2 | |||
2790 | /// | |||
2791 | /// bb2: | |||
2792 | /// %p = phi [%a, %bb1] ... | |||
2793 | /// %c = icmp %p | |||
2794 | /// br i1 %c | |||
2795 | /// | |||
2796 | /// And expand the select into a branch structure if one of its arms allows %c | |||
2797 | /// to be folded. This later enables threading from bb1 over bb2. | |||
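///
/// For instance (a hedged example), if %a = select i1 %flag, i32 0, i32 %v and
/// %c = icmp eq i32 %p, 0, then along the %bb1 edge the true arm of the select
/// makes %c fold to true while the false arm is unknown; one side folds and the
/// other does not, so the select is unfolded into a branch.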
2798 | bool JumpThreadingPass::tryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) { | |||
2799 | BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); | |||
2800 | PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0)); | |||
2801 | Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1)); | |||
2802 | ||||
2803 | if (!CondBr || !CondBr->isConditional() || !CondLHS || | |||
2804 | CondLHS->getParent() != BB) | |||
2805 | return false; | |||
2806 | ||||
2807 | for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) { | |||
2808 | BasicBlock *Pred = CondLHS->getIncomingBlock(I); | |||
2809 | SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I)); | |||
2810 | ||||
2811 | // Look if one of the incoming values is a select in the corresponding | |||
2812 | // predecessor. | |||
2813 | if (!SI || SI->getParent() != Pred || !SI->hasOneUse()) | |||
2814 | continue; | |||
2815 | ||||
2816 | BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); | |||
2817 | if (!PredTerm || !PredTerm->isUnconditional()) | |||
2818 | continue; | |||
2819 | ||||
2820 | // Now check if one of the select values would allow us to constant fold the | |||
2821 | // terminator in BB. We don't do the transform if both sides fold, those | |||
2822 | // cases will be threaded in any case. | |||
2823 | LazyValueInfo::Tristate LHSFolds = | |||
2824 | LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1), | |||
2825 | CondRHS, Pred, BB, CondCmp); | |||
2826 | LazyValueInfo::Tristate RHSFolds = | |||
2827 | LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2), | |||
2828 | CondRHS, Pred, BB, CondCmp); | |||
2829 | if ((LHSFolds != LazyValueInfo::Unknown || | |||
2830 | RHSFolds != LazyValueInfo::Unknown) && | |||
2831 | LHSFolds != RHSFolds) { | |||
2832 | unfoldSelectInstr(Pred, BB, SI, CondLHS, I); | |||
2833 | return true; | |||
2834 | } | |||
2835 | } | |||
2836 | return false; | |||
2837 | } | |||
2838 | ||||
2839 | /// tryToUnfoldSelectInCurrBB - Look for PHI/Select or PHI/CMP/Select in the | |||
2840 | /// same BB in the form | |||
2841 | /// bb: | |||
2842 | /// %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ... | |||
2843 | /// %s = select %p, trueval, falseval | |||
2844 | /// | |||
2845 | /// or | |||
2846 | /// | |||
2847 | /// bb: | |||
2848 | /// %p = phi [0, %bb1], [1, %bb2], [0, %bb3], [1, %bb4], ... | |||
2849 | /// %c = cmp %p, 0 | |||
2850 | /// %s = select %c, trueval, falseval | |||
2851 | /// | |||
2852 | /// And expand the select into a branch structure. This later enables | |||
2853 | /// jump-threading over bb in this pass. | |||
2854 | /// | |||
2855 | /// Using an approach similar to SimplifyCFG::FoldCondBranchOnPHI(), unfold the
2856 | /// select if the associated PHI has at least one constant. If the unfolded | |||
2857 | /// select is not jump-threaded, it will be folded again in the later | |||
2858 | /// optimizations. | |||
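///
/// A rough sketch of the expanded form for the second pattern (block names are
/// illustrative; a freeze of the condition may be inserted first):
///
///   bb:
///     %p = phi [0, %bb1], [1, %bb2], ...
///     %c = cmp %p, 0
///     br i1 %c, label %then, label %bb.split
///   then:
///     br label %bb.split
///   bb.split:
///     %s = phi [trueval, %then], [falseval, %bb]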
2859 | bool JumpThreadingPass::tryToUnfoldSelectInCurrBB(BasicBlock *BB) { | |||
2860 | // This transform would reduce the quality of msan diagnostics. | |||
2861 | // Disable this transform under MemorySanitizer. | |||
2862 | if (BB->getParent()->hasFnAttribute(Attribute::SanitizeMemory)) | |||
2863 | return false; | |||
2864 | ||||
2865 | // If threading this would thread across a loop header, don't thread the edge. | |||
2866 | // See the comments above findLoopHeaders for justifications and caveats. | |||
2867 | if (LoopHeaders.count(BB)) | |||
2868 | return false; | |||
2869 | ||||
2870 | for (BasicBlock::iterator BI = BB->begin(); | |||
2871 | PHINode *PN = dyn_cast<PHINode>(BI); ++BI) { | |||
2872 | // Look for a Phi having at least one constant incoming value. | |||
2873 | if (llvm::all_of(PN->incoming_values(), | |||
2874 | [](Value *V) { return !isa<ConstantInt>(V); })) | |||
2875 | continue; | |||
2876 | ||||
2877 | auto isUnfoldCandidate = [BB](SelectInst *SI, Value *V) { | |||
2878 | using namespace PatternMatch; | |||
2879 | ||||
2880 | // Check if SI is in BB and uses V as its condition.
2881 | if (SI->getParent() != BB) | |||
2882 | return false; | |||
2883 | Value *Cond = SI->getCondition(); | |||
2884 | bool IsAndOr = match(SI, m_CombineOr(m_LogicalAnd(), m_LogicalOr())); | |||
2885 | return Cond && Cond == V && Cond->getType()->isIntegerTy(1) && !IsAndOr; | |||
2886 | }; | |||
2887 | ||||
2888 | SelectInst *SI = nullptr; | |||
2889 | for (Use &U : PN->uses()) { | |||
2890 | if (ICmpInst *Cmp = dyn_cast<ICmpInst>(U.getUser())) { | |||
2891 | // Look for an ICmp in BB that compares PN with a constant and is the
2892 | // condition of a Select. | |||
2893 | if (Cmp->getParent() == BB && Cmp->hasOneUse() && | |||
2894 | isa<ConstantInt>(Cmp->getOperand(1 - U.getOperandNo()))) | |||
2895 | if (SelectInst *SelectI = dyn_cast<SelectInst>(Cmp->user_back())) | |||
2896 | if (isUnfoldCandidate(SelectI, Cmp->use_begin()->get())) { | |||
2897 | SI = SelectI; | |||
2898 | break; | |||
2899 | } | |||
2900 | } else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) { | |||
2901 | // Look for a Select in BB that uses PN as condition. | |||
2902 | if (isUnfoldCandidate(SelectI, U.get())) { | |||
2903 | SI = SelectI; | |||
2904 | break; | |||
2905 | } | |||
2906 | } | |||
2907 | } | |||
2908 | ||||
2909 | if (!SI) | |||
2910 | continue; | |||
2911 | // Expand the select. | |||
2912 | Value *Cond = SI->getCondition(); | |||
2913 | if (InsertFreezeWhenUnfoldingSelect && | |||
2914 | !isGuaranteedNotToBeUndefOrPoison(Cond, nullptr, SI, | |||
2915 | &DTU->getDomTree())) | |||
2916 | Cond = new FreezeInst(Cond, "cond.fr", SI); | |||
2917 | Instruction *Term = SplitBlockAndInsertIfThen(Cond, SI, false); | |||
2918 | BasicBlock *SplitBB = SI->getParent(); | |||
2919 | BasicBlock *NewBB = Term->getParent(); | |||
2920 | PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI); | |||
2921 | NewPN->addIncoming(SI->getTrueValue(), Term->getParent()); | |||
2922 | NewPN->addIncoming(SI->getFalseValue(), BB); | |||
2923 | SI->replaceAllUsesWith(NewPN); | |||
2924 | SI->eraseFromParent(); | |||
2925 | // NewBB and SplitBB are newly created blocks which require insertion. | |||
2926 | std::vector<DominatorTree::UpdateType> Updates; | |||
2927 | Updates.reserve((2 * SplitBB->getTerminator()->getNumSuccessors()) + 3); | |||
2928 | Updates.push_back({DominatorTree::Insert, BB, SplitBB}); | |||
2929 | Updates.push_back({DominatorTree::Insert, BB, NewBB}); | |||
2930 | Updates.push_back({DominatorTree::Insert, NewBB, SplitBB}); | |||
2931 | // BB's successors were moved to SplitBB, update DTU accordingly. | |||
2932 | for (auto *Succ : successors(SplitBB)) { | |||
2933 | Updates.push_back({DominatorTree::Delete, BB, Succ}); | |||
2934 | Updates.push_back({DominatorTree::Insert, SplitBB, Succ}); | |||
2935 | } | |||
2936 | DTU->applyUpdatesPermissive(Updates); | |||
2937 | return true; | |||
2938 | } | |||
2939 | return false; | |||
2940 | } | |||
2941 | ||||
2942 | /// Try to propagate a guard from the current BB into one of its predecessors | |||
2943 | /// in case another branch of execution implies that the condition of this
2944 | /// guard is always true. Currently we only process the simplest case that | |||
2945 | /// looks like: | |||
2946 | /// | |||
2947 | /// Start: | |||
2948 | /// %cond = ... | |||
2949 | /// br i1 %cond, label %T1, label %F1 | |||
2950 | /// T1: | |||
2951 | /// br label %Merge | |||
2952 | /// F1: | |||
2953 | /// br label %Merge | |||
2954 | /// Merge: | |||
2955 | /// %condGuard = ... | |||
2956 | /// call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ] | |||
2957 | /// | |||
2958 | /// And cond either implies condGuard or !condGuard. In this case all the | |||
2959 | /// instructions before the guard can be duplicated in both branches, and the | |||
2960 | /// guard is then threaded to one of them. | |||
2961 | bool JumpThreadingPass::processGuards(BasicBlock *BB) { | |||
2962 | using namespace PatternMatch; | |||
2963 | ||||
2964 | // We only want to deal with two predecessors. | |||
2965 | BasicBlock *Pred1, *Pred2; | |||
2966 | auto PI = pred_begin(BB), PE = pred_end(BB); | |||
2967 | if (PI == PE) | |||
2968 | return false; | |||
2969 | Pred1 = *PI++; | |||
2970 | if (PI == PE) | |||
2971 | return false; | |||
2972 | Pred2 = *PI++; | |||
2973 | if (PI != PE) | |||
2974 | return false; | |||
2975 | if (Pred1 == Pred2) | |||
2976 | return false; | |||
2977 | ||||
2978 | // Try to thread one of the guards of the block. | |||
2979 | // TODO: Look up deeper than to immediate predecessor? | |||
2980 | auto *Parent = Pred1->getSinglePredecessor(); | |||
2981 | if (!Parent || Parent != Pred2->getSinglePredecessor()) | |||
2982 | return false; | |||
2983 | ||||
2984 | if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator())) | |||
2985 | for (auto &I : *BB) | |||
2986 | if (isGuard(&I) && threadGuard(BB, cast<IntrinsicInst>(&I), BI)) | |||
2987 | return true; | |||
2988 | ||||
2989 | return false; | |||
2990 | } | |||
2991 | ||||
2992 | /// Try to propagate the guard from BB, which is the lower block of a diamond, | |||
2993 | /// to one of its branches, in case the diamond's condition implies the guard's | |||
2994 | /// condition. | |||
2995 | bool JumpThreadingPass::threadGuard(BasicBlock *BB, IntrinsicInst *Guard, | |||
2996 | BranchInst *BI) { | |||
2997 | assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?"); | |||
2998 | assert(BI->isConditional() && "Unconditional branch has 2 successors?"); | |||
2999 | Value *GuardCond = Guard->getArgOperand(0); | |||
3000 | Value *BranchCond = BI->getCondition(); | |||
3001 | BasicBlock *TrueDest = BI->getSuccessor(0); | |||
3002 | BasicBlock *FalseDest = BI->getSuccessor(1); | |||
3003 | ||||
3004 | auto &DL = BB->getModule()->getDataLayout(); | |||
3005 | bool TrueDestIsSafe = false; | |||
3006 | bool FalseDestIsSafe = false; | |||
3007 | ||||
3008 | // True dest is safe if BranchCond => GuardCond. | |||
3009 | auto Impl = isImpliedCondition(BranchCond, GuardCond, DL); | |||
3010 | if (Impl && *Impl) | |||
3011 | TrueDestIsSafe = true; | |||
3012 | else { | |||
3013 | // False dest is safe if !BranchCond => GuardCond. | |||
3014 | Impl = isImpliedCondition(BranchCond, GuardCond, DL, /* LHSIsTrue */ false); | |||
3015 | if (Impl && *Impl) | |||
3016 | FalseDestIsSafe = true; | |||
3017 | } | |||
3018 | ||||
3019 | if (!TrueDestIsSafe && !FalseDestIsSafe) | |||
3020 | return false; | |||
3021 | ||||
3022 | BasicBlock *PredUnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest; | |||
3023 | BasicBlock *PredGuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest; | |||
3024 | ||||
3025 | ValueToValueMapTy UnguardedMapping, GuardedMapping; | |||
3026 | Instruction *AfterGuard = Guard->getNextNode(); | |||
3027 | unsigned Cost = getJumpThreadDuplicationCost(BB, AfterGuard, BBDupThreshold); | |||
3028 | if (Cost > BBDupThreshold) | |||
3029 | return false; | |||
3030 | // Duplicate all instructions before the guard and the guard itself to the | |||
3031 | // branch where implication is not proved. | |||
3032 | BasicBlock *GuardedBlock = DuplicateInstructionsInSplitBetween( | |||
3033 | BB, PredGuardedBlock, AfterGuard, GuardedMapping, *DTU); | |||
3034 | assert(GuardedBlock && "Could not create the guarded block?"); | |||
3035 | // Duplicate all instructions before the guard in the unguarded branch. | |||
3036 | // Since we have successfully duplicated the guarded block and this block | |||
3037 | // has fewer instructions, we expect it to succeed. | |||
3038 | BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween( | |||
3039 | BB, PredUnguardedBlock, Guard, UnguardedMapping, *DTU); | |||
3040 | assert(UnguardedBlock && "Could not create the unguarded block?"); | |||
3041 | LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block " | |||
3042 | << GuardedBlock->getName() << "\n"); | |||
3043 | // Some instructions before the guard may still have uses. For them, we need | |||
3044 | // to create Phi nodes merging their copies in both guarded and unguarded | |||
3045 | // branches. Those instructions that have no uses can be just removed. | |||
3046 | SmallVector<Instruction *, 4> ToRemove; | |||
3047 | for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI) | |||
3048 | if (!isa<PHINode>(&*BI)) | |||
3049 | ToRemove.push_back(&*BI); | |||
3050 | ||||
3051 | Instruction *InsertionPoint = &*BB->getFirstInsertionPt(); | |||
3052 | assert(InsertionPoint && "Empty block?"); | |||
3053 | // Substitute with Phis & remove. | |||
3054 | for (auto *Inst : reverse(ToRemove)) { | |||
3055 | if (!Inst->use_empty()) { | |||
3056 | PHINode *NewPN = PHINode::Create(Inst->getType(), 2); | |||
3057 | NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock); | |||
3058 | NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock); | |||
3059 | NewPN->insertBefore(InsertionPoint); | |||
3060 | Inst->replaceAllUsesWith(NewPN); | |||
3061 | } | |||
3062 | Inst->eraseFromParent(); | |||
3063 | } | |||
3064 | return true; | |||
3065 | } |
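
The decision above hinges on the two isImpliedCondition queries. As a minimal sketch of that check in isolation (a hypothetical helper with invented names; isImpliedCondition is the same llvm/Analysis/ValueTracking.h helper used above):

  // Illustrative only: the true successor is safe when BranchCond being true
  // implies GuardCond; the false successor is safe when BranchCond being false
  // implies GuardCond. Both may stay false, in which case nothing is threaded.
  static void classifyGuardDests(Value *BranchCond, Value *GuardCond,
                                 const DataLayout &DL, bool &SafeOnTrue,
                                 bool &SafeOnFalse) {
    SafeOnTrue = SafeOnFalse = false;
    Optional<bool> Impl = isImpliedCondition(BranchCond, GuardCond, DL);
    SafeOnTrue = Impl && *Impl;
    if (!SafeOnTrue) {
      Impl = isImpliedCondition(BranchCond, GuardCond, DL, /*LHSIsTrue=*/false);
      SafeOnFalse = Impl && *Impl;
    }
  }
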
1 | //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file exposes the class definitions of all of the subclasses of the |
10 | // Instruction class. This is meant to be an easy way to get access to all |
11 | // instruction subclasses. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_IR_INSTRUCTIONS_H |
16 | #define LLVM_IR_INSTRUCTIONS_H |
17 | |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/Bitfields.h" |
20 | #include "llvm/ADT/None.h" |
21 | #include "llvm/ADT/STLExtras.h" |
22 | #include "llvm/ADT/SmallVector.h" |
23 | #include "llvm/ADT/StringRef.h" |
24 | #include "llvm/ADT/Twine.h" |
25 | #include "llvm/ADT/iterator.h" |
26 | #include "llvm/ADT/iterator_range.h" |
27 | #include "llvm/IR/Attributes.h" |
28 | #include "llvm/IR/BasicBlock.h" |
29 | #include "llvm/IR/CallingConv.h" |
30 | #include "llvm/IR/CFG.h" |
31 | #include "llvm/IR/Constant.h" |
32 | #include "llvm/IR/DerivedTypes.h" |
33 | #include "llvm/IR/Function.h" |
34 | #include "llvm/IR/InstrTypes.h" |
35 | #include "llvm/IR/Instruction.h" |
36 | #include "llvm/IR/OperandTraits.h" |
37 | #include "llvm/IR/Type.h" |
38 | #include "llvm/IR/Use.h" |
39 | #include "llvm/IR/User.h" |
40 | #include "llvm/IR/Value.h" |
41 | #include "llvm/Support/AtomicOrdering.h" |
42 | #include "llvm/Support/Casting.h" |
43 | #include "llvm/Support/ErrorHandling.h" |
44 | #include <cassert> |
45 | #include <cstddef> |
46 | #include <cstdint> |
47 | #include <iterator> |
48 | |
49 | namespace llvm { |
50 | |
51 | class APInt; |
52 | class ConstantInt; |
53 | class DataLayout; |
54 | class LLVMContext; |
55 | |
56 | //===----------------------------------------------------------------------===// |
57 | // AllocaInst Class |
58 | //===----------------------------------------------------------------------===// |
59 | |
60 | /// an instruction to allocate memory on the stack |
61 | class AllocaInst : public UnaryInstruction { |
62 | Type *AllocatedType; |
63 | |
64 | using AlignmentField = AlignmentBitfieldElementT<0>; |
65 | using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>; |
66 | using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>; |
67 | static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField, |
68 | SwiftErrorField>(), |
69 | "Bitfields must be contiguous"); |
70 | |
71 | protected: |
72 | // Note: Instruction needs to be a friend here to call cloneImpl. |
73 | friend class Instruction; |
74 | |
75 | AllocaInst *cloneImpl() const; |
76 | |
77 | public: |
78 | explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, |
79 | const Twine &Name, Instruction *InsertBefore); |
80 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, |
81 | const Twine &Name, BasicBlock *InsertAtEnd); |
82 | |
83 | AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, |
84 | Instruction *InsertBefore); |
85 | AllocaInst(Type *Ty, unsigned AddrSpace, |
86 | const Twine &Name, BasicBlock *InsertAtEnd); |
87 | |
88 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, |
89 | const Twine &Name = "", Instruction *InsertBefore = nullptr); |
90 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, |
91 | const Twine &Name, BasicBlock *InsertAtEnd); |
92 | |
93 | /// Return true if there is an allocation size parameter to the allocation |
94 | /// instruction that is not 1. |
95 | bool isArrayAllocation() const; |
96 | |
97 | /// Get the number of elements allocated. For a simple allocation of a single |
98 | /// element, this will return a constant 1 value. |
99 | const Value *getArraySize() const { return getOperand(0); } |
100 | Value *getArraySize() { return getOperand(0); } |
101 | |
102 | /// Overload to return most specific pointer type. |
103 | PointerType *getType() const { |
104 | return cast<PointerType>(Instruction::getType()); |
105 | } |
106 | |
107 | /// Get allocation size in bits. Returns None if size can't be determined, |
108 | /// e.g. in case of a VLA. |
109 | Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const; |
110 | |
111 | /// Return the type that is being allocated by the instruction. |
112 | Type *getAllocatedType() const { return AllocatedType; } |
113 | /// for use only in special circumstances that need to generically |
114 | /// transform a whole instruction (eg: IR linking and vectorization). |
115 | void setAllocatedType(Type *Ty) { AllocatedType = Ty; } |
116 | |
117 | /// Return the alignment of the memory that is being allocated by the |
118 | /// instruction. |
119 | Align getAlign() const { |
120 | return Align(1ULL << getSubclassData<AlignmentField>()); |
121 | } |
122 | |
123 | void setAlignment(Align Align) { |
124 | setSubclassData<AlignmentField>(Log2(Align)); |
125 | } |
126 | |
127 | // FIXME: Remove this once the transition to Align is over.
128 | unsigned getAlignment() const { return getAlign().value(); } |
129 | |
130 | /// Return true if this alloca is in the entry block of the function and is a |
131 | /// constant size. If so, the code generator will fold it into the |
132 | /// prolog/epilog code, so it is basically free. |
133 | bool isStaticAlloca() const; |
134 | |
135 | /// Return true if this alloca is used as an inalloca argument to a call. Such |
136 | /// allocas are never considered static even if they are in the entry block. |
137 | bool isUsedWithInAlloca() const { |
138 | return getSubclassData<UsedWithInAllocaField>(); |
139 | } |
140 | |
141 | /// Specify whether this alloca is used to represent the arguments to a call. |
142 | void setUsedWithInAlloca(bool V) { |
143 | setSubclassData<UsedWithInAllocaField>(V); |
144 | } |
145 | |
146 | /// Return true if this alloca is used as a swifterror argument to a call. |
147 | bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); } |
148 | /// Specify whether this alloca is used to represent a swifterror. |
149 | void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); } |
150 | |
151 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
152 | static bool classof(const Instruction *I) { |
153 | return (I->getOpcode() == Instruction::Alloca); |
154 | } |
155 | static bool classof(const Value *V) { |
156 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
157 | } |
158 | |
159 | private: |
160 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
161 | // method so that subclasses cannot accidentally use it. |
162 | template <typename Bitfield> |
163 | void setSubclassData(typename Bitfield::Type Value) { |
164 | Instruction::setSubclassData<Bitfield>(Value); |
165 | } |
166 | }; |
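
A brief usage sketch for the accessors declared above (hypothetical, standalone code; the instruction is never inserted into a function, so it is freed directly):

  // Illustrative only: allocate four i32s with 8-byte alignment and query it.
  LLVMContext Ctx;
  Type *I32 = Type::getInt32Ty(Ctx);
  AllocaInst *AI = new AllocaInst(I32, /*AddrSpace=*/0,
                                  ConstantInt::get(I32, 4), Align(8), "buf");
  bool IsArray = AI->isArrayAllocation(); // true: the size operand is not 1
  Align A = AI->getAlign();               // Align(8); stored internally as a log2
  AI->deleteValue();                      // not inserted anywhere, so delete it
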
167 | |
168 | //===----------------------------------------------------------------------===// |
169 | // LoadInst Class |
170 | //===----------------------------------------------------------------------===// |
171 | |
172 | /// An instruction for reading from memory. This uses the SubclassData field in |
173 | /// Value to store whether or not the load is volatile. |
174 | class LoadInst : public UnaryInstruction { |
175 | using VolatileField = BoolBitfieldElementT<0>; |
176 | using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; |
177 | using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; |
178 | static_assert( |
179 | Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), |
180 | "Bitfields must be contiguous"); |
181 | |
182 | void AssertOK(); |
183 | |
184 | protected: |
185 | // Note: Instruction needs to be a friend here to call cloneImpl. |
186 | friend class Instruction; |
187 | |
188 | LoadInst *cloneImpl() const; |
189 | |
190 | public: |
191 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, |
192 | Instruction *InsertBefore); |
193 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); |
194 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
195 | Instruction *InsertBefore); |
196 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
197 | BasicBlock *InsertAtEnd); |
198 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
199 | Align Align, Instruction *InsertBefore = nullptr); |
200 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
201 | Align Align, BasicBlock *InsertAtEnd); |
202 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
203 | Align Align, AtomicOrdering Order, |
204 | SyncScope::ID SSID = SyncScope::System, |
205 | Instruction *InsertBefore = nullptr); |
206 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
207 | Align Align, AtomicOrdering Order, SyncScope::ID SSID, |
208 | BasicBlock *InsertAtEnd); |
209 | |
210 | /// Return true if this is a load from a volatile memory location. |
211 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
212 | |
213 | /// Specify whether this is a volatile load or not. |
214 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
215 | |
216 | /// Return the alignment of the access that is being performed. |
217 | /// FIXME: Remove this function once transition to Align is over. |
218 | /// Use getAlign() instead. |
219 | unsigned getAlignment() const { return getAlign().value(); } |
220 | |
221 | /// Return the alignment of the access that is being performed. |
222 | Align getAlign() const { |
223 | return Align(1ULL << (getSubclassData<AlignmentField>())); |
224 | } |
225 | |
226 | void setAlignment(Align Align) { |
227 | setSubclassData<AlignmentField>(Log2(Align)); |
228 | } |
229 | |
230 | /// Returns the ordering constraint of this load instruction. |
231 | AtomicOrdering getOrdering() const { |
232 | return getSubclassData<OrderingField>(); |
233 | } |
234 | /// Sets the ordering constraint of this load instruction. May not be Release |
235 | /// or AcquireRelease. |
236 | void setOrdering(AtomicOrdering Ordering) { |
237 | setSubclassData<OrderingField>(Ordering); |
238 | } |
239 | |
240 | /// Returns the synchronization scope ID of this load instruction. |
241 | SyncScope::ID getSyncScopeID() const { |
242 | return SSID; |
243 | } |
244 | |
245 | /// Sets the synchronization scope ID of this load instruction. |
246 | void setSyncScopeID(SyncScope::ID SSID) { |
247 | this->SSID = SSID; |
248 | } |
249 | |
250 | /// Sets the ordering constraint and the synchronization scope ID of this load |
251 | /// instruction. |
252 | void setAtomic(AtomicOrdering Ordering, |
253 | SyncScope::ID SSID = SyncScope::System) { |
254 | setOrdering(Ordering); |
255 | setSyncScopeID(SSID); |
256 | } |
257 | |
258 | bool isSimple() const { return !isAtomic() && !isVolatile(); } |
259 | |
260 | bool isUnordered() const { |
261 | return (getOrdering() == AtomicOrdering::NotAtomic || |
262 | getOrdering() == AtomicOrdering::Unordered) && |
263 | !isVolatile(); |
264 | } |
265 | |
266 | Value *getPointerOperand() { return getOperand(0); } |
267 | const Value *getPointerOperand() const { return getOperand(0); } |
268 | static unsigned getPointerOperandIndex() { return 0U; } |
269 | Type *getPointerOperandType() const { return getPointerOperand()->getType(); } |
270 | |
271 | /// Returns the address space of the pointer operand. |
272 | unsigned getPointerAddressSpace() const { |
273 | return getPointerOperandType()->getPointerAddressSpace(); |
274 | } |
275 | |
276 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
277 | static bool classof(const Instruction *I) { |
278 | return I->getOpcode() == Instruction::Load; |
279 | } |
280 | static bool classof(const Value *V) { |
281 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
282 | } |
283 | |
284 | private: |
285 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
286 | // method so that subclasses cannot accidentally use it. |
287 | template <typename Bitfield> |
288 | void setSubclassData(typename Bitfield::Type Value) { |
289 | Instruction::setSubclassData<Bitfield>(Value); |
290 | } |
291 | |
292 | /// The synchronization scope ID of this load instruction. Not quite enough |
293 | /// room in SubClassData for everything, so synchronization scope ID gets its |
294 | /// own field. |
295 | SyncScope::ID SSID; |
296 | }; |
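
A small sketch of the atomic-load API above (hypothetical names: Ctx is an LLVMContext, Ptr an i32* Value, InsertPt an existing Instruction):

  // Illustrative only: an acquire load of an i32 at system scope.
  LoadInst *LI = new LoadInst(Type::getInt32Ty(Ctx), Ptr, "val",
                              /*isVolatile=*/false, Align(4),
                              AtomicOrdering::Acquire, SyncScope::System, InsertPt);
  bool Simple    = LI->isSimple();    // false: the load is atomic
  bool Unordered = LI->isUnordered(); // false: Acquire is stronger than Unordered
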
297 | |
298 | //===----------------------------------------------------------------------===// |
299 | // StoreInst Class |
300 | //===----------------------------------------------------------------------===// |
301 | |
302 | /// An instruction for storing to memory. |
303 | class StoreInst : public Instruction { |
304 | using VolatileField = BoolBitfieldElementT<0>; |
305 | using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; |
306 | using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; |
307 | static_assert( |
308 | Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), |
309 | "Bitfields must be contiguous"); |
310 | |
311 | void AssertOK(); |
312 | |
313 | protected: |
314 | // Note: Instruction needs to be a friend here to call cloneImpl. |
315 | friend class Instruction; |
316 | |
317 | StoreInst *cloneImpl() const; |
318 | |
319 | public: |
320 | StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); |
321 | StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); |
322 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); |
323 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); |
324 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
325 | Instruction *InsertBefore = nullptr); |
326 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
327 | BasicBlock *InsertAtEnd); |
328 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
329 | AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, |
330 | Instruction *InsertBefore = nullptr); |
331 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
332 | AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); |
333 | |
334 | // allocate space for exactly two operands |
335 | void *operator new(size_t s) { |
336 | return User::operator new(s, 2); |
337 | } |
338 | |
339 | /// Return true if this is a store to a volatile memory location. |
340 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
341 | |
342 | /// Specify whether this is a volatile store or not. |
343 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
344 | |
345 | /// Transparently provide more efficient getOperand methods. |
346 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
347 | |
348 | /// Return the alignment of the access that is being performed |
349 | /// FIXME: Remove this function once transition to Align is over. |
350 | /// Use getAlign() instead. |
351 | unsigned getAlignment() const { return getAlign().value(); } |
352 | |
353 | Align getAlign() const { |
354 | return Align(1ULL << (getSubclassData<AlignmentField>())); |
355 | } |
356 | |
357 | void setAlignment(Align Align) { |
358 | setSubclassData<AlignmentField>(Log2(Align)); |
359 | } |
360 | |
361 | /// Returns the ordering constraint of this store instruction. |
362 | AtomicOrdering getOrdering() const { |
363 | return getSubclassData<OrderingField>(); |
364 | } |
365 | |
366 | /// Sets the ordering constraint of this store instruction. May not be |
367 | /// Acquire or AcquireRelease. |
368 | void setOrdering(AtomicOrdering Ordering) { |
369 | setSubclassData<OrderingField>(Ordering); |
370 | } |
371 | |
372 | /// Returns the synchronization scope ID of this store instruction. |
373 | SyncScope::ID getSyncScopeID() const { |
374 | return SSID; |
375 | } |
376 | |
377 | /// Sets the synchronization scope ID of this store instruction. |
378 | void setSyncScopeID(SyncScope::ID SSID) { |
379 | this->SSID = SSID; |
380 | } |
381 | |
382 | /// Sets the ordering constraint and the synchronization scope ID of this |
383 | /// store instruction. |
384 | void setAtomic(AtomicOrdering Ordering, |
385 | SyncScope::ID SSID = SyncScope::System) { |
386 | setOrdering(Ordering); |
387 | setSyncScopeID(SSID); |
388 | } |
389 | |
390 | bool isSimple() const { return !isAtomic() && !isVolatile(); } |
391 | |
392 | bool isUnordered() const { |
393 | return (getOrdering() == AtomicOrdering::NotAtomic || |
394 | getOrdering() == AtomicOrdering::Unordered) && |
395 | !isVolatile(); |
396 | } |
397 | |
398 | Value *getValueOperand() { return getOperand(0); } |
399 | const Value *getValueOperand() const { return getOperand(0); } |
400 | |
401 | Value *getPointerOperand() { return getOperand(1); } |
402 | const Value *getPointerOperand() const { return getOperand(1); } |
403 | static unsigned getPointerOperandIndex() { return 1U; } |
404 | Type *getPointerOperandType() const { return getPointerOperand()->getType(); } |
405 | |
406 | /// Returns the address space of the pointer operand. |
407 | unsigned getPointerAddressSpace() const { |
408 | return getPointerOperandType()->getPointerAddressSpace(); |
409 | } |
410 | |
411 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
412 | static bool classof(const Instruction *I) { |
413 | return I->getOpcode() == Instruction::Store; |
414 | } |
415 | static bool classof(const Value *V) { |
416 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
417 | } |
418 | |
419 | private: |
420 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
421 | // method so that subclasses cannot accidentally use it. |
422 | template <typename Bitfield> |
423 | void setSubclassData(typename Bitfield::Type Value) { |
424 | Instruction::setSubclassData<Bitfield>(Value); |
425 | } |
426 | |
427 | /// The synchronization scope ID of this store instruction. Not quite enough |
428 | /// room in SubClassData for everything, so synchronization scope ID gets its |
429 | /// own field. |
430 | SyncScope::ID SSID; |
431 | }; |
432 | |
433 | template <> |
434 | struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { |
435 | }; |
436 | |
437 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438 | |
439 | //===----------------------------------------------------------------------===// |
440 | // FenceInst Class |
441 | //===----------------------------------------------------------------------===// |
442 | |
443 | /// An instruction for ordering other memory operations. |
444 | class FenceInst : public Instruction { |
445 | using OrderingField = AtomicOrderingBitfieldElementT<0>; |
446 | |
447 | void Init(AtomicOrdering Ordering, SyncScope::ID SSID); |
448 | |
449 | protected: |
450 | // Note: Instruction needs to be a friend here to call cloneImpl. |
451 | friend class Instruction; |
452 | |
453 | FenceInst *cloneImpl() const; |
454 | |
455 | public: |
456 | // Ordering may only be Acquire, Release, AcquireRelease, or |
457 | // SequentiallyConsistent. |
458 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, |
459 | SyncScope::ID SSID = SyncScope::System, |
460 | Instruction *InsertBefore = nullptr); |
461 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, |
462 | BasicBlock *InsertAtEnd); |
463 | |
464 | // allocate space for exactly zero operands |
465 | void *operator new(size_t s) { |
466 | return User::operator new(s, 0); |
467 | } |
468 | |
469 | /// Returns the ordering constraint of this fence instruction. |
470 | AtomicOrdering getOrdering() const { |
471 | return getSubclassData<OrderingField>(); |
472 | } |
473 | |
474 | /// Sets the ordering constraint of this fence instruction. May only be |
475 | /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. |
476 | void setOrdering(AtomicOrdering Ordering) { |
477 | setSubclassData<OrderingField>(Ordering); |
478 | } |
479 | |
480 | /// Returns the synchronization scope ID of this fence instruction. |
481 | SyncScope::ID getSyncScopeID() const { |
482 | return SSID; |
483 | } |
484 | |
485 | /// Sets the synchronization scope ID of this fence instruction. |
486 | void setSyncScopeID(SyncScope::ID SSID) { |
487 | this->SSID = SSID; |
488 | } |
489 | |
490 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
491 | static bool classof(const Instruction *I) { |
492 | return I->getOpcode() == Instruction::Fence; |
493 | } |
494 | static bool classof(const Value *V) { |
495 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
496 | } |
497 | |
498 | private: |
499 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
500 | // method so that subclasses cannot accidentally use it. |
501 | template <typename Bitfield> |
502 | void setSubclassData(typename Bitfield::Type Value) { |
503 | Instruction::setSubclassData<Bitfield>(Value); |
504 | } |
505 | |
506 | /// The synchronization scope ID of this fence instruction. Not quite enough |
507 | /// room in SubClassData for everything, so synchronization scope ID gets its |
508 | /// own field. |
509 | SyncScope::ID SSID; |
510 | }; |
511 | |
512 | //===----------------------------------------------------------------------===// |
513 | // AtomicCmpXchgInst Class |
514 | //===----------------------------------------------------------------------===// |
515 | |
516 | /// An instruction that atomically checks whether a |
517 | /// specified value is in a memory location, and, if it is, stores a new value |
518 | /// there. The value returned by this instruction is a pair containing the |
519 | /// original value as first element, and an i1 indicating success (true) or |
520 | /// failure (false) as second element. |
521 | /// |
522 | class AtomicCmpXchgInst : public Instruction { |
523 | void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align, |
524 | AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, |
525 | SyncScope::ID SSID); |
526 | |
527 | template <unsigned Offset> |
528 | using AtomicOrderingBitfieldElement = |
529 | typename Bitfield::Element<AtomicOrdering, Offset, 3, |
530 | AtomicOrdering::LAST>; |
531 | |
532 | protected: |
533 | // Note: Instruction needs to be a friend here to call cloneImpl. |
534 | friend class Instruction; |
535 | |
536 | AtomicCmpXchgInst *cloneImpl() const; |
537 | |
538 | public: |
539 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, |
540 | AtomicOrdering SuccessOrdering, |
541 | AtomicOrdering FailureOrdering, SyncScope::ID SSID, |
542 | Instruction *InsertBefore = nullptr); |
543 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, |
544 | AtomicOrdering SuccessOrdering, |
545 | AtomicOrdering FailureOrdering, SyncScope::ID SSID, |
546 | BasicBlock *InsertAtEnd); |
547 | |
548 | // allocate space for exactly three operands |
549 | void *operator new(size_t s) { |
550 | return User::operator new(s, 3); |
551 | } |
552 | |
553 | using VolatileField = BoolBitfieldElementT<0>; |
554 | using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; |
555 | using SuccessOrderingField = |
556 | AtomicOrderingBitfieldElementT<WeakField::NextBit>; |
557 | using FailureOrderingField = |
558 | AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; |
559 | using AlignmentField = |
560 | AlignmentBitfieldElementT<FailureOrderingField::NextBit>; |
561 | static_assert( |
562 | Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, |
563 | FailureOrderingField, AlignmentField>(), |
564 | "Bitfields must be contiguous"); |
565 | |
566 | /// Return the alignment of the memory location that is being accessed by the
567 | /// instruction. |
568 | Align getAlign() const { |
569 | return Align(1ULL << getSubclassData<AlignmentField>()); |
570 | } |
571 | |
572 | void setAlignment(Align Align) { |
573 | setSubclassData<AlignmentField>(Log2(Align)); |
574 | } |
575 | |
576 | /// Return true if this is a cmpxchg from a volatile memory |
577 | /// location. |
578 | /// |
579 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
580 | |
581 | /// Specify whether this is a volatile cmpxchg. |
582 | /// |
583 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
584 | |
585 | /// Return true if this cmpxchg may spuriously fail. |
586 | bool isWeak() const { return getSubclassData<WeakField>(); } |
587 | |
588 | void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } |
589 | |
590 | /// Transparently provide more efficient getOperand methods. |
591 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
592 | |
593 | /// Returns the success ordering constraint of this cmpxchg instruction. |
594 | AtomicOrdering getSuccessOrdering() const { |
595 | return getSubclassData<SuccessOrderingField>(); |
596 | } |
597 | |
598 | /// Sets the success ordering constraint of this cmpxchg instruction. |
599 | void setSuccessOrdering(AtomicOrdering Ordering) { |
600 | assert(Ordering != AtomicOrdering::NotAtomic &&
601 | "CmpXchg instructions can only be atomic.");
602 | setSubclassData<SuccessOrderingField>(Ordering); |
603 | } |
604 | |
605 | /// Returns the failure ordering constraint of this cmpxchg instruction. |
606 | AtomicOrdering getFailureOrdering() const { |
607 | return getSubclassData<FailureOrderingField>(); |
608 | } |
609 | |
610 | /// Sets the failure ordering constraint of this cmpxchg instruction. |
611 | void setFailureOrdering(AtomicOrdering Ordering) { |
612 | assert(Ordering != AtomicOrdering::NotAtomic &&
613 | "CmpXchg instructions can only be atomic.");
614 | setSubclassData<FailureOrderingField>(Ordering); |
615 | } |
616 | |
617 | /// Returns the synchronization scope ID of this cmpxchg instruction. |
618 | SyncScope::ID getSyncScopeID() const { |
619 | return SSID; |
620 | } |
621 | |
622 | /// Sets the synchronization scope ID of this cmpxchg instruction. |
623 | void setSyncScopeID(SyncScope::ID SSID) { |
624 | this->SSID = SSID; |
625 | } |
626 | |
627 | Value *getPointerOperand() { return getOperand(0); } |
628 | const Value *getPointerOperand() const { return getOperand(0); } |
629 | static unsigned getPointerOperandIndex() { return 0U; } |
630 | |
631 | Value *getCompareOperand() { return getOperand(1); } |
632 | const Value *getCompareOperand() const { return getOperand(1); } |
633 | |
634 | Value *getNewValOperand() { return getOperand(2); } |
635 | const Value *getNewValOperand() const { return getOperand(2); } |
636 | |
637 | /// Returns the address space of the pointer operand. |
638 | unsigned getPointerAddressSpace() const { |
639 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
640 | } |
641 | |
642 | /// Returns the strongest permitted ordering on failure, given the |
643 | /// desired ordering on success. |
644 | /// |
645 | /// If the comparison in a cmpxchg operation fails, there is no atomic store |
646 | /// so release semantics cannot be provided. So this function drops explicit |
647 | /// Release requests from the AtomicOrdering. A SequentiallyConsistent |
648 | /// operation would remain SequentiallyConsistent. |
649 | static AtomicOrdering |
650 | getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { |
651 | switch (SuccessOrdering) { |
652 | default: |
653 | llvm_unreachable("invalid cmpxchg success ordering");
654 | case AtomicOrdering::Release: |
655 | case AtomicOrdering::Monotonic: |
656 | return AtomicOrdering::Monotonic; |
657 | case AtomicOrdering::AcquireRelease: |
658 | case AtomicOrdering::Acquire: |
659 | return AtomicOrdering::Acquire; |
660 | case AtomicOrdering::SequentiallyConsistent: |
661 | return AtomicOrdering::SequentiallyConsistent; |
662 | } |
663 | } |
664 | |
665 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
666 | static bool classof(const Instruction *I) { |
667 | return I->getOpcode() == Instruction::AtomicCmpXchg; |
668 | } |
669 | static bool classof(const Value *V) { |
670 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
671 | } |
672 | |
673 | private: |
674 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
675 | // method so that subclasses cannot accidentally use it. |
676 | template <typename Bitfield> |
677 | void setSubclassData(typename Bitfield::Type Value) { |
678 | Instruction::setSubclassData<Bitfield>(Value); |
679 | } |
680 | |
681 | /// The synchronization scope ID of this cmpxchg instruction. Not quite |
682 | /// enough room in SubClassData for everything, so synchronization scope ID |
683 | /// gets its own field. |
684 | SyncScope::ID SSID; |
685 | }; |
686 | |
687 | template <> |
688 | struct OperandTraits<AtomicCmpXchgInst> : |
689 | public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { |
690 | }; |
691 | |
692 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
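
A sketch of how the {old value, success flag} pair described in the class comment is typically consumed (hypothetical names: Ptr points at an i32, Expected and Desired are i32 Values, InsertPt is an existing Instruction):

  // Illustrative only: a seq_cst cmpxchg followed by unpacking its two results.
  AtomicOrdering Success = AtomicOrdering::SequentiallyConsistent;
  AtomicOrdering Failure =
      AtomicCmpXchgInst::getStrongestFailureOrdering(Success); // stays seq_cst
  auto *CAS = new AtomicCmpXchgInst(Ptr, Expected, Desired, Align(4), Success,
                                    Failure, SyncScope::System, InsertPt);
  Value *OldVal  = ExtractValueInst::Create(CAS, {0}, "old", InsertPt); // element 0
  Value *DidSwap = ExtractValueInst::Create(CAS, {1}, "ok", InsertPt);  // i1 flag
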
693 | |
694 | //===----------------------------------------------------------------------===// |
695 | // AtomicRMWInst Class |
696 | //===----------------------------------------------------------------------===// |
697 | |
698 | /// an instruction that atomically reads a memory location, |
699 | /// combines it with another value, and then stores the result back. Returns |
700 | /// the old value. |
701 | /// |
702 | class AtomicRMWInst : public Instruction { |
703 | protected: |
704 | // Note: Instruction needs to be a friend here to call cloneImpl. |
705 | friend class Instruction; |
706 | |
707 | AtomicRMWInst *cloneImpl() const; |
708 | |
709 | public: |
710 | /// This enumeration lists the possible modifications atomicrmw can make. In |
711 | /// the descriptions, 'p' is the pointer to the instruction's memory location, |
712 | /// 'old' is the initial value of *p, and 'v' is the other value passed to the |
713 | /// instruction. These instructions always return 'old'. |
714 | enum BinOp : unsigned { |
715 | /// *p = v |
716 | Xchg, |
717 | /// *p = old + v |
718 | Add, |
719 | /// *p = old - v |
720 | Sub, |
721 | /// *p = old & v |
722 | And, |
723 | /// *p = ~(old & v) |
724 | Nand, |
725 | /// *p = old | v |
726 | Or, |
727 | /// *p = old ^ v |
728 | Xor, |
729 | /// *p = old >signed v ? old : v |
730 | Max, |
731 | /// *p = old <signed v ? old : v |
732 | Min, |
733 | /// *p = old >unsigned v ? old : v |
734 | UMax, |
735 | /// *p = old <unsigned v ? old : v |
736 | UMin, |
737 | |
738 | /// *p = old + v |
739 | FAdd, |
740 | |
741 | /// *p = old - v |
742 | FSub, |
743 | |
744 | FIRST_BINOP = Xchg, |
745 | LAST_BINOP = FSub, |
746 | BAD_BINOP |
747 | }; |
748 | |
749 | private: |
750 | template <unsigned Offset> |
751 | using AtomicOrderingBitfieldElement = |
752 | typename Bitfield::Element<AtomicOrdering, Offset, 3, |
753 | AtomicOrdering::LAST>; |
754 | |
755 | template <unsigned Offset> |
756 | using BinOpBitfieldElement = |
757 | typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>; |
758 | |
759 | public: |
760 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, |
761 | AtomicOrdering Ordering, SyncScope::ID SSID, |
762 | Instruction *InsertBefore = nullptr); |
763 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, |
764 | AtomicOrdering Ordering, SyncScope::ID SSID, |
765 | BasicBlock *InsertAtEnd); |
766 | |
767 | // allocate space for exactly two operands |
768 | void *operator new(size_t s) { |
769 | return User::operator new(s, 2); |
770 | } |
771 | |
772 | using VolatileField = BoolBitfieldElementT<0>; |
773 | using AtomicOrderingField = |
774 | AtomicOrderingBitfieldElementT<VolatileField::NextBit>; |
775 | using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>; |
776 | using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>; |
777 | static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField, |
778 | OperationField, AlignmentField>(), |
779 | "Bitfields must be contiguous"); |
780 | |
781 | BinOp getOperation() const { return getSubclassData<OperationField>(); } |
782 | |
783 | static StringRef getOperationName(BinOp Op); |
784 | |
785 | static bool isFPOperation(BinOp Op) { |
786 | switch (Op) { |
787 | case AtomicRMWInst::FAdd: |
788 | case AtomicRMWInst::FSub: |
789 | return true; |
790 | default: |
791 | return false; |
792 | } |
793 | } |
794 | |
795 | void setOperation(BinOp Operation) { |
796 | setSubclassData<OperationField>(Operation); |
797 | } |
798 | |
799 | /// Return the alignment of the memory location that is being accessed by the
800 | /// instruction. |
801 | Align getAlign() const { |
802 | return Align(1ULL << getSubclassData<AlignmentField>()); |
803 | } |
804 | |
805 | void setAlignment(Align Align) { |
806 | setSubclassData<AlignmentField>(Log2(Align)); |
807 | } |
808 | |
809 | /// Return true if this is a RMW on a volatile memory location. |
810 | /// |
811 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
812 | |
813 | /// Specify whether this is a volatile RMW or not. |
814 | /// |
815 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
816 | |
817 | /// Transparently provide more efficient getOperand methods. |
818 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
819 | |
820 | /// Returns the ordering constraint of this rmw instruction. |
821 | AtomicOrdering getOrdering() const { |
822 | return getSubclassData<AtomicOrderingField>(); |
823 | } |
824 | |
825 | /// Sets the ordering constraint of this rmw instruction. |
826 | void setOrdering(AtomicOrdering Ordering) { |
827 | assert(Ordering != AtomicOrdering::NotAtomic &&
828 | "atomicrmw instructions can only be atomic.");
829 | setSubclassData<AtomicOrderingField>(Ordering); |
830 | } |
831 | |
832 | /// Returns the synchronization scope ID of this rmw instruction. |
833 | SyncScope::ID getSyncScopeID() const { |
834 | return SSID; |
835 | } |
836 | |
837 | /// Sets the synchronization scope ID of this rmw instruction. |
838 | void setSyncScopeID(SyncScope::ID SSID) { |
839 | this->SSID = SSID; |
840 | } |
841 | |
842 | Value *getPointerOperand() { return getOperand(0); } |
843 | const Value *getPointerOperand() const { return getOperand(0); } |
844 | static unsigned getPointerOperandIndex() { return 0U; } |
845 | |
846 | Value *getValOperand() { return getOperand(1); } |
847 | const Value *getValOperand() const { return getOperand(1); } |
848 | |
849 | /// Returns the address space of the pointer operand. |
850 | unsigned getPointerAddressSpace() const { |
851 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
852 | } |
853 | |
854 | bool isFloatingPointOperation() const { |
855 | return isFPOperation(getOperation()); |
856 | } |
857 | |
858 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
859 | static bool classof(const Instruction *I) { |
860 | return I->getOpcode() == Instruction::AtomicRMW; |
861 | } |
862 | static bool classof(const Value *V) { |
863 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
864 | } |
865 | |
866 | private: |
867 | void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align, |
868 | AtomicOrdering Ordering, SyncScope::ID SSID); |
869 | |
870 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
871 | // method so that subclasses cannot accidentally use it. |
872 | template <typename Bitfield> |
873 | void setSubclassData(typename Bitfield::Type Value) { |
874 | Instruction::setSubclassData<Bitfield>(Value); |
875 | } |
876 | |
877 | /// The synchronization scope ID of this rmw instruction. Not quite enough |
878 | /// room in SubClassData for everything, so synchronization scope ID gets its |
879 | /// own field. |
880 | SyncScope::ID SSID; |
881 | }; |
882 | |
883 | template <> |
884 | struct OperandTraits<AtomicRMWInst> |
885 | : public FixedNumOperandTraits<AtomicRMWInst,2> { |
886 | }; |
887 | |
888 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
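
A matching sketch for the read-modify-write form (same hypothetical Ctx, Ptr and InsertPt as in the earlier sketches):

  // Illustrative only: `atomicrmw add i32* Ptr, i32 1 monotonic`. The instruction
  // itself is the value *Ptr held before the addition, as the BinOp comment notes.
  auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr,
                                ConstantInt::get(Type::getInt32Ty(Ctx), 1),
                                Align(4), AtomicOrdering::Monotonic,
                                SyncScope::System, InsertPt);
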
889 | |
890 | //===----------------------------------------------------------------------===// |
891 | // GetElementPtrInst Class |
892 | //===----------------------------------------------------------------------===// |
893 | |
894 | // checkGEPType - Simple wrapper function to give a better assertion failure |
895 | // message on bad indexes for a gep instruction. |
896 | // |
897 | inline Type *checkGEPType(Type *Ty) { |
898 | assert(Ty && "Invalid GetElementPtrInst indices for type!");
899 | return Ty; |
900 | } |
901 | |
902 | /// an instruction for type-safe pointer arithmetic to |
903 | /// access elements of arrays and structs |
904 | /// |
905 | class GetElementPtrInst : public Instruction { |
906 | Type *SourceElementType; |
907 | Type *ResultElementType; |
908 | |
909 | GetElementPtrInst(const GetElementPtrInst &GEPI); |
910 | |
911 | /// Constructors - Create a getelementptr instruction with a base pointer and a
912 | /// list of indices. The first ctor can optionally insert before an existing |
913 | /// instruction, the second appends the new instruction to the specified |
914 | /// BasicBlock. |
915 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
916 | ArrayRef<Value *> IdxList, unsigned Values, |
917 | const Twine &NameStr, Instruction *InsertBefore); |
918 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
919 | ArrayRef<Value *> IdxList, unsigned Values, |
920 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
921 | |
922 | void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); |
923 | |
924 | protected: |
925 | // Note: Instruction needs to be a friend here to call cloneImpl. |
926 | friend class Instruction; |
927 | |
928 | GetElementPtrInst *cloneImpl() const; |
929 | |
930 | public: |
931 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
932 | ArrayRef<Value *> IdxList, |
933 | const Twine &NameStr = "", |
934 | Instruction *InsertBefore = nullptr) { |
935 | unsigned Values = 1 + unsigned(IdxList.size()); |
936 | if (!PointeeType) |
937 | PointeeType = |
938 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
939 | else |
940 | assert(
941 | PointeeType ==
942 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
943 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
944 | NameStr, InsertBefore); |
945 | } |
946 | |
947 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
948 | ArrayRef<Value *> IdxList, |
949 | const Twine &NameStr, |
950 | BasicBlock *InsertAtEnd) { |
951 | unsigned Values = 1 + unsigned(IdxList.size()); |
952 | if (!PointeeType) |
953 | PointeeType = |
954 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
955 | else |
956 | assert(
957 | PointeeType ==
958 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
959 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
960 | NameStr, InsertAtEnd); |
961 | } |
962 | |
963 | /// Create an "inbounds" getelementptr. See the documentation for the |
964 | /// "inbounds" flag in LangRef.html for details. |
965 | static GetElementPtrInst *CreateInBounds(Value *Ptr, |
966 | ArrayRef<Value *> IdxList, |
967 | const Twine &NameStr = "", |
968 | Instruction *InsertBefore = nullptr){ |
969 | return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore); |
970 | } |
971 | |
972 | static GetElementPtrInst * |
973 | CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, |
974 | const Twine &NameStr = "", |
975 | Instruction *InsertBefore = nullptr) { |
976 | GetElementPtrInst *GEP = |
977 | Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); |
978 | GEP->setIsInBounds(true); |
979 | return GEP; |
980 | } |
981 | |
982 | static GetElementPtrInst *CreateInBounds(Value *Ptr, |
983 | ArrayRef<Value *> IdxList, |
984 | const Twine &NameStr, |
985 | BasicBlock *InsertAtEnd) { |
986 | return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd); |
987 | } |
988 | |
989 | static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, |
990 | ArrayRef<Value *> IdxList, |
991 | const Twine &NameStr, |
992 | BasicBlock *InsertAtEnd) { |
993 | GetElementPtrInst *GEP = |
994 | Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); |
995 | GEP->setIsInBounds(true); |
996 | return GEP; |
997 | } |
998 | |
999 | /// Transparently provide more efficient getOperand methods. |
1000 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1001 | |
1002 | Type *getSourceElementType() const { return SourceElementType; } |
1003 | |
1004 | void setSourceElementType(Type *Ty) { SourceElementType = Ty; } |
1005 | void setResultElementType(Type *Ty) { ResultElementType = Ty; } |
1006 | |
1007 | Type *getResultElementType() const { |
1008 | assert(ResultElementType ==
1009 | cast<PointerType>(getType()->getScalarType())->getElementType());
1010 | return ResultElementType; |
1011 | } |
1012 | |
1013 | /// Returns the address space of this instruction's pointer type. |
1014 | unsigned getAddressSpace() const { |
1015 | // Note that this is always the same as the pointer operand's address space |
1016 | // and that is cheaper to compute, so cheat here. |
1017 | return getPointerAddressSpace(); |
1018 | } |
1019 | |
1020 | /// Returns the result type of a getelementptr with the given source |
1021 | /// element type and indexes. |
1022 | /// |
1023 | /// Null is returned if the indices are invalid for the specified |
1024 | /// source element type. |
1025 | static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); |
1026 | static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); |
1027 | static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); |
1028 | |
1029 | /// Return the type of the element at the given index of an indexable |
1030 | /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})". |
1031 | /// |
1032 | /// Returns null if the type can't be indexed, or the given index is not |
1033 | /// legal for the given type. |
1034 | static Type *getTypeAtIndex(Type *Ty, Value *Idx); |
1035 | static Type *getTypeAtIndex(Type *Ty, uint64_t Idx); |
1036 | |
1037 | inline op_iterator idx_begin() { return op_begin()+1; } |
1038 | inline const_op_iterator idx_begin() const { return op_begin()+1; } |
1039 | inline op_iterator idx_end() { return op_end(); } |
1040 | inline const_op_iterator idx_end() const { return op_end(); } |
1041 | |
1042 | inline iterator_range<op_iterator> indices() { |
1043 | return make_range(idx_begin(), idx_end()); |
1044 | } |
1045 | |
1046 | inline iterator_range<const_op_iterator> indices() const { |
1047 | return make_range(idx_begin(), idx_end()); |
1048 | } |
1049 | |
1050 | Value *getPointerOperand() { |
1051 | return getOperand(0); |
1052 | } |
1053 | const Value *getPointerOperand() const { |
1054 | return getOperand(0); |
1055 | } |
1056 | static unsigned getPointerOperandIndex() { |
1057 | return 0U; // get index for modifying correct operand. |
1058 | } |
1059 | |
1060 | /// Method to return the pointer operand as a |
1061 | /// PointerType. |
1062 | Type *getPointerOperandType() const { |
1063 | return getPointerOperand()->getType(); |
1064 | } |
1065 | |
1066 | /// Returns the address space of the pointer operand. |
1067 | unsigned getPointerAddressSpace() const { |
1068 | return getPointerOperandType()->getPointerAddressSpace(); |
1069 | } |
1070 | |
1071 | /// Returns the pointer type returned by the GEP |
1072 | /// instruction, which may be a vector of pointers. |
1073 | static Type *getGEPReturnType(Type *ElTy, Value *Ptr, |
1074 | ArrayRef<Value *> IdxList) { |
1075 | Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)), |
1076 | Ptr->getType()->getPointerAddressSpace()); |
1077 | // Vector GEP |
1078 | if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) { |
1079 | ElementCount EltCount = PtrVTy->getElementCount(); |
1080 | return VectorType::get(PtrTy, EltCount); |
1081 | } |
1082 | for (Value *Index : IdxList) |
1083 | if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { |
1084 | ElementCount EltCount = IndexVTy->getElementCount(); |
1085 | return VectorType::get(PtrTy, EltCount); |
1086 | } |
1087 | // Scalar GEP |
1088 | return PtrTy; |
1089 | } |
1090 | |
1091 | unsigned getNumIndices() const { // Note: always non-negative |
1092 | return getNumOperands() - 1; |
1093 | } |
1094 | |
1095 | bool hasIndices() const { |
1096 | return getNumOperands() > 1; |
1097 | } |
1098 | |
1099 | /// Return true if all of the indices of this GEP are |
1100 | /// zeros. If so, the result pointer and the first operand have the same |
1101 | /// value, just potentially different types. |
1102 | bool hasAllZeroIndices() const; |
1103 | |
1104 | /// Return true if all of the indices of this GEP are |
1105 | /// constant integers. If so, the result pointer and the first operand have |
1106 | /// a constant offset between them. |
1107 | bool hasAllConstantIndices() const; |
1108 | |
1109 | /// Set or clear the inbounds flag on this GEP instruction. |
1110 | /// See LangRef.html for the meaning of inbounds on a getelementptr. |
1111 | void setIsInBounds(bool b = true); |
1112 | |
1113 | /// Determine whether the GEP has the inbounds flag. |
1114 | bool isInBounds() const; |
1115 | |
1116 | /// Accumulate the constant address offset of this GEP if possible. |
1117 | /// |
1118 | /// This routine accepts an APInt into which it will accumulate the constant |
1119 | /// offset of this GEP if the GEP is in fact constant. If the GEP is not |
1120 | /// all-constant, it returns false and the value of the offset APInt is |
1121 | /// undefined (it is *not* preserved!). The APInt passed into this routine |
1122 | /// must be at least as wide as the IntPtr type for the address space of |
1123 | /// the base GEP pointer. |
1124 | bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; |
1125 | |
1126 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1127 | static bool classof(const Instruction *I) { |
1128 | return (I->getOpcode() == Instruction::GetElementPtr); |
1129 | } |
1130 | static bool classof(const Value *V) { |
1131 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1132 | } |
1133 | }; |
1134 | |
1135 | template <> |
1136 | struct OperandTraits<GetElementPtrInst> : |
1137 | public VariadicOperandTraits<GetElementPtrInst, 1> { |
1138 | }; |
1139 | |
1140 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1141 | ArrayRef<Value *> IdxList, unsigned Values, |
1142 | const Twine &NameStr, |
1143 | Instruction *InsertBefore) |
1144 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1145 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1146 | Values, InsertBefore), |
1147 | SourceElementType(PointeeType), |
1148 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1149 | assert(ResultElementType ==
1150 | cast<PointerType>(getType()->getScalarType())->getElementType());
1151 | init(Ptr, IdxList, NameStr); |
1152 | } |
1153 | |
1154 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1155 | ArrayRef<Value *> IdxList, unsigned Values, |
1156 | const Twine &NameStr, |
1157 | BasicBlock *InsertAtEnd) |
1158 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1159 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1160 | Values, InsertAtEnd), |
1161 | SourceElementType(PointeeType), |
1162 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1163 | assert(ResultElementType ==
1164 | cast<PointerType>(getType()->getScalarType())->getElementType());
1165 | init(Ptr, IdxList, NameStr); |
1166 | } |
1167 | |
1168 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
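
Illustrative usage sketch, not part of the header: the helper name, the InsertPt instruction, and the use of DataLayout::getIndexSizeInBits are assumptions. It exercises the CreateInBounds factory and accumulateConstantOffset() documented above.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: build an inbounds GEP to element Idx of an array and, when every
// index is constant, recover the byte offset from the base pointer.
static Value *emitElementAddr(Value *Ptr, Type *ArrayTy, Value *Idx,
                              const DataLayout &DL, Instruction *InsertPt) {
  LLVMContext &Ctx = InsertPt->getContext();
  Value *Indices[] = {ConstantInt::get(Type::getInt64Ty(Ctx), 0), Idx};
  GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
      ArrayTy, Ptr, Indices, "elt.addr", InsertPt);

  // The APInt must be at least as wide as the index type of the GEP's
  // address space; on failure its contents are unspecified.
  APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
  if (GEP->accumulateConstantOffset(DL, Offset)) {
    // Offset now holds the constant distance, in bytes, from Ptr.
  }
  return GEP;
}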
1169 | |
1170 | //===----------------------------------------------------------------------===// |
1171 | // ICmpInst Class |
1172 | //===----------------------------------------------------------------------===// |
1173 | |
1174 | /// This instruction compares its operands according to the predicate given |
1175 | /// to the constructor. It only operates on integers or pointers. The operands |
1176 | /// must be identical types. |
1177 | /// Represents an integer comparison operator.
1178 | class ICmpInst: public CmpInst { |
1179 | void AssertOK() { |
1180 | assert(isIntPredicate() &&
1181 | "Invalid ICmp predicate value");
1182 | assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1183 | "Both operands to ICmp instruction are not of the same type!");
1184 | // Check that the operands are the right type |
1185 | assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1186 | getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1187 | "Invalid operand types for ICmp instruction");
1188 | } |
1189 | |
1190 | protected: |
1191 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1192 | friend class Instruction; |
1193 | |
1194 | /// Clone an identical ICmpInst |
1195 | ICmpInst *cloneImpl() const; |
1196 | |
1197 | public: |
1198 | /// Constructor with insert-before-instruction semantics. |
1199 | ICmpInst( |
1200 | Instruction *InsertBefore, ///< Where to insert |
1201 | Predicate pred, ///< The predicate to use for the comparison |
1202 | Value *LHS, ///< The left-hand-side of the expression |
1203 | Value *RHS, ///< The right-hand-side of the expression |
1204 | const Twine &NameStr = "" ///< Name of the instruction |
1205 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1206 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1207 | InsertBefore) { |
1208 | #ifndef NDEBUG |
1209 | AssertOK(); |
1210 | #endif |
1211 | } |
1212 | |
1213 | /// Constructor with insert-at-end semantics. |
1214 | ICmpInst( |
1215 | BasicBlock &InsertAtEnd, ///< Block to insert into. |
1216 | Predicate pred, ///< The predicate to use for the comparison |
1217 | Value *LHS, ///< The left-hand-side of the expression |
1218 | Value *RHS, ///< The right-hand-side of the expression |
1219 | const Twine &NameStr = "" ///< Name of the instruction |
1220 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1221 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1222 | &InsertAtEnd) { |
1223 | #ifndef NDEBUG |
1224 | AssertOK(); |
1225 | #endif |
1226 | } |
1227 | |
1228 | /// Constructor with no-insertion semantics |
1229 | ICmpInst( |
1230 | Predicate pred, ///< The predicate to use for the comparison |
1231 | Value *LHS, ///< The left-hand-side of the expression |
1232 | Value *RHS, ///< The right-hand-side of the expression |
1233 | const Twine &NameStr = "" ///< Name of the instruction |
1234 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1235 | Instruction::ICmp, pred, LHS, RHS, NameStr) { |
1236 | #ifndef NDEBUG |
1237 | AssertOK(); |
1238 | #endif |
1239 | } |
1240 | |
1241 | /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. |
1242 | /// @returns the predicate that would be the result if the operand were |
1243 | /// regarded as signed. |
1244 | /// Return the signed version of the predicate |
1245 | Predicate getSignedPredicate() const { |
1246 | return getSignedPredicate(getPredicate()); |
1247 | } |
1248 | |
1249 | /// This is a static version that you can use without an instruction. |
1250 | /// Return the signed version of the predicate. |
1251 | static Predicate getSignedPredicate(Predicate pred); |
1252 | |
1253 | /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. |
1254 | /// @returns the predicate that would be the result if the operand were |
1255 | /// regarded as unsigned. |
1256 | /// Return the unsigned version of the predicate |
1257 | Predicate getUnsignedPredicate() const { |
1258 | return getUnsignedPredicate(getPredicate()); |
1259 | } |
1260 | |
1261 | /// This is a static version that you can use without an instruction. |
1262 | /// Return the unsigned version of the predicate. |
1263 | static Predicate getUnsignedPredicate(Predicate pred); |
1264 | |
1265 | /// Return true if this predicate is either EQ or NE. This also |
1266 | /// tests for commutativity. |
1267 | static bool isEquality(Predicate P) { |
1268 | return P == ICMP_EQ || P == ICMP_NE; |
1269 | } |
1270 | |
1271 | /// Return true if this predicate is either EQ or NE. This also |
1272 | /// tests for commutativity. |
1273 | bool isEquality() const { |
1274 | return isEquality(getPredicate()); |
1275 | } |
1276 | |
1277 | /// @returns true if the predicate of this ICmpInst is commutative |
1278 | /// Determine if this relation is commutative. |
1279 | bool isCommutative() const { return isEquality(); } |
1280 | |
1281 | /// Return true if the predicate is relational (not EQ or NE). |
1282 | /// |
1283 | bool isRelational() const { |
1284 | return !isEquality(); |
1285 | } |
1286 | |
1287 | /// Return true if the predicate is relational (not EQ or NE). |
1288 | /// |
1289 | static bool isRelational(Predicate P) { |
1290 | return !isEquality(P); |
1291 | } |
1292 | |
1293 | /// Return true if the predicate is SGT or UGT. |
1294 | /// |
1295 | static bool isGT(Predicate P) { |
1296 | return P == ICMP_SGT || P == ICMP_UGT; |
1297 | } |
1298 | |
1299 | /// Return true if the predicate is SLT or ULT. |
1300 | /// |
1301 | static bool isLT(Predicate P) { |
1302 | return P == ICMP_SLT || P == ICMP_ULT; |
1303 | } |
1304 | |
1305 | /// Return true if the predicate is SGE or UGE. |
1306 | /// |
1307 | static bool isGE(Predicate P) { |
1308 | return P == ICMP_SGE || P == ICMP_UGE; |
1309 | } |
1310 | |
1311 | /// Return true if the predicate is SLE or ULE. |
1312 | /// |
1313 | static bool isLE(Predicate P) { |
1314 | return P == ICMP_SLE || P == ICMP_ULE; |
1315 | } |
1316 | |
1317 | /// Exchange the two operands to this instruction in such a way that it does |
1318 | /// not modify the semantics of the instruction. The predicate value may be |
1319 | /// changed to retain the same result if the predicate is order dependent |
1320 | /// (e.g. ult). |
1321 | /// Swap operands and adjust predicate. |
1322 | void swapOperands() { |
1323 | setPredicate(getSwappedPredicate()); |
1324 | Op<0>().swap(Op<1>()); |
1325 | } |
1326 | |
1327 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1328 | static bool classof(const Instruction *I) { |
1329 | return I->getOpcode() == Instruction::ICmp; |
1330 | } |
1331 | static bool classof(const Value *V) { |
1332 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1333 | } |
1334 | }; |
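
A minimal sketch of the predicate helpers above (the helper name and InsertPt are assumptions, not LLVM API): getSignedPredicate() maps unsigned forms to signed ones, and swapOperands() exchanges the operands while adjusting the predicate so the computed result is unchanged.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: emit an unsigned compare, then reinterpret it as the signed form.
static ICmpInst *emitCompare(Value *A, Value *B, Instruction *InsertPt) {
  auto *Cmp = new ICmpInst(InsertPt, ICmpInst::ICMP_ULT, A, B, "cmp");
  // ICMP_ULT regarded as signed is ICMP_SLT; EQ and NE are left alone.
  Cmp->setPredicate(Cmp->getSignedPredicate());
  // Operand order can be flipped without changing the result:
  // SLT becomes SGT with the operands exchanged.
  Cmp->swapOperands();
  return Cmp;
}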
1335 | |
1336 | //===----------------------------------------------------------------------===// |
1337 | // FCmpInst Class |
1338 | //===----------------------------------------------------------------------===// |
1339 | |
1340 | /// This instruction compares its operands according to the predicate given |
1341 | /// to the constructor. It only operates on floating point values or packed |
1342 | /// vectors of floating point values. The operands must be identical types. |
1343 | /// Represents a floating point comparison operator. |
1344 | class FCmpInst: public CmpInst { |
1345 | void AssertOK() { |
1346 | assert(isFPPredicate() && "Invalid FCmp predicate value");
1347 | assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1348 | "Both operands to FCmp instruction are not of the same type!");
1349 | // Check that the operands are the right type |
1350 | assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1351 | "Invalid operand types for FCmp instruction");
1352 | } |
1353 | |
1354 | protected: |
1355 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1356 | friend class Instruction; |
1357 | |
1358 | /// Clone an identical FCmpInst |
1359 | FCmpInst *cloneImpl() const; |
1360 | |
1361 | public: |
1362 | /// Constructor with insert-before-instruction semantics. |
1363 | FCmpInst( |
1364 | Instruction *InsertBefore, ///< Where to insert |
1365 | Predicate pred, ///< The predicate to use for the comparison |
1366 | Value *LHS, ///< The left-hand-side of the expression |
1367 | Value *RHS, ///< The right-hand-side of the expression |
1368 | const Twine &NameStr = "" ///< Name of the instruction |
1369 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1370 | Instruction::FCmp, pred, LHS, RHS, NameStr, |
1371 | InsertBefore) { |
1372 | AssertOK(); |
1373 | } |
1374 | |
1375 | /// Constructor with insert-at-end semantics. |
1376 | FCmpInst( |
1377 | BasicBlock &InsertAtEnd, ///< Block to insert into. |
1378 | Predicate pred, ///< The predicate to use for the comparison |
1379 | Value *LHS, ///< The left-hand-side of the expression |
1380 | Value *RHS, ///< The right-hand-side of the expression |
1381 | const Twine &NameStr = "" ///< Name of the instruction |
1382 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1383 | Instruction::FCmp, pred, LHS, RHS, NameStr, |
1384 | &InsertAtEnd) { |
1385 | AssertOK(); |
1386 | } |
1387 | |
1388 | /// Constructor with no-insertion semantics |
1389 | FCmpInst( |
1390 | Predicate Pred, ///< The predicate to use for the comparison |
1391 | Value *LHS, ///< The left-hand-side of the expression |
1392 | Value *RHS, ///< The right-hand-side of the expression |
1393 | const Twine &NameStr = "", ///< Name of the instruction |
1394 | Instruction *FlagsSource = nullptr |
1395 | ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, |
1396 | RHS, NameStr, nullptr, FlagsSource) { |
1397 | AssertOK(); |
1398 | } |
1399 | |
1400 | /// @returns true if the predicate of this instruction is EQ or NE. |
1401 | /// Determine if this is an equality predicate. |
1402 | static bool isEquality(Predicate Pred) { |
1403 | return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || |
1404 | Pred == FCMP_UNE; |
1405 | } |
1406 | |
1407 | /// @returns true if the predicate of this instruction is EQ or NE. |
1408 | /// Determine if this is an equality predicate. |
1409 | bool isEquality() const { return isEquality(getPredicate()); } |
1410 | |
1411 | /// @returns true if the predicate of this instruction is commutative. |
1412 | /// Determine if this is a commutative predicate. |
1413 | bool isCommutative() const { |
1414 | return isEquality() || |
1415 | getPredicate() == FCMP_FALSE || |
1416 | getPredicate() == FCMP_TRUE || |
1417 | getPredicate() == FCMP_ORD || |
1418 | getPredicate() == FCMP_UNO; |
1419 | } |
1420 | |
1421 | /// @returns true if the predicate is relational (not EQ or NE). |
1422 | /// Determine if this a relational predicate. |
1423 | bool isRelational() const { return !isEquality(); } |
1424 | |
1425 | /// Exchange the two operands to this instruction in such a way that it does |
1426 | /// not modify the semantics of the instruction. The predicate value may be |
1427 | /// changed to retain the same result if the predicate is order dependent |
1428 | /// (e.g. ult). |
1429 | /// Swap operands and adjust predicate. |
1430 | void swapOperands() { |
1431 | setPredicate(getSwappedPredicate()); |
1432 | Op<0>().swap(Op<1>()); |
1433 | } |
1434 | |
1435 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
1436 | static bool classof(const Instruction *I) { |
1437 | return I->getOpcode() == Instruction::FCmp; |
1438 | } |
1439 | static bool classof(const Value *V) { |
1440 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1441 | } |
1442 | }; |
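
A similar sketch for FCmpInst (assumed helper name and insertion point): only the EQ/NE-style, FALSE/TRUE, and ORD/UNO predicates are commutative, so an ordered less-than reports isCommutative() == false.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: ordered less-than on floating-point values.
static FCmpInst *emitOrderedLess(Value *A, Value *B, Instruction *InsertPt) {
  auto *Cmp = new FCmpInst(InsertPt, FCmpInst::FCMP_OLT, A, B, "fcmp");
  if (!Cmp->isCommutative()) {
    // Relational predicates care about operand order; swapOperands()
    // compensates by switching OLT to OGT.
    Cmp->swapOperands();
  }
  return Cmp;
}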
1443 | |
1444 | //===----------------------------------------------------------------------===// |
1445 | /// This class represents a function call, abstracting a target |
1446 | /// machine's calling convention. This class uses the low bit of the SubClassData
1447 | /// field to indicate whether or not this is a tail call. The rest of the bits |
1448 | /// hold the calling convention of the call. |
1449 | /// |
1450 | class CallInst : public CallBase { |
1451 | CallInst(const CallInst &CI); |
1452 | |
1453 | /// Construct a CallInst given a range of arguments.
1454 | ///
1455 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1456 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1457 | Instruction *InsertBefore); |
1458 | |
1459 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1460 | const Twine &NameStr, Instruction *InsertBefore) |
1461 | : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {} |
1462 | |
1463 | /// Construct a CallInst given a range of arguments.
1464 | ///
1465 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1466 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1467 | BasicBlock *InsertAtEnd); |
1468 | |
1469 | explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, |
1470 | Instruction *InsertBefore); |
1471 | |
1472 | CallInst(FunctionType *ty, Value *F, const Twine &NameStr, |
1473 | BasicBlock *InsertAtEnd); |
1474 | |
1475 | void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, |
1476 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
1477 | void init(FunctionType *FTy, Value *Func, const Twine &NameStr); |
1478 | |
1479 | /// Compute the number of operands to allocate. |
1480 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
1481 | // We need one operand for the called function, plus the input operand |
1482 | // counts provided. |
1483 | return 1 + NumArgs + NumBundleInputs; |
1484 | } |
1485 | |
1486 | protected: |
1487 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1488 | friend class Instruction; |
1489 | |
1490 | CallInst *cloneImpl() const; |
1491 | |
1492 | public: |
1493 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", |
1494 | Instruction *InsertBefore = nullptr) { |
1495 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); |
1496 | } |
1497 | |
1498 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1499 | const Twine &NameStr, |
1500 | Instruction *InsertBefore = nullptr) { |
1501 | return new (ComputeNumOperands(Args.size())) |
1502 | CallInst(Ty, Func, Args, None, NameStr, InsertBefore); |
1503 | } |
1504 | |
1505 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1506 | ArrayRef<OperandBundleDef> Bundles = None, |
1507 | const Twine &NameStr = "", |
1508 | Instruction *InsertBefore = nullptr) { |
1509 | const int NumOperands = |
1510 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
1511 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
1512 | |
1513 | return new (NumOperands, DescriptorBytes) |
1514 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); |
1515 | } |
1516 | |
1517 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, |
1518 | BasicBlock *InsertAtEnd) { |
1519 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); |
1520 | } |
1521 | |
1522 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1523 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1524 | return new (ComputeNumOperands(Args.size())) |
1525 | CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd); |
1526 | } |
1527 | |
1528 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1529 | ArrayRef<OperandBundleDef> Bundles, |
1530 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1531 | const int NumOperands = |
1532 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
1533 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
1534 | |
1535 | return new (NumOperands, DescriptorBytes) |
1536 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); |
1537 | } |
1538 | |
1539 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", |
1540 | Instruction *InsertBefore = nullptr) { |
1541 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, |
1542 | InsertBefore); |
1543 | } |
1544 | |
1545 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1546 | ArrayRef<OperandBundleDef> Bundles = None, |
1547 | const Twine &NameStr = "", |
1548 | Instruction *InsertBefore = nullptr) { |
1549 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, |
1550 | NameStr, InsertBefore); |
1551 | } |
1552 | |
1553 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1554 | const Twine &NameStr, |
1555 | Instruction *InsertBefore = nullptr) { |
1556 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, |
1557 | InsertBefore); |
1558 | } |
1559 | |
1560 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr, |
1561 | BasicBlock *InsertAtEnd) { |
1562 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, |
1563 | InsertAtEnd); |
1564 | } |
1565 | |
1566 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1567 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1568 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, |
1569 | InsertAtEnd); |
1570 | } |
1571 | |
1572 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1573 | ArrayRef<OperandBundleDef> Bundles, |
1574 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1575 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, |
1576 | NameStr, InsertAtEnd); |
1577 | } |
1578 | |
1579 | /// Create a clone of \p CI with a different set of operand bundles and |
1580 | /// insert it before \p InsertPt. |
1581 | /// |
1582 | /// The returned call instruction is identical to \p CI in every way except that
1583 | /// the operand bundles for the new instruction are set to the operand bundles |
1584 | /// in \p Bundles. |
1585 | static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, |
1586 | Instruction *InsertPt = nullptr); |
1587 | |
1588 | /// Generate the IR for a call to malloc: |
1589 | /// 1. Compute the malloc call's argument as the specified type's size, |
1590 | /// possibly multiplied by the array size if the array size is not |
1591 | /// constant 1. |
1592 | /// 2. Call malloc with that argument. |
1593 | /// 3. Bitcast the result of the malloc call to the specified type. |
1594 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
1595 | Type *AllocTy, Value *AllocSize, |
1596 | Value *ArraySize = nullptr, |
1597 | Function *MallocF = nullptr, |
1598 | const Twine &Name = ""); |
1599 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
1600 | Type *AllocTy, Value *AllocSize, |
1601 | Value *ArraySize = nullptr, |
1602 | Function *MallocF = nullptr, |
1603 | const Twine &Name = ""); |
1604 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
1605 | Type *AllocTy, Value *AllocSize, |
1606 | Value *ArraySize = nullptr, |
1607 | ArrayRef<OperandBundleDef> Bundles = None, |
1608 | Function *MallocF = nullptr, |
1609 | const Twine &Name = ""); |
1610 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
1611 | Type *AllocTy, Value *AllocSize, |
1612 | Value *ArraySize = nullptr, |
1613 | ArrayRef<OperandBundleDef> Bundles = None, |
1614 | Function *MallocF = nullptr, |
1615 | const Twine &Name = ""); |
1616 | /// Generate the IR for a call to the builtin free function. |
1617 | static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); |
1618 | static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); |
1619 | static Instruction *CreateFree(Value *Source, |
1620 | ArrayRef<OperandBundleDef> Bundles, |
1621 | Instruction *InsertBefore); |
1622 | static Instruction *CreateFree(Value *Source, |
1623 | ArrayRef<OperandBundleDef> Bundles, |
1624 | BasicBlock *InsertAtEnd); |
1625 | |
1626 | // Note that 'musttail' implies 'tail'. |
1627 | enum TailCallKind : unsigned { |
1628 | TCK_None = 0, |
1629 | TCK_Tail = 1, |
1630 | TCK_MustTail = 2, |
1631 | TCK_NoTail = 3, |
1632 | TCK_LAST = TCK_NoTail |
1633 | }; |
1634 | |
1635 | using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; |
1636 | static_assert( |
1637 | Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), |
1638 | "Bitfields must be contiguous"); |
1639 | |
1640 | TailCallKind getTailCallKind() const { |
1641 | return getSubclassData<TailCallKindField>(); |
1642 | } |
1643 | |
1644 | bool isTailCall() const { |
1645 | TailCallKind Kind = getTailCallKind(); |
1646 | return Kind == TCK_Tail || Kind == TCK_MustTail; |
1647 | } |
1648 | |
1649 | bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } |
1650 | |
1651 | bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } |
1652 | |
1653 | void setTailCallKind(TailCallKind TCK) { |
1654 | setSubclassData<TailCallKindField>(TCK); |
1655 | } |
1656 | |
1657 | void setTailCall(bool IsTc = true) { |
1658 | setTailCallKind(IsTc ? TCK_Tail : TCK_None); |
1659 | } |
1660 | |
1661 | /// Return true if the call can return twice |
1662 | bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } |
1663 | void setCanReturnTwice() { |
1664 | addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice); |
1665 | } |
1666 | |
1667 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1668 | static bool classof(const Instruction *I) { |
1669 | return I->getOpcode() == Instruction::Call; |
1670 | } |
1671 | static bool classof(const Value *V) { |
1672 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1673 | } |
1674 | |
1675 | /// Updates profile metadata by scaling it by \p S / \p T. |
1676 | void updateProfWeight(uint64_t S, uint64_t T); |
1677 | |
1678 | private: |
1679 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
1680 | // method so that subclasses cannot accidentally use it. |
1681 | template <typename Bitfield> |
1682 | void setSubclassData(typename Bitfield::Type Value) { |
1683 | Instruction::setSubclassData<Bitfield>(Value); |
1684 | } |
1685 | }; |
1686 | |
1687 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1688 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1689 | BasicBlock *InsertAtEnd) |
1690 | : CallBase(Ty->getReturnType(), Instruction::Call, |
1691 | OperandTraits<CallBase>::op_end(this) - |
1692 | (Args.size() + CountBundleInputs(Bundles) + 1), |
1693 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
1694 | InsertAtEnd) { |
1695 | init(Ty, Func, Args, Bundles, NameStr); |
1696 | } |
1697 | |
1698 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1699 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1700 | Instruction *InsertBefore) |
1701 | : CallBase(Ty->getReturnType(), Instruction::Call, |
1702 | OperandTraits<CallBase>::op_end(this) - |
1703 | (Args.size() + CountBundleInputs(Bundles) + 1), |
1704 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
1705 | InsertBefore) { |
1706 | init(Ty, Func, Args, Bundles, NameStr); |
1707 | } |
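
A usage sketch for the FunctionCallee overloads and the TailCallKind bitfield above; the callee name "callee" and the surrounding helper are hypothetical, not part of the header.

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Sketch: declare (or reuse) i32 @callee(i32), call it, and mark the call
// as an ordinary tail call.
static CallInst *emitTailCall(Module &M, Value *Arg, Instruction *InsertPt) {
  LLVMContext &Ctx = M.getContext();
  FunctionCallee Callee = M.getOrInsertFunction(
      "callee", Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx));
  CallInst *CI = CallInst::Create(Callee, {Arg}, "ret", InsertPt);
  // TCK_Tail is a hint; TCK_MustTail additionally imposes the LangRef
  // musttail requirements, and TCK_NoTail forbids tail-call lowering.
  CI->setTailCallKind(CallInst::TCK_Tail);
  return CI;
}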
1708 | |
1709 | //===----------------------------------------------------------------------===// |
1710 | // SelectInst Class |
1711 | //===----------------------------------------------------------------------===// |
1712 | |
1713 | /// This class represents the LLVM 'select' instruction. |
1714 | /// |
1715 | class SelectInst : public Instruction { |
1716 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
1717 | Instruction *InsertBefore) |
1718 | : Instruction(S1->getType(), Instruction::Select, |
1719 | &Op<0>(), 3, InsertBefore) { |
1720 | init(C, S1, S2); |
1721 | setName(NameStr); |
1722 | } |
1723 | |
1724 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
1725 | BasicBlock *InsertAtEnd) |
1726 | : Instruction(S1->getType(), Instruction::Select, |
1727 | &Op<0>(), 3, InsertAtEnd) { |
1728 | init(C, S1, S2); |
1729 | setName(NameStr); |
1730 | } |
1731 | |
1732 | void init(Value *C, Value *S1, Value *S2) { |
1733 | assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1734 | Op<0>() = C; |
1735 | Op<1>() = S1; |
1736 | Op<2>() = S2; |
1737 | } |
1738 | |
1739 | protected: |
1740 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1741 | friend class Instruction; |
1742 | |
1743 | SelectInst *cloneImpl() const; |
1744 | |
1745 | public: |
1746 | static SelectInst *Create(Value *C, Value *S1, Value *S2, |
1747 | const Twine &NameStr = "", |
1748 | Instruction *InsertBefore = nullptr, |
1749 | Instruction *MDFrom = nullptr) { |
1750 | SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); |
1751 | if (MDFrom) |
1752 | Sel->copyMetadata(*MDFrom); |
1753 | return Sel; |
1754 | } |
1755 | |
1756 | static SelectInst *Create(Value *C, Value *S1, Value *S2, |
1757 | const Twine &NameStr, |
1758 | BasicBlock *InsertAtEnd) { |
1759 | return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); |
1760 | } |
1761 | |
1762 | const Value *getCondition() const { return Op<0>(); } |
1763 | const Value *getTrueValue() const { return Op<1>(); } |
1764 | const Value *getFalseValue() const { return Op<2>(); } |
1765 | Value *getCondition() { return Op<0>(); } |
1766 | Value *getTrueValue() { return Op<1>(); } |
1767 | Value *getFalseValue() { return Op<2>(); } |
1768 | |
1769 | void setCondition(Value *V) { Op<0>() = V; } |
1770 | void setTrueValue(Value *V) { Op<1>() = V; } |
1771 | void setFalseValue(Value *V) { Op<2>() = V; } |
1772 | |
1773 | /// Swap the true and false values of the select instruction. |
1774 | /// This doesn't swap prof metadata. |
1775 | void swapValues() { Op<1>().swap(Op<2>()); } |
1776 | |
1777 | /// Return a string if the specified operands are invalid |
1778 | /// for a select operation, otherwise return null. |
1779 | static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); |
1780 | |
1781 | /// Transparently provide more efficient getOperand methods. |
1782 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1783 | |
1784 | OtherOps getOpcode() const { |
1785 | return static_cast<OtherOps>(Instruction::getOpcode()); |
1786 | } |
1787 | |
1788 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1789 | static bool classof(const Instruction *I) { |
1790 | return I->getOpcode() == Instruction::Select; |
1791 | } |
1792 | static bool classof(const Value *V) { |
1793 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1794 | } |
1795 | }; |
1796 | |
1797 | template <> |
1798 | struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { |
1799 | }; |
1800 | |
1801 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
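
A short sketch of the select factory and its operand check (helper name assumed): areInvalidOperands() returns a diagnostic string, or null when the operands are acceptable.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: create `select i1 Cond, TrueV, FalseV` after validating operands.
static Value *emitSelect(Value *Cond, Value *TrueV, Value *FalseV,
                         Instruction *InsertPt) {
  // Rejects e.g. a condition that is not i1 (or a vector of i1) and
  // mismatched true/false value types.
  if (SelectInst::areInvalidOperands(Cond, TrueV, FalseV))
    return nullptr;
  return SelectInst::Create(Cond, TrueV, FalseV, "sel", InsertPt);
}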
1802 | |
1803 | //===----------------------------------------------------------------------===// |
1804 | // VAArgInst Class |
1805 | //===----------------------------------------------------------------------===// |
1806 | |
1807 | /// This class represents the va_arg llvm instruction, which returns |
1808 | /// an argument of the specified type given a va_list and increments that list |
1809 | /// |
1810 | class VAArgInst : public UnaryInstruction { |
1811 | protected: |
1812 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1813 | friend class Instruction; |
1814 | |
1815 | VAArgInst *cloneImpl() const; |
1816 | |
1817 | public: |
1818 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", |
1819 | Instruction *InsertBefore = nullptr) |
1820 | : UnaryInstruction(Ty, VAArg, List, InsertBefore) { |
1821 | setName(NameStr); |
1822 | } |
1823 | |
1824 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr, |
1825 | BasicBlock *InsertAtEnd) |
1826 | : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { |
1827 | setName(NameStr); |
1828 | } |
1829 | |
1830 | Value *getPointerOperand() { return getOperand(0); } |
1831 | const Value *getPointerOperand() const { return getOperand(0); } |
1832 | static unsigned getPointerOperandIndex() { return 0U; } |
1833 | |
1834 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1835 | static bool classof(const Instruction *I) { |
1836 | return I->getOpcode() == VAArg; |
1837 | } |
1838 | static bool classof(const Value *V) { |
1839 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1840 | } |
1841 | }; |
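
A sketch of the public VAArgInst constructor (helper name and result type are assumptions): the result type is supplied explicitly because it cannot be derived from the va_list operand.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: fetch the next variadic argument as an i32 and advance the list.
static Value *emitNextVarArg(Value *VAListPtr, Instruction *InsertPt) {
  Type *Int32Ty = Type::getInt32Ty(InsertPt->getContext());
  return new VAArgInst(VAListPtr, Int32Ty, "va.next", InsertPt);
}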
1842 | |
1843 | //===----------------------------------------------------------------------===// |
1844 | // ExtractElementInst Class |
1845 | //===----------------------------------------------------------------------===// |
1846 | |
1847 | /// This instruction extracts a single (scalar) |
1848 | /// element from a VectorType value |
1849 | /// |
1850 | class ExtractElementInst : public Instruction { |
1851 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", |
1852 | Instruction *InsertBefore = nullptr); |
1853 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, |
1854 | BasicBlock *InsertAtEnd); |
1855 | |
1856 | protected: |
1857 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1858 | friend class Instruction; |
1859 | |
1860 | ExtractElementInst *cloneImpl() const; |
1861 | |
1862 | public: |
1863 | static ExtractElementInst *Create(Value *Vec, Value *Idx, |
1864 | const Twine &NameStr = "", |
1865 | Instruction *InsertBefore = nullptr) { |
1866 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); |
1867 | } |
1868 | |
1869 | static ExtractElementInst *Create(Value *Vec, Value *Idx, |
1870 | const Twine &NameStr, |
1871 | BasicBlock *InsertAtEnd) { |
1872 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); |
1873 | } |
1874 | |
1875 | /// Return true if an extractelement instruction can be |
1876 | /// formed with the specified operands. |
1877 | static bool isValidOperands(const Value *Vec, const Value *Idx); |
1878 | |
1879 | Value *getVectorOperand() { return Op<0>(); } |
1880 | Value *getIndexOperand() { return Op<1>(); } |
1881 | const Value *getVectorOperand() const { return Op<0>(); } |
1882 | const Value *getIndexOperand() const { return Op<1>(); } |
1883 | |
1884 | VectorType *getVectorOperandType() const { |
1885 | return cast<VectorType>(getVectorOperand()->getType()); |
1886 | } |
1887 | |
1888 | /// Transparently provide more efficient getOperand methods. |
1889 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1890 | |
1891 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1892 | static bool classof(const Instruction *I) { |
1893 | return I->getOpcode() == Instruction::ExtractElement; |
1894 | } |
1895 | static bool classof(const Value *V) { |
1896 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1897 | } |
1898 | }; |
1899 | |
1900 | template <> |
1901 | struct OperandTraits<ExtractElementInst> : |
1902 | public FixedNumOperandTraits<ExtractElementInst, 2> { |
1903 | }; |
1904 | |
1905 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
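
Sketch for the extractelement factory above (helper name assumed):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: read lane Idx out of a vector value, validating operands first.
static Value *emitExtractLane(Value *Vec, Value *Idx, Instruction *InsertPt) {
  if (!ExtractElementInst::isValidOperands(Vec, Idx))
    return nullptr; // Vec must be a vector and Idx an integer.
  return ExtractElementInst::Create(Vec, Idx, "lane", InsertPt);
}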
1906 | |
1907 | //===----------------------------------------------------------------------===// |
1908 | // InsertElementInst Class |
1909 | //===----------------------------------------------------------------------===// |
1910 | |
1911 | /// This instruction inserts a single (scalar) |
1912 | /// element into a VectorType value |
1913 | /// |
1914 | class InsertElementInst : public Instruction { |
1915 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, |
1916 | const Twine &NameStr = "", |
1917 | Instruction *InsertBefore = nullptr); |
1918 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, |
1919 | BasicBlock *InsertAtEnd); |
1920 | |
1921 | protected: |
1922 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1923 | friend class Instruction; |
1924 | |
1925 | InsertElementInst *cloneImpl() const; |
1926 | |
1927 | public: |
1928 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1929 | const Twine &NameStr = "", |
1930 | Instruction *InsertBefore = nullptr) { |
1931 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); |
1932 | } |
1933 | |
1934 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1935 | const Twine &NameStr, |
1936 | BasicBlock *InsertAtEnd) { |
1937 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); |
1938 | } |
1939 | |
1940 | /// Return true if an insertelement instruction can be |
1941 | /// formed with the specified operands. |
1942 | static bool isValidOperands(const Value *Vec, const Value *NewElt, |
1943 | const Value *Idx); |
1944 | |
1945 | /// Overload to return most specific vector type. |
1946 | /// |
1947 | VectorType *getType() const { |
1948 | return cast<VectorType>(Instruction::getType()); |
1949 | } |
1950 | |
1951 | /// Transparently provide more efficient getOperand methods. |
1952 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
1953 | |
1954 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1955 | static bool classof(const Instruction *I) { |
1956 | return I->getOpcode() == Instruction::InsertElement; |
1957 | } |
1958 | static bool classof(const Value *V) { |
1959 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1960 | } |
1961 | }; |
1962 | |
1963 | template <> |
1964 | struct OperandTraits<InsertElementInst> : |
1965 | public FixedNumOperandTraits<InsertElementInst, 3> { |
1966 | }; |
1967 | |
1968 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) |
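// --- Illustrative sketch (not part of the original header) -----------------
// A minimal example of how the InsertElementInst API above is typically used:
// writing a scalar into lane 0 of a vector value. `Vec`, `Scalar` and
// `InsertPt` are assumed to be supplied by surrounding code.
static Value *insertIntoLaneZero(Value *Vec, Value *Scalar,
                                 Instruction *InsertPt) {
  LLVMContext &Ctx = InsertPt->getContext();
  Value *Idx = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
  // isValidOperands rejects mismatched element types or a non-integer index.
  assert(InsertElementInst::isValidOperands(Vec, Scalar, Idx));
  return InsertElementInst::Create(Vec, Scalar, Idx, "ins0", InsertPt);
}
// ----------------------------------------------------------------------------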
1969 | |
1970 | //===----------------------------------------------------------------------===// |
1971 | // ShuffleVectorInst Class |
1972 | //===----------------------------------------------------------------------===// |
1973 | |
1974 | constexpr int UndefMaskElem = -1; |
1975 | |
1976 | /// This instruction constructs a fixed permutation of two |
1977 | /// input vectors. |
1978 | /// |
1979 | /// For each element of the result vector, the shuffle mask selects an element |
1980 | /// from one of the input vectors to copy to the result. Non-negative elements |
1981 | /// in the mask represent an index into the concatenated pair of input vectors. |
1982 | /// UndefMaskElem (-1) specifies that the result element is undefined. |
1983 | /// |
1984 | /// For scalable vectors, all the elements of the mask must be 0 or -1. This |
1985 | /// requirement may be relaxed in the future. |
1986 | class ShuffleVectorInst : public Instruction { |
1987 | SmallVector<int, 4> ShuffleMask; |
1988 | Constant *ShuffleMaskForBitcode; |
1989 | |
1990 | protected: |
1991 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1992 | friend class Instruction; |
1993 | |
1994 | ShuffleVectorInst *cloneImpl() const; |
1995 | |
1996 | public: |
1997 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
1998 | const Twine &NameStr = "", |
1999 | Instruction *InsertBefore = nullptr); |
2000 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
2001 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2002 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, |
2003 | const Twine &NameStr = "", |
2004 | Instruction *InsertBefore = nullptr); |
2005 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, |
2006 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2007 | |
2008 | void *operator new(size_t s) { return User::operator new(s, 2); } |
2009 | |
2010 | /// Swap the operands and adjust the mask to preserve the semantics |
2011 | /// of the instruction. |
2012 | void commute(); |
2013 | |
2014 | /// Return true if a shufflevector instruction can be |
2015 | /// formed with the specified operands. |
2016 | static bool isValidOperands(const Value *V1, const Value *V2, |
2017 | const Value *Mask); |
2018 | static bool isValidOperands(const Value *V1, const Value *V2, |
2019 | ArrayRef<int> Mask); |
2020 | |
2021 | /// Overload to return most specific vector type. |
2022 | /// |
2023 | VectorType *getType() const { |
2024 | return cast<VectorType>(Instruction::getType()); |
2025 | } |
2026 | |
2027 | /// Transparently provide more efficient getOperand methods. |
2028 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
2029 | |
2030 | /// Return the shuffle mask value of this instruction for the given element |
2031 | /// index. Return UndefMaskElem if the element is undef. |
2032 | int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } |
2033 | |
2034 | /// Convert the input shuffle mask operand to a vector of integers. Undefined |
2035 | /// elements of the mask are returned as UndefMaskElem. |
2036 | static void getShuffleMask(const Constant *Mask, |
2037 | SmallVectorImpl<int> &Result); |
2038 | |
2039 | /// Return the mask for this instruction as a vector of integers. Undefined |
2040 | /// elements of the mask are returned as UndefMaskElem. |
2041 | void getShuffleMask(SmallVectorImpl<int> &Result) const { |
2042 | Result.assign(ShuffleMask.begin(), ShuffleMask.end()); |
2043 | } |
2044 | |
2045 | /// Return the mask for this instruction, for use in bitcode. |
2046 | /// |
2047 | /// TODO: This is temporary until we decide a new bitcode encoding for |
2048 | /// shufflevector. |
2049 | Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } |
2050 | |
2051 | static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, |
2052 | Type *ResultTy); |
2053 | |
2054 | void setShuffleMask(ArrayRef<int> Mask); |
2055 | |
2056 | ArrayRef<int> getShuffleMask() const { return ShuffleMask; } |
2057 | |
2058 | /// Return true if this shuffle returns a vector with a different number of |
2059 | /// elements than its source vectors. |
2060 | /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> |
2061 | /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> |
2062 | bool changesLength() const { |
2063 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) |
2064 | ->getElementCount() |
2065 | .getKnownMinValue(); |
2066 | unsigned NumMaskElts = ShuffleMask.size(); |
2067 | return NumSourceElts != NumMaskElts; |
2068 | } |
2069 | |
2070 | /// Return true if this shuffle returns a vector with a greater number of |
2071 | /// elements than its source vectors. |
2072 | /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> |
2073 | bool increasesLength() const { |
2074 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) |
2075 | ->getElementCount() |
2076 | .getKnownMinValue(); |
2077 | unsigned NumMaskElts = ShuffleMask.size(); |
2078 | return NumSourceElts < NumMaskElts; |
2079 | } |
2080 | |
2081 | /// Return true if this shuffle mask chooses elements from exactly one source |
2082 | /// vector. |
2083 | /// Example: <7,5,undef,7> |
2084 | /// This assumes that vector operands are the same length as the mask. |
2085 | static bool isSingleSourceMask(ArrayRef<int> Mask); |
2086 | static bool isSingleSourceMask(const Constant *Mask) { |
2087 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2088 | SmallVector<int, 16> MaskAsInts; |
2089 | getShuffleMask(Mask, MaskAsInts); |
2090 | return isSingleSourceMask(MaskAsInts); |
2091 | } |
2092 | |
2093 | /// Return true if this shuffle chooses elements from exactly one source |
2094 | /// vector without changing the length of that vector. |
2095 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> |
2096 | /// TODO: Optionally allow length-changing shuffles. |
2097 | bool isSingleSource() const { |
2098 | return !changesLength() && isSingleSourceMask(ShuffleMask); |
2099 | } |
2100 | |
2101 | /// Return true if this shuffle mask chooses elements from exactly one source |
2102 | /// vector without lane crossings. A shuffle using this mask is not |
2103 | /// necessarily a no-op because it may change the number of elements from its |
2104 | /// input vectors or it may provide demanded bits knowledge via undef lanes. |
2105 | /// Example: <undef,undef,2,3> |
2106 | static bool isIdentityMask(ArrayRef<int> Mask); |
2107 | static bool isIdentityMask(const Constant *Mask) { |
2108 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2109 | SmallVector<int, 16> MaskAsInts; |
2110 | getShuffleMask(Mask, MaskAsInts); |
2111 | return isIdentityMask(MaskAsInts); |
2112 | } |
2113 | |
2114 | /// Return true if this shuffle chooses elements from exactly one source |
2115 | /// vector without lane crossings and does not change the number of elements |
2116 | /// from its input vectors. |
2117 | /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> |
2118 | bool isIdentity() const { |
2119 | return !changesLength() && isIdentityMask(ShuffleMask); |
2120 | } |
2121 | |
2122 | /// Return true if this shuffle lengthens exactly one source vector with |
2123 | /// undefs in the high elements. |
2124 | bool isIdentityWithPadding() const; |
2125 | |
2126 | /// Return true if this shuffle extracts the first N elements of exactly one |
2127 | /// source vector. |
2128 | bool isIdentityWithExtract() const; |
2129 | |
2130 | /// Return true if this shuffle concatenates its 2 source vectors. This |
2131 | /// returns false if either input is undefined. In that case, the shuffle |
2132 | /// is better classified as an identity with padding operation. |
2133 | bool isConcat() const; |
2134 | |
2135 | /// Return true if this shuffle mask chooses elements from its source vectors |
2136 | /// without lane crossings. A shuffle using this mask would be |
2137 | /// equivalent to a vector select with a constant condition operand. |
2138 | /// Example: <4,1,6,undef> |
2139 | /// This returns false if the mask does not choose from both input vectors. |
2140 | /// In that case, the shuffle is better classified as an identity shuffle. |
2141 | /// This assumes that vector operands are the same length as the mask |
2142 | /// (a length-changing shuffle can never be equivalent to a vector select). |
2143 | static bool isSelectMask(ArrayRef<int> Mask); |
2144 | static bool isSelectMask(const Constant *Mask) { |
2145 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2146 | SmallVector<int, 16> MaskAsInts; |
2147 | getShuffleMask(Mask, MaskAsInts); |
2148 | return isSelectMask(MaskAsInts); |
2149 | } |
2150 | |
2151 | /// Return true if this shuffle chooses elements from its source vectors |
2152 | /// without lane crossings and all operands have the same number of elements. |
2153 | /// In other words, this shuffle is equivalent to a vector select with a |
2154 | /// constant condition operand. |
2155 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> |
2156 | /// This returns false if the mask does not choose from both input vectors. |
2157 | /// In that case, the shuffle is better classified as an identity shuffle. |
2158 | /// TODO: Optionally allow length-changing shuffles. |
2159 | bool isSelect() const { |
2160 | return !changesLength() && isSelectMask(ShuffleMask); |
2161 | } |
2162 | |
2163 | /// Return true if this shuffle mask swaps the order of elements from exactly |
2164 | /// one source vector. |
2165 | /// Example: <7,6,undef,4> |
2166 | /// This assumes that vector operands are the same length as the mask. |
2167 | static bool isReverseMask(ArrayRef<int> Mask); |
2168 | static bool isReverseMask(const Constant *Mask) { |
2169 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2170 | SmallVector<int, 16> MaskAsInts; |
2171 | getShuffleMask(Mask, MaskAsInts); |
2172 | return isReverseMask(MaskAsInts); |
2173 | } |
2174 | |
2175 | /// Return true if this shuffle swaps the order of elements from exactly |
2176 | /// one source vector. |
2177 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> |
2178 | /// TODO: Optionally allow length-changing shuffles. |
2179 | bool isReverse() const { |
2180 | return !changesLength() && isReverseMask(ShuffleMask); |
2181 | } |
2182 | |
2183 | /// Return true if this shuffle mask chooses all elements with the same value |
2184 | /// as the first element of exactly one source vector. |
2185 | /// Example: <4,undef,undef,4> |
2186 | /// This assumes that vector operands are the same length as the mask. |
2187 | static bool isZeroEltSplatMask(ArrayRef<int> Mask); |
2188 | static bool isZeroEltSplatMask(const Constant *Mask) { |
2189 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2190 | SmallVector<int, 16> MaskAsInts; |
2191 | getShuffleMask(Mask, MaskAsInts); |
2192 | return isZeroEltSplatMask(MaskAsInts); |
2193 | } |
2194 | |
2195 | /// Return true if all elements of this shuffle are the same value as the |
2196 | /// first element of exactly one source vector without changing the length |
2197 | /// of that vector. |
2198 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> |
2199 | /// TODO: Optionally allow length-changing shuffles. |
2200 | /// TODO: Optionally allow splats from other elements. |
2201 | bool isZeroEltSplat() const { |
2202 | return !changesLength() && isZeroEltSplatMask(ShuffleMask); |
2203 | } |
2204 | |
2205 | /// Return true if this shuffle mask is a transpose mask. |
2206 | /// Transpose vector masks transpose a 2xn matrix. They read corresponding |
2207 | /// even- or odd-numbered vector elements from two n-dimensional source |
2208 | /// vectors and write each result into consecutive elements of an |
2209 | /// n-dimensional destination vector. Two shuffles are necessary to complete |
2210 | /// the transpose, one for the even elements and another for the odd elements. |
2211 | /// This description closely follows how the TRN1 and TRN2 AArch64 |
2212 | /// instructions operate. |
2213 | /// |
2214 | /// For example, a simple 2x2 matrix can be transposed with: |
2215 | /// |
2216 | /// ; Original matrix |
2217 | /// m0 = < a, b > |
2218 | /// m1 = < c, d > |
2219 | /// |
2220 | /// ; Transposed matrix |
2221 | /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > |
2222 | /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > |
2223 | /// |
2224 | /// For matrices having greater than n columns, the resulting nx2 transposed |
2225 | /// matrix is stored in two result vectors such that one vector contains |
2226 | /// interleaved elements from all the even-numbered rows and the other vector |
2227 | /// contains interleaved elements from all the odd-numbered rows. For example, |
2228 | /// a 2x4 matrix can be transposed with: |
2229 | /// |
2230 | /// ; Original matrix |
2231 | /// m0 = < a, b, c, d > |
2232 | /// m1 = < e, f, g, h > |
2233 | /// |
2234 | /// ; Transposed matrix |
2235 | /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > |
2236 | /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > |
2237 | static bool isTransposeMask(ArrayRef<int> Mask); |
2238 | static bool isTransposeMask(const Constant *Mask) { |
2239 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2240 | SmallVector<int, 16> MaskAsInts; |
2241 | getShuffleMask(Mask, MaskAsInts); |
2242 | return isTransposeMask(MaskAsInts); |
2243 | } |
2244 | |
2245 | /// Return true if this shuffle transposes the elements of its inputs without |
2246 | /// changing the length of the vectors. This operation may also be known as a |
2247 | /// merge or interleave. See the description for isTransposeMask() for the |
2248 | /// exact specification. |
2249 | /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> |
2250 | bool isTranspose() const { |
2251 | return !changesLength() && isTransposeMask(ShuffleMask); |
2252 | } |
2253 | |
2254 | /// Return true if this shuffle mask is an extract subvector mask. |
2255 | /// A valid extract subvector mask returns a smaller vector from a single |
2256 | /// source operand. The base extraction index is returned as well. |
2257 | static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, |
2258 | int &Index); |
2259 | static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, |
2260 | int &Index) { |
2261 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2262 | // Not possible to express a shuffle mask for a scalable vector for this |
2263 | // case. |
2264 | if (isa<ScalableVectorType>(Mask->getType())) |
2265 | return false; |
2266 | SmallVector<int, 16> MaskAsInts; |
2267 | getShuffleMask(Mask, MaskAsInts); |
2268 | return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); |
2269 | } |
2270 | |
2271 | /// Return true if this shuffle mask is an extract subvector mask. |
2272 | bool isExtractSubvectorMask(int &Index) const { |
2273 | // Not possible to express a shuffle mask for a scalable vector for this |
2274 | // case. |
2275 | if (isa<ScalableVectorType>(getType())) |
2276 | return false; |
2277 | |
2278 | int NumSrcElts = |
2279 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); |
2280 | return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index); |
2281 | } |
2282 | |
2283 | /// Change values in a shuffle permute mask assuming the two vector operands |
2284 | /// of length InVecNumElts have swapped position. |
2285 | static void commuteShuffleMask(MutableArrayRef<int> Mask, |
2286 | unsigned InVecNumElts) { |
2287 | for (int &Idx : Mask) { |
2288 | if (Idx == -1) |
2289 | continue; |
2290 | Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; |
2291 | assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 && |
2292 |        "shufflevector mask index out of range"); |
2293 | } |
2294 | } |
2295 | |
2296 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2297 | static bool classof(const Instruction *I) { |
2298 | return I->getOpcode() == Instruction::ShuffleVector; |
2299 | } |
2300 | static bool classof(const Value *V) { |
2301 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2302 | } |
2303 | }; |
2304 | |
2305 | template <> |
2306 | struct OperandTraits<ShuffleVectorInst> |
2307 | : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; |
2308 | |
2309 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) |
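// --- Illustrative sketch (not part of the original header) -----------------
// A minimal example of the ShuffleVectorInst API above: reversing a
// fixed-width 4-element vector and checking the mask classification helpers.
// `V` (a <4 x ty> value) and `InsertPt` are assumed to come from surrounding
// code.
static Value *reverseVec4(Value *V, Instruction *InsertPt) {
  int RevMask[] = {3, 2, 1, 0};
  auto *Shuf = new ShuffleVectorInst(V, UndefValue::get(V->getType()),
                                     RevMask, "rev", InsertPt);
  // The mask reads from one source only, swaps element order, and keeps the
  // element count unchanged.
  assert(Shuf->isSingleSource() && Shuf->isReverse() && !Shuf->changesLength());
  return Shuf;
}
// ----------------------------------------------------------------------------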
2310 | |
2311 | //===----------------------------------------------------------------------===// |
2312 | // ExtractValueInst Class |
2313 | //===----------------------------------------------------------------------===// |
2314 | |
2315 | /// This instruction extracts a struct member or array |
2316 | /// element value from an aggregate value. |
2317 | /// |
2318 | class ExtractValueInst : public UnaryInstruction { |
2319 | SmallVector<unsigned, 4> Indices; |
2320 | |
2321 | ExtractValueInst(const ExtractValueInst &EVI); |
2322 | |
2323 | /// Constructors - Create an extractvalue instruction with a base aggregate |
2324 | /// value and a list of indices. The first ctor can optionally insert before |
2325 | /// an existing instruction, the second appends the new instruction to the |
2326 | /// specified BasicBlock. |
2327 | inline ExtractValueInst(Value *Agg, |
2328 | ArrayRef<unsigned> Idxs, |
2329 | const Twine &NameStr, |
2330 | Instruction *InsertBefore); |
2331 | inline ExtractValueInst(Value *Agg, |
2332 | ArrayRef<unsigned> Idxs, |
2333 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2334 | |
2335 | void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); |
2336 | |
2337 | protected: |
2338 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2339 | friend class Instruction; |
2340 | |
2341 | ExtractValueInst *cloneImpl() const; |
2342 | |
2343 | public: |
2344 | static ExtractValueInst *Create(Value *Agg, |
2345 | ArrayRef<unsigned> Idxs, |
2346 | const Twine &NameStr = "", |
2347 | Instruction *InsertBefore = nullptr) { |
2348 | return new |
2349 | ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); |
2350 | } |
2351 | |
2352 | static ExtractValueInst *Create(Value *Agg, |
2353 | ArrayRef<unsigned> Idxs, |
2354 | const Twine &NameStr, |
2355 | BasicBlock *InsertAtEnd) { |
2356 | return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); |
2357 | } |
2358 | |
2359 | /// Returns the type of the element that would be extracted |
2360 | /// with an extractvalue instruction with the specified parameters. |
2361 | /// |
2362 | /// Null is returned if the indices are invalid for the specified type. |
2363 | static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); |
2364 | |
2365 | using idx_iterator = const unsigned*; |
2366 | |
2367 | inline idx_iterator idx_begin() const { return Indices.begin(); } |
2368 | inline idx_iterator idx_end() const { return Indices.end(); } |
2369 | inline iterator_range<idx_iterator> indices() const { |
2370 | return make_range(idx_begin(), idx_end()); |
2371 | } |
2372 | |
2373 | Value *getAggregateOperand() { |
2374 | return getOperand(0); |
2375 | } |
2376 | const Value *getAggregateOperand() const { |
2377 | return getOperand(0); |
2378 | } |
2379 | static unsigned getAggregateOperandIndex() { |
2380 | return 0U; // get index for modifying correct operand |
2381 | } |
2382 | |
2383 | ArrayRef<unsigned> getIndices() const { |
2384 | return Indices; |
2385 | } |
2386 | |
2387 | unsigned getNumIndices() const { |
2388 | return (unsigned)Indices.size(); |
2389 | } |
2390 | |
2391 | bool hasIndices() const { |
2392 | return true; |
2393 | } |
2394 | |
2395 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2396 | static bool classof(const Instruction *I) { |
2397 | return I->getOpcode() == Instruction::ExtractValue; |
2398 | } |
2399 | static bool classof(const Value *V) { |
2400 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2401 | } |
2402 | }; |
2403 | |
2404 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2405 | ArrayRef<unsigned> Idxs, |
2406 | const Twine &NameStr, |
2407 | Instruction *InsertBefore) |
2408 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2409 | ExtractValue, Agg, InsertBefore) { |
2410 | init(Idxs, NameStr); |
2411 | } |
2412 | |
2413 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2414 | ArrayRef<unsigned> Idxs, |
2415 | const Twine &NameStr, |
2416 | BasicBlock *InsertAtEnd) |
2417 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2418 | ExtractValue, Agg, InsertAtEnd) { |
2419 | init(Idxs, NameStr); |
2420 | } |
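// --- Illustrative sketch (not part of the original header) -----------------
// A minimal example of the ExtractValueInst API above: pulling field 1 out of
// a struct-typed aggregate. `Agg` and `InsertPt` are assumed to be provided
// by the caller.
static Value *extractSecondField(Value *Agg, Instruction *InsertPt) {
  unsigned Idxs[] = {1};
  // getIndexedType returns null when the indices are invalid for the type.
  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) &&
         "aggregate has no field 1");
  return ExtractValueInst::Create(Agg, Idxs, "field1", InsertPt);
}
// ----------------------------------------------------------------------------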
2421 | |
2422 | //===----------------------------------------------------------------------===// |
2423 | // InsertValueInst Class |
2424 | //===----------------------------------------------------------------------===// |
2425 | |
2426 | /// This instruction inserts a struct field or array element |
2427 | /// value into an aggregate value. |
2428 | /// |
2429 | class InsertValueInst : public Instruction { |
2430 | SmallVector<unsigned, 4> Indices; |
2431 | |
2432 | InsertValueInst(const InsertValueInst &IVI); |
2433 | |
2434 | /// Constructors - Create an insertvalue instruction with a base aggregate |
2435 | /// value, a value to insert, and a list of indices. The first ctor can |
2436 | /// optionally insert before an existing instruction, the second appends |
2437 | /// the new instruction to the specified BasicBlock. |
2438 | inline InsertValueInst(Value *Agg, Value *Val, |
2439 | ArrayRef<unsigned> Idxs, |
2440 | const Twine &NameStr, |
2441 | Instruction *InsertBefore); |
2442 | inline InsertValueInst(Value *Agg, Value *Val, |
2443 | ArrayRef<unsigned> Idxs, |
2444 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2445 | |
2446 | /// Constructors - These two constructors are convenience methods because one |
2447 | /// and two index insertvalue instructions are so common. |
2448 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, |
2449 | const Twine &NameStr = "", |
2450 | Instruction *InsertBefore = nullptr); |
2451 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, |
2452 | BasicBlock *InsertAtEnd); |
2453 | |
2454 | void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, |
2455 | const Twine &NameStr); |
2456 | |
2457 | protected: |
2458 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2459 | friend class Instruction; |
2460 | |
2461 | InsertValueInst *cloneImpl() const; |
2462 | |
2463 | public: |
2464 | // allocate space for exactly two operands |
2465 | void *operator new(size_t s) { |
2466 | return User::operator new(s, 2); |
2467 | } |
2468 | |
2469 | static InsertValueInst *Create(Value *Agg, Value *Val, |
2470 | ArrayRef<unsigned> Idxs, |
2471 | const Twine &NameStr = "", |
2472 | Instruction *InsertBefore = nullptr) { |
2473 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); |
2474 | } |
2475 | |
2476 | static InsertValueInst *Create(Value *Agg, Value *Val, |
2477 | ArrayRef<unsigned> Idxs, |
2478 | const Twine &NameStr, |
2479 | BasicBlock *InsertAtEnd) { |
2480 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); |
2481 | } |
2482 | |
2483 | /// Transparently provide more efficient getOperand methods. |
2484 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
2485 | |
2486 | using idx_iterator = const unsigned*; |
2487 | |
2488 | inline idx_iterator idx_begin() const { return Indices.begin(); } |
2489 | inline idx_iterator idx_end() const { return Indices.end(); } |
2490 | inline iterator_range<idx_iterator> indices() const { |
2491 | return make_range(idx_begin(), idx_end()); |
2492 | } |
2493 | |
2494 | Value *getAggregateOperand() { |
2495 | return getOperand(0); |
2496 | } |
2497 | const Value *getAggregateOperand() const { |
2498 | return getOperand(0); |
2499 | } |
2500 | static unsigned getAggregateOperandIndex() { |
2501 | return 0U; // get index for modifying correct operand |
2502 | } |
2503 | |
2504 | Value *getInsertedValueOperand() { |
2505 | return getOperand(1); |
2506 | } |
2507 | const Value *getInsertedValueOperand() const { |
2508 | return getOperand(1); |
2509 | } |
2510 | static unsigned getInsertedValueOperandIndex() { |
2511 | return 1U; // get index for modifying correct operand |
2512 | } |
2513 | |
2514 | ArrayRef<unsigned> getIndices() const { |
2515 | return Indices; |
2516 | } |
2517 | |
2518 | unsigned getNumIndices() const { |
2519 | return (unsigned)Indices.size(); |
2520 | } |
2521 | |
2522 | bool hasIndices() const { |
2523 | return true; |
2524 | } |
2525 | |
2526 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2527 | static bool classof(const Instruction *I) { |
2528 | return I->getOpcode() == Instruction::InsertValue; |
2529 | } |
2530 | static bool classof(const Value *V) { |
2531 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2532 | } |
2533 | }; |
2534 | |
2535 | template <> |
2536 | struct OperandTraits<InsertValueInst> : |
2537 | public FixedNumOperandTraits<InsertValueInst, 2> { |
2538 | }; |
2539 | |
2540 | InsertValueInst::InsertValueInst(Value *Agg, |
2541 | Value *Val, |
2542 | ArrayRef<unsigned> Idxs, |
2543 | const Twine &NameStr, |
2544 | Instruction *InsertBefore) |
2545 | : Instruction(Agg->getType(), InsertValue, |
2546 | OperandTraits<InsertValueInst>::op_begin(this), |
2547 | 2, InsertBefore) { |
2548 | init(Agg, Val, Idxs, NameStr); |
2549 | } |
2550 | |
2551 | InsertValueInst::InsertValueInst(Value *Agg, |
2552 | Value *Val, |
2553 | ArrayRef<unsigned> Idxs, |
2554 | const Twine &NameStr, |
2555 | BasicBlock *InsertAtEnd) |
2556 | : Instruction(Agg->getType(), InsertValue, |
2557 | OperandTraits<InsertValueInst>::op_begin(this), |
2558 | 2, InsertAtEnd) { |
2559 | init(Agg, Val, Idxs, NameStr); |
2560 | } |
2561 | |
2562 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) |
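// --- Illustrative sketch (not part of the original header) -----------------
// A minimal example of the InsertValueInst API above: storing a value into
// field 1 of a struct-typed aggregate, yielding a new aggregate value.
// `Agg`, `Elt` and `InsertPt` are assumed to be supplied by the caller.
static Value *setSecondField(Value *Agg, Value *Elt, Instruction *InsertPt) {
  unsigned Idxs[] = {1};
  // The result has the same aggregate type as `Agg`; only field 1 changes.
  return InsertValueInst::Create(Agg, Elt, Idxs, "agg.upd", InsertPt);
}
// ----------------------------------------------------------------------------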
2563 | |
2564 | //===----------------------------------------------------------------------===// |
2565 | // PHINode Class |
2566 | //===----------------------------------------------------------------------===// |
2567 | |
2568 | // PHINode - The PHINode class is used to represent the magical mystical PHI |
2569 | // node, that can not exist in nature, but can be synthesized in a computer |
2570 | // scientist's overactive imagination. |
2571 | // |
2572 | class PHINode : public Instruction { |
2573 | /// The number of operands actually allocated. NumOperands is |
2574 | /// the number actually in use. |
2575 | unsigned ReservedSpace; |
2576 | |
2577 | PHINode(const PHINode &PN); |
2578 | |
2579 | explicit PHINode(Type *Ty, unsigned NumReservedValues, |
2580 | const Twine &NameStr = "", |
2581 | Instruction *InsertBefore = nullptr) |
2582 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), |
2583 | ReservedSpace(NumReservedValues) { |
2584 | setName(NameStr); |
2585 | allocHungoffUses(ReservedSpace); |
2586 | } |
2587 | |
2588 | PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, |
2589 | BasicBlock *InsertAtEnd) |
2590 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), |
2591 | ReservedSpace(NumReservedValues) { |
2592 | setName(NameStr); |
2593 | allocHungoffUses(ReservedSpace); |
2594 | } |
2595 | |
2596 | protected: |
2597 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2598 | friend class Instruction; |
2599 | |
2600 | PHINode *cloneImpl() const; |
2601 | |
2602 | // allocHungoffUses - this is more complicated than the generic |
2603 | // User::allocHungoffUses, because we have to allocate Uses for the incoming |
2604 | // values and pointers to the incoming blocks, all in one allocation. |
2605 | void allocHungoffUses(unsigned N) { |
2606 | User::allocHungoffUses(N, /* IsPhi */ true); |
2607 | } |
2608 | |
2609 | public: |
2610 | /// Constructors - NumReservedValues is a hint for the number of incoming |
2611 | /// edges that this phi node will have (use 0 if you really have no idea). |
2612 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, |
2613 | const Twine &NameStr = "", |
2614 | Instruction *InsertBefore = nullptr) { |
2615 | return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); |
2616 | } |
2617 | |
2618 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, |
2619 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
2620 | return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); |
2621 | } |
2622 | |
2623 | /// Provide fast operand accessors |
2624 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
2625 | |
2626 | // Block iterator interface. This provides access to the list of incoming |
2627 | // basic blocks, which parallels the list of incoming values. |
2628 | |
2629 | using block_iterator = BasicBlock **; |
2630 | using const_block_iterator = BasicBlock * const *; |
2631 | |
2632 | block_iterator block_begin() { |
2633 | return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); |
2634 | } |
2635 | |
2636 | const_block_iterator block_begin() const { |
2637 | return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); |
2638 | } |
2639 | |
2640 | block_iterator block_end() { |
2641 | return block_begin() + getNumOperands(); |
2642 | } |
2643 | |
2644 | const_block_iterator block_end() const { |
2645 | return block_begin() + getNumOperands(); |
2646 | } |
2647 | |
2648 | iterator_range<block_iterator> blocks() { |
2649 | return make_range(block_begin(), block_end()); |
2650 | } |
2651 | |
2652 | iterator_range<const_block_iterator> blocks() const { |
2653 | return make_range(block_begin(), block_end()); |
2654 | } |
2655 | |
2656 | op_range incoming_values() { return operands(); } |
2657 | |
2658 | const_op_range incoming_values() const { return operands(); } |
2659 | |
2660 | /// Return the number of incoming edges |
2661 | /// |
2662 | unsigned getNumIncomingValues() const { return getNumOperands(); } |
2663 | |
2664 | /// Return incoming value number x |
2665 | /// |
2666 | Value *getIncomingValue(unsigned i) const { |
2667 | return getOperand(i); |
2668 | } |
2669 | void setIncomingValue(unsigned i, Value *V) { |
2670 | assert(V && "PHI node got a null value!"); |
2671 | assert(getType() == V->getType() && |
2672 |        "All operands to PHI node must be the same type as the PHI node!"); |
2673 | setOperand(i, V); |
2674 | } |
2675 | |
2676 | static unsigned getOperandNumForIncomingValue(unsigned i) { |
2677 | return i; |
2678 | } |
2679 | |
2680 | static unsigned getIncomingValueNumForOperand(unsigned i) { |
2681 | return i; |
2682 | } |
2683 | |
2684 | /// Return incoming basic block number @p i. |
2685 | /// |
2686 | BasicBlock *getIncomingBlock(unsigned i) const { |
2687 | return block_begin()[i]; |
2688 | } |
2689 | |
2690 | /// Return incoming basic block corresponding |
2691 | /// to an operand of the PHI. |
2692 | /// |
2693 | BasicBlock *getIncomingBlock(const Use &U) const { |
2694 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); |
2695 | return getIncomingBlock(unsigned(&U - op_begin())); |
2696 | } |
2697 | |
2698 | /// Return incoming basic block corresponding |
2699 | /// to value use iterator. |
2700 | /// |
2701 | BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { |
2702 | return getIncomingBlock(I.getUse()); |
2703 | } |
2704 | |
2705 | void setIncomingBlock(unsigned i, BasicBlock *BB) { |
2706 | assert(BB && "PHI node got a null basic block!"); |
2707 | block_begin()[i] = BB; |
2708 | } |
2709 | |
2710 | /// Replace every incoming basic block \p Old to basic block \p New. |
2711 | void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { |
2712 | assert(New && Old && "PHI node got a null basic block!"); |
2713 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) |
2714 | if (getIncomingBlock(Op) == Old) |
2715 | setIncomingBlock(Op, New); |
2716 | } |
2717 | |
2718 | /// Add an incoming value to the end of the PHI list |
2719 | /// |
2720 | void addIncoming(Value *V, BasicBlock *BB) { |
2721 | if (getNumOperands() == ReservedSpace) |
2722 | growOperands(); // Get more space! |
2723 | // Initialize some new operands. |
2724 | setNumHungOffUseOperands(getNumOperands() + 1); |
2725 | setIncomingValue(getNumOperands() - 1, V); |
2726 | setIncomingBlock(getNumOperands() - 1, BB); |
2727 | } |
2728 | |
2729 | /// Remove an incoming value. This is useful if a |
2730 | /// predecessor basic block is deleted. The value removed is returned. |
2731 | /// |
2732 | /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty |
2733 | /// is true), the PHI node is destroyed and any uses of it are replaced with |
2734 | /// dummy values. The only time there should be zero incoming values to a PHI |
2735 | /// node is when the block is dead, so this strategy is sound. |
2736 | /// |
2737 | Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); |
2738 | |
2739 | Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { |
2740 | int Idx = getBasicBlockIndex(BB); |
2741 | assert(Idx >= 0 && "Invalid basic block argument to remove!"); |
2742 | return removeIncomingValue(Idx, DeletePHIIfEmpty); |
2743 | } |
2744 | |
2745 | /// Return the first index of the specified basic |
2746 | /// block in the value list for this PHI. Returns -1 if no instance. |
2747 | /// |
2748 | int getBasicBlockIndex(const BasicBlock *BB) const { |
2749 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) |
2750 | if (block_begin()[i] == BB) |
2751 | return i; |
2752 | return -1; |
2753 | } |
2754 | |
2755 | Value *getIncomingValueForBlock(const BasicBlock *BB) const { |
2756 | int Idx = getBasicBlockIndex(BB); |
2757 | assert(Idx >= 0 && "Invalid basic block argument!"); |
2758 | return getIncomingValue(Idx); |
2759 | } |
2760 | |
2761 | /// Set every incoming value for block \p BB to \p V. |
2762 | void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { |
2763 | assert(BB && "PHI node got a null basic block!"); |
2764 | bool Found = false; |
2765 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) |
2766 | if (getIncomingBlock(Op) == BB) { |
2767 | Found = true; |
2768 | setIncomingValue(Op, V); |
2769 | } |
2770 | (void)Found; |
2771 | assert(Found && "Invalid basic block argument to set!"); |
2772 | } |
2773 | |
2774 | /// If the specified PHI node always merges together the |
2775 | /// same value, return the value, otherwise return null. |
2776 | Value *hasConstantValue() const; |
2777 | |
2778 | /// Whether the specified PHI node always merges |
2779 | /// together the same value, assuming undefs are equal to a unique |
2780 | /// non-undef value. |
2781 | bool hasConstantOrUndefValue() const; |
2782 | |
2783 | /// Return true if the PHI node is complete, i.e. every predecessor of its |
2784 | /// parent block has an incoming value in this PHI; otherwise return false. |
2785 | bool isComplete() const { |
2786 | return llvm::all_of(predecessors(getParent()), |
2787 | [this](const BasicBlock *Pred) { |
2788 | return getBasicBlockIndex(Pred) >= 0; |
2789 | }); |
2790 | } |
2791 | |
2792 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
2793 | static bool classof(const Instruction *I) { |
2794 | return I->getOpcode() == Instruction::PHI; |
2795 | } |
2796 | static bool classof(const Value *V) { |
2797 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2798 | } |
2799 | |
2800 | private: |
2801 | void growOperands(); |
2802 | }; |
2803 | |
2804 | template <> |
2805 | struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { |
2806 | }; |
2807 | |
2808 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) |
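// --- Illustrative sketch (not part of the original header) -----------------
// A minimal example of the PHINode API above: merging two incoming values at
// a join block. `MergeBB` (assumed non-empty, so the PHI lands at its top)
// and the value/predecessor pairs are assumed to come from surrounding
// control flow.
static PHINode *mergeTwoValues(BasicBlock *MergeBB, Value *ValA,
                               BasicBlock *PredA, Value *ValB,
                               BasicBlock *PredB) {
  // Reserve space for the two incoming edges up front.
  PHINode *PN = PHINode::Create(ValA->getType(), /*NumReservedValues=*/2,
                                "merge", &MergeBB->front());
  PN->addIncoming(ValA, PredA);
  PN->addIncoming(ValB, PredB);
  // Assuming PredA and PredB are MergeBB's only predecessors, the PHI is now
  // complete.
  assert(PN->isComplete());
  return PN;
}
// ----------------------------------------------------------------------------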
2809 | |
2810 | //===----------------------------------------------------------------------===// |
2811 | // LandingPadInst Class |
2812 | //===----------------------------------------------------------------------===// |
2813 | |
2814 | //===--------------------------------------------------------------------------- |
2815 | /// The landingpad instruction holds all of the information |
2816 | /// necessary to generate correct exception handling. The landingpad instruction |
2817 | /// cannot be moved from the top of a landing pad block, which itself is |
2818 | /// accessible only from the 'unwind' edge of an invoke. This uses the |
2819 | /// SubclassData field in Value to store whether or not the landingpad is a |
2820 | /// cleanup. |
2821 | /// |
2822 | class LandingPadInst : public Instruction { |
2823 | using CleanupField = BoolBitfieldElementT<0>; |
2824 | |
2825 | /// The number of operands actually allocated. NumOperands is |
2826 | /// the number actually in use. |
2827 | unsigned ReservedSpace; |
2828 | |
2829 | LandingPadInst(const LandingPadInst &LP); |
2830 | |
2831 | public: |
2832 | enum ClauseType { Catch, Filter }; |
2833 | |
2834 | private: |
2835 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, |
2836 | const Twine &NameStr, Instruction *InsertBefore); |
2837 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, |
2838 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2839 | |
2840 | // Allocate space for exactly zero operands. |
2841 | void *operator new(size_t s) { |
2842 | return User::operator new(s); |
2843 | } |
2844 | |
2845 | void growOperands(unsigned Size); |
2846 | void init(unsigned NumReservedValues, const Twine &NameStr); |
2847 | |
2848 | protected: |
2849 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2850 | friend class Instruction; |
2851 | |
2852 | LandingPadInst *cloneImpl() const; |
2853 | |
2854 | public: |
2855 | /// Constructors - NumReservedClauses is a hint for the number of clauses |
2856 | /// that this landingpad will have (use 0 if you really have no idea). |
2857 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, |
2858 | const Twine &NameStr = "", |
2859 | Instruction *InsertBefore = nullptr); |
2860 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, |
2861 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2862 | |
2863 | /// Provide fast operand accessors |
2864 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
2865 | |
2866 | /// Return 'true' if this landingpad instruction is a |
2867 | /// cleanup. I.e., it should be run when unwinding even if its landing pad |
2868 | /// doesn't catch the exception. |
2869 | bool isCleanup() const { return getSubclassData<CleanupField>(); } |
2870 | |
2871 | /// Indicate that this landingpad instruction is a cleanup. |
2872 | void setCleanup(bool V) { setSubclassData<CleanupField>(V); } |
2873 | |
2874 | /// Add a catch or filter clause to the landing pad. |
2875 | void addClause(Constant *ClauseVal); |
2876 | |
2877 | /// Get the value of the clause at index Idx. Use isCatch/isFilter to |
2878 | /// determine what type of clause this is. |
2879 | Constant *getClause(unsigned Idx) const { |
2880 | return cast<Constant>(getOperandList()[Idx]); |
2881 | } |
2882 | |
2883 | /// Return 'true' if the clause at index Idx is a catch clause. |
2884 | bool isCatch(unsigned Idx) const { |
2885 | return !isa<ArrayType>(getOperandList()[Idx]->getType()); |
2886 | } |
2887 | |
2888 | /// Return 'true' if the clause at index Idx is a filter clause. |
2889 | bool isFilter(unsigned Idx) const { |
2890 | return isa<ArrayType>(getOperandList()[Idx]->getType()); |
2891 | } |
2892 | |
2893 | /// Get the number of clauses for this landing pad. |
2894 | unsigned getNumClauses() const { return getNumOperands(); } |
2895 | |
2896 | /// Grow the size of the operand list to accommodate the new |
2897 | /// number of clauses. |
2898 | void reserveClauses(unsigned Size) { growOperands(Size); } |
2899 | |
2900 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2901 | static bool classof(const Instruction *I) { |
2902 | return I->getOpcode() == Instruction::LandingPad; |
2903 | } |
2904 | static bool classof(const Value *V) { |
2905 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2906 | } |
2907 | }; |
2908 | |
2909 | template <> |
2910 | struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { |
2911 | }; |
2912 | |
2913 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
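// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): building a landingpad with
// one catch clause and one filter clause. The names LPadBB, TypeInfo and
// FilterArray are hypothetical; the enclosing function is assumed to already
// have a personality function, TypeInfo to be a non-array type-info constant,
// and FilterArray an array-typed constant of type infos.
static void buildLandingPadSketch(llvm::LLVMContext &Ctx,
                                  llvm::BasicBlock *LPadBB,
                                  llvm::Constant *TypeInfo,
                                  llvm::Constant *FilterArray) {
  using namespace llvm;
  // { i8*, i32 } is the usual landingpad result type for Itanium-style EH.
  Type *LPadTy =
      StructType::get(Type::getInt8PtrTy(Ctx), Type::getInt32Ty(Ctx));
  LandingPadInst *LP = LandingPadInst::Create(LPadTy, /*NumReservedClauses=*/2,
                                              "lpad", /*InsertAtEnd=*/LPadBB);
  LP->setCleanup(true);        // also run this pad when no clause matches
  LP->addClause(TypeInfo);     // non-array clause => catch
  LP->addClause(FilterArray);  // array-typed clause => filter
  assert(LP->isCatch(0) && LP->isFilter(1) && LP->getNumClauses() == 2);
}
// ---------------------------------------------------------------------------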
2914 | |
2915 | //===----------------------------------------------------------------------===// |
2916 | // ReturnInst Class |
2917 | //===----------------------------------------------------------------------===// |
2918 | |
2919 | //===--------------------------------------------------------------------------- |
2920 | /// Return a value (possibly void), from a function. Execution |
2921 | /// does not continue in this function any longer. |
2922 | /// |
2923 | class ReturnInst : public Instruction { |
2924 | ReturnInst(const ReturnInst &RI); |
2925 | |
2926 | private: |
2927 | // ReturnInst constructors: |
2928 | // ReturnInst() - 'ret void' instruction |
2929 | // ReturnInst( null) - 'ret void' instruction |
2930 | // ReturnInst(Value* X) - 'ret X' instruction |
2931 | // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I |
2932 | // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I |
2933 | // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B |
2934 | // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B |
2935 | // |
2936 | // NOTE: If the Value* passed is of type void then the constructor behaves as |
2937 | // if it was passed NULL. |
2938 | explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, |
2939 | Instruction *InsertBefore = nullptr); |
2940 | ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); |
2941 | explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); |
2942 | |
2943 | protected: |
2944 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2945 | friend class Instruction; |
2946 | |
2947 | ReturnInst *cloneImpl() const; |
2948 | |
2949 | public: |
2950 | static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, |
2951 | Instruction *InsertBefore = nullptr) { |
2952 | return new(!!retVal) ReturnInst(C, retVal, InsertBefore); |
2953 | } |
2954 | |
2955 | static ReturnInst* Create(LLVMContext &C, Value *retVal, |
2956 | BasicBlock *InsertAtEnd) { |
2957 | return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); |
2958 | } |
2959 | |
2960 | static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { |
2961 | return new(0) ReturnInst(C, InsertAtEnd); |
2962 | } |
2963 | |
2964 | /// Provide fast operand accessors |
2965 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2966 | |
2967 | /// Convenience accessor. Returns null if there is no return value. |
2968 | Value *getReturnValue() const { |
2969 | return getNumOperands() != 0 ? getOperand(0) : nullptr; |
2970 | } |
2971 | |
2972 | unsigned getNumSuccessors() const { return 0; } |
2973 | |
2974 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2975 | static bool classof(const Instruction *I) { |
2976 | return (I->getOpcode() == Instruction::Ret); |
2977 | } |
2978 | static bool classof(const Value *V) { |
2979 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2980 | } |
2981 | |
2982 | private: |
2983 | BasicBlock *getSuccessor(unsigned idx) const { |
2984 | llvm_unreachable("ReturnInst has no successors!");
2985 | } |
2986 | |
2987 | void setSuccessor(unsigned idx, BasicBlock *B) { |
2988 | llvm_unreachable("ReturnInst has no successors!");
2989 | } |
2990 | }; |
2991 | |
2992 | template <> |
2993 | struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> { |
2994 | }; |
2995 | |
2996 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
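// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): the two common ways to
// terminate a block with a return. BB and RetVal are hypothetical names.
static void buildReturnSketch(llvm::LLVMContext &Ctx, llvm::BasicBlock *BB,
                              llvm::Value *RetVal) {
  using namespace llvm;
  if (RetVal && !RetVal->getType()->isVoidTy())
    ReturnInst::Create(Ctx, RetVal, BB); // 'ret <ty> RetVal' at end of BB
  else
    ReturnInst::Create(Ctx, BB);         // 'ret void' at end of BB
}
// ---------------------------------------------------------------------------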
2997 | |
2998 | //===----------------------------------------------------------------------===// |
2999 | // BranchInst Class |
3000 | //===----------------------------------------------------------------------===// |
3001 | |
3002 | //===--------------------------------------------------------------------------- |
3003 | /// Conditional or Unconditional Branch instruction. |
3004 | /// |
3005 | class BranchInst : public Instruction { |
3006 | /// Ops list - Branches are strange. The operands are ordered: |
3007 | /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because |
3008 | /// they don't have to check whether the branch is conditional. These are
3009 | /// mostly accessed relative to op_end().
3010 | BranchInst(const BranchInst &BI); |
3011 | // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): |
3012 | // BranchInst(BB *B) - 'br B' |
3013 | // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' |
3014 | // BranchInst(BB* B, Inst *I) - 'br B' insert before I |
3015 | // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I |
3016 | // BranchInst(BB* B, BB *I) - 'br B' insert at end |
3017 | // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end |
3018 | explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr); |
3019 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, |
3020 | Instruction *InsertBefore = nullptr); |
3021 | BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); |
3022 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, |
3023 | BasicBlock *InsertAtEnd); |
3024 | |
3025 | void AssertOK(); |
3026 | |
3027 | protected: |
3028 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3029 | friend class Instruction; |
3030 | |
3031 | BranchInst *cloneImpl() const; |
3032 | |
3033 | public: |
3034 | /// Iterator type that casts an operand to a basic block. |
3035 | /// |
3036 | /// This only makes sense because the successors are stored as adjacent |
3037 | /// operands for branch instructions. |
3038 | struct succ_op_iterator |
3039 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, |
3040 | std::random_access_iterator_tag, BasicBlock *, |
3041 | ptrdiff_t, BasicBlock *, BasicBlock *> { |
3042 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} |
3043 | |
3044 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3045 | BasicBlock *operator->() const { return operator*(); } |
3046 | }; |
3047 | |
3048 | /// The const version of `succ_op_iterator`. |
3049 | struct const_succ_op_iterator |
3050 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, |
3051 | std::random_access_iterator_tag, |
3052 | const BasicBlock *, ptrdiff_t, const BasicBlock *, |
3053 | const BasicBlock *> { |
3054 | explicit const_succ_op_iterator(const_value_op_iterator I) |
3055 | : iterator_adaptor_base(I) {} |
3056 | |
3057 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3058 | const BasicBlock *operator->() const { return operator*(); } |
3059 | }; |
3060 | |
3061 | static BranchInst *Create(BasicBlock *IfTrue, |
3062 | Instruction *InsertBefore = nullptr) { |
3063 | return new(1) BranchInst(IfTrue, InsertBefore); |
3064 | } |
3065 | |
3066 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, |
3067 | Value *Cond, Instruction *InsertBefore = nullptr) { |
3068 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); |
3069 | } |
3070 | |
3071 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { |
3072 | return new(1) BranchInst(IfTrue, InsertAtEnd); |
3073 | } |
3074 | |
3075 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, |
3076 | Value *Cond, BasicBlock *InsertAtEnd) { |
3077 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); |
3078 | } |
3079 | |
3080 | /// Transparently provide more efficient getOperand methods. |
3081 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3082 | |
3083 | bool isUnconditional() const { return getNumOperands() == 1; } |
3084 | bool isConditional() const { return getNumOperands() == 3; } |
3085 | |
3086 | Value *getCondition() const { |
3087 | assert(isConditional() && "Cannot get condition of an uncond branch!");
3088 | return Op<-3>(); |
3089 | } |
3090 | |
3091 | void setCondition(Value *V) { |
3092 | assert(isConditional() && "Cannot set condition of unconditional branch!");
3093 | Op<-3>() = V; |
3094 | } |
3095 | |
3096 | unsigned getNumSuccessors() const { return 1+isConditional(); } |
3097 | |
3098 | BasicBlock *getSuccessor(unsigned i) const { |
3099 | assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3100 | return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); |
3101 | } |
3102 | |
3103 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
3104 | assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3105 | *(&Op<-1>() - idx) = NewSucc; |
3106 | } |
3107 | |
3108 | /// Swap the successors of this branch instruction. |
3109 | /// |
3110 | /// Swaps the successors of the branch instruction. This also swaps any |
3111 | /// branch weight metadata associated with the instruction so that it |
3112 | /// continues to map correctly to each operand. |
3113 | void swapSuccessors(); |
3114 | |
3115 | iterator_range<succ_op_iterator> successors() { |
3116 | return make_range( |
3117 | succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)), |
3118 | succ_op_iterator(value_op_end())); |
3119 | } |
3120 | |
3121 | iterator_range<const_succ_op_iterator> successors() const { |
3122 | return make_range(const_succ_op_iterator( |
3123 | std::next(value_op_begin(), isConditional() ? 1 : 0)), |
3124 | const_succ_op_iterator(value_op_end())); |
3125 | } |
3126 | |
3127 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3128 | static bool classof(const Instruction *I) { |
3129 | return (I->getOpcode() == Instruction::Br); |
3130 | } |
3131 | static bool classof(const Value *V) { |
3132 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3133 | } |
3134 | }; |
3135 | |
3136 | template <> |
3137 | struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> { |
3138 | }; |
3139 | |
3140 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
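// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): creating branches and
// reading them back. Pred, TrueBB, FalseBB and Cond are hypothetical names.
static void buildBranchSketch(llvm::BasicBlock *Pred, llvm::BasicBlock *TrueBB,
                              llvm::BasicBlock *FalseBB, llvm::Value *Cond) {
  using namespace llvm;
  // Conditional branch appended to Pred. Operands are laid out
  // [Cond, FalseDest, TrueDest], so successor 0 is the true destination.
  BranchInst *CondBr = BranchInst::Create(TrueBB, FalseBB, Cond, Pred);
  assert(CondBr->isConditional() && CondBr->getCondition() == Cond);
  assert(CondBr->getSuccessor(0) == TrueBB &&
         CondBr->getSuccessor(1) == FalseBB);

  // Note: in the two-argument form the second block is the insertion point,
  // not a successor. This creates 'br label %TrueBB' at the end of FalseBB.
  BranchInst *UncondBr = BranchInst::Create(TrueBB, FalseBB);
  assert(UncondBr->isUnconditional() && UncondBr->getNumSuccessors() == 1);
}
// ---------------------------------------------------------------------------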
3141 | |
3142 | //===----------------------------------------------------------------------===// |
3143 | // SwitchInst Class |
3144 | //===----------------------------------------------------------------------===// |
3145 | |
3146 | //===--------------------------------------------------------------------------- |
3147 | /// Multiway switch |
3148 | /// |
3149 | class SwitchInst : public Instruction { |
3150 | unsigned ReservedSpace; |
3151 | |
3152 | // Operand[0] = Value to switch on |
3153 | // Operand[1] = Default basic block destination |
3154 | // Operand[2n ] = Value to match |
3155 | // Operand[2n+1] = BasicBlock to go to on match |
3156 | SwitchInst(const SwitchInst &SI); |
3157 | |
3158 | /// Create a new switch instruction, specifying a value to switch on and a |
3159 | /// default destination. The number of additional cases can be specified here |
3160 | /// to make memory allocation more efficient. This constructor can also |
3161 | /// auto-insert before another instruction. |
3162 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, |
3163 | Instruction *InsertBefore); |
3164 | |
3165 | /// Create a new switch instruction, specifying a value to switch on and a |
3166 | /// default destination. The number of additional cases can be specified here |
3167 | /// to make memory allocation more efficient. This constructor also |
3168 | /// auto-inserts at the end of the specified BasicBlock. |
3169 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, |
3170 | BasicBlock *InsertAtEnd); |
3171 | |
3172 | // allocate space for exactly zero operands |
3173 | void *operator new(size_t s) { |
3174 | return User::operator new(s); |
3175 | } |
3176 | |
3177 | void init(Value *Value, BasicBlock *Default, unsigned NumReserved); |
3178 | void growOperands(); |
3179 | |
3180 | protected: |
3181 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3182 | friend class Instruction; |
3183 | |
3184 | SwitchInst *cloneImpl() const; |
3185 | |
3186 | public: |
3187 | // A pseudo-index (equal to (unsigned)-2) that identifies the default case.
3188 | static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); |
3189 | |
3190 | template <typename CaseHandleT> class CaseIteratorImpl; |
3191 | |
3192 | /// A handle to a particular switch case. It exposes a convenient interface |
3193 | /// to both the case value and the successor block. |
3194 | /// |
3195 | /// We define this as a template and instantiate it to form both a const and |
3196 | /// non-const handle. |
3197 | template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> |
3198 | class CaseHandleImpl { |
3199 | // Directly befriend both const and non-const iterators. |
3200 | friend class SwitchInst::CaseIteratorImpl< |
3201 | CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; |
3202 | |
3203 | protected: |
3204 | // Expose the switch type we're parameterized with to the iterator. |
3205 | using SwitchInstType = SwitchInstT; |
3206 | |
3207 | SwitchInstT *SI; |
3208 | ptrdiff_t Index; |
3209 | |
3210 | CaseHandleImpl() = default; |
3211 | CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} |
3212 | |
3213 | public: |
3214 | /// Resolves case value for current case. |
3215 | ConstantIntT *getCaseValue() const { |
3216 | assert((unsigned)Index < SI->getNumCases() &&
3217 | "Index out the number of cases.");
3218 | return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); |
3219 | } |
3220 | |
3221 | /// Resolves successor for current case. |
3222 | BasicBlockT *getCaseSuccessor() const { |
3223 | assert(((unsigned)Index < SI->getNumCases() ||
3224 | (unsigned)Index == DefaultPseudoIndex) &&
3225 | "Index out the number of cases.");
3226 | return SI->getSuccessor(getSuccessorIndex()); |
3227 | } |
3228 | |
3229 | /// Returns number of current case. |
3230 | unsigned getCaseIndex() const { return Index; } |
3231 | |
3232 | /// Returns successor index for current case successor. |
3233 | unsigned getSuccessorIndex() const { |
3234 | assert(((unsigned)Index == DefaultPseudoIndex ||
3235 | (unsigned)Index < SI->getNumCases()) &&
3236 | "Index out the number of cases.");
3237 | return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; |
3238 | } |
3239 | |
3240 | bool operator==(const CaseHandleImpl &RHS) const { |
3241 | assert(SI == RHS.SI && "Incompatible operators.");
3242 | return Index == RHS.Index; |
3243 | } |
3244 | }; |
3245 | |
3246 | using ConstCaseHandle = |
3247 | CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; |
3248 | |
3249 | class CaseHandle |
3250 | : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { |
3251 | friend class SwitchInst::CaseIteratorImpl<CaseHandle>; |
3252 | |
3253 | public: |
3254 | CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} |
3255 | |
3256 | /// Sets the new value for current case. |
3257 | void setValue(ConstantInt *V) { |
3258 | assert((unsigned)Index < SI->getNumCases() &&
3259 | "Index out the number of cases.");
3260 | SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); |
3261 | } |
3262 | |
3263 | /// Sets the new successor for current case. |
3264 | void setSuccessor(BasicBlock *S) { |
3265 | SI->setSuccessor(getSuccessorIndex(), S); |
3266 | } |
3267 | }; |
3268 | |
3269 | template <typename CaseHandleT> |
3270 | class CaseIteratorImpl |
3271 | : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, |
3272 | std::random_access_iterator_tag, |
3273 | CaseHandleT> { |
3274 | using SwitchInstT = typename CaseHandleT::SwitchInstType; |
3275 | |
3276 | CaseHandleT Case; |
3277 | |
3278 | public: |
3279 | /// Default constructed iterator is in an invalid state until assigned to |
3280 | /// a case for a particular switch. |
3281 | CaseIteratorImpl() = default; |
3282 | |
3283 | /// Initializes case iterator for given SwitchInst and for given |
3284 | /// case number. |
3285 | CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} |
3286 | |
3287 | /// Initializes case iterator for given SwitchInst and for given |
3288 | /// successor index. |
3289 | static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, |
3290 | unsigned SuccessorIndex) { |
3291 | assert(SuccessorIndex < SI->getNumSuccessors() &&
3292 | "Successor index # out of range!");
3293 | return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1) |
3294 | : CaseIteratorImpl(SI, DefaultPseudoIndex); |
3295 | } |
3296 | |
3297 | /// Support converting to the const variant. This will be a no-op for const |
3298 | /// variant. |
3299 | operator CaseIteratorImpl<ConstCaseHandle>() const { |
3300 | return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); |
3301 | } |
3302 | |
3303 | CaseIteratorImpl &operator+=(ptrdiff_t N) { |
3304 | // Check index correctness after addition. |
3305 | // Note: Index == getNumCases() means end(). |
3306 | assert(Case.Index + N >= 0 &&
3307 | (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3308 | "Case.Index out the number of cases.");
3309 | Case.Index += N; |
3310 | return *this; |
3311 | } |
3312 | CaseIteratorImpl &operator-=(ptrdiff_t N) { |
3313 | // Check index correctness after subtraction. |
3314 | // Note: Case.Index == getNumCases() means end(). |
3315 | assert(Case.Index - N >= 0 &&
3316 | (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3317 | "Case.Index out the number of cases.");
3318 | Case.Index -= N; |
3319 | return *this; |
3320 | } |
3321 | ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { |
3322 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3323 | return Case.Index - RHS.Case.Index; |
3324 | } |
3325 | bool operator==(const CaseIteratorImpl &RHS) const { |
3326 | return Case == RHS.Case; |
3327 | } |
3328 | bool operator<(const CaseIteratorImpl &RHS) const { |
3329 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3330 | return Case.Index < RHS.Case.Index; |
3331 | } |
3332 | CaseHandleT &operator*() { return Case; } |
3333 | const CaseHandleT &operator*() const { return Case; } |
3334 | }; |
3335 | |
3336 | using CaseIt = CaseIteratorImpl<CaseHandle>; |
3337 | using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; |
3338 | |
3339 | static SwitchInst *Create(Value *Value, BasicBlock *Default, |
3340 | unsigned NumCases, |
3341 | Instruction *InsertBefore = nullptr) { |
3342 | return new SwitchInst(Value, Default, NumCases, InsertBefore); |
3343 | } |
3344 | |
3345 | static SwitchInst *Create(Value *Value, BasicBlock *Default, |
3346 | unsigned NumCases, BasicBlock *InsertAtEnd) { |
3347 | return new SwitchInst(Value, Default, NumCases, InsertAtEnd); |
3348 | } |
3349 | |
3350 | /// Provide fast operand accessors |
3351 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3352 | |
3353 | // Accessor Methods for Switch stmt |
3354 | Value *getCondition() const { return getOperand(0); } |
3355 | void setCondition(Value *V) { setOperand(0, V); } |
3356 | |
3357 | BasicBlock *getDefaultDest() const { |
3358 | return cast<BasicBlock>(getOperand(1)); |
3359 | } |
3360 | |
3361 | void setDefaultDest(BasicBlock *DefaultCase) { |
3362 | setOperand(1, reinterpret_cast<Value*>(DefaultCase)); |
3363 | } |
3364 | |
3365 | /// Return the number of 'cases' in this switch instruction, excluding the |
3366 | /// default case. |
3367 | unsigned getNumCases() const { |
3368 | return getNumOperands()/2 - 1; |
3369 | } |
3370 | |
3371 | /// Returns a read/write iterator that points to the first case in the |
3372 | /// SwitchInst. |
3373 | CaseIt case_begin() { |
3374 | return CaseIt(this, 0); |
3375 | } |
3376 | |
3377 | /// Returns a read-only iterator that points to the first case in the |
3378 | /// SwitchInst. |
3379 | ConstCaseIt case_begin() const { |
3380 | return ConstCaseIt(this, 0); |
3381 | } |
3382 | |
3383 | /// Returns a read/write iterator that points one past the last in the |
3384 | /// SwitchInst. |
3385 | CaseIt case_end() { |
3386 | return CaseIt(this, getNumCases()); |
3387 | } |
3388 | |
3389 | /// Returns a read-only iterator that points one past the last in the |
3390 | /// SwitchInst. |
3391 | ConstCaseIt case_end() const { |
3392 | return ConstCaseIt(this, getNumCases()); |
3393 | } |
3394 | |
3395 | /// Iteration adapter for range-for loops. |
3396 | iterator_range<CaseIt> cases() { |
3397 | return make_range(case_begin(), case_end()); |
3398 | } |
3399 | |
3400 | /// Constant iteration adapter for range-for loops. |
3401 | iterator_range<ConstCaseIt> cases() const { |
3402 | return make_range(case_begin(), case_end()); |
3403 | } |
3404 | |
3405 | /// Returns an iterator that points to the default case. |
3406 | /// Note: this iterator only allows resolving the successor; attempting
3407 | /// to resolve the case value causes an assertion.
3408 | /// Also note that incrementing or decrementing this iterator causes an
3409 | /// assertion and leaves the iterator invalid.
3410 | CaseIt case_default() { |
3411 | return CaseIt(this, DefaultPseudoIndex); |
3412 | } |
3413 | ConstCaseIt case_default() const { |
3414 | return ConstCaseIt(this, DefaultPseudoIndex); |
3415 | } |
3416 | |
3417 | /// Search all of the case values for the specified constant. If it is |
3418 | /// explicitly handled, return its case iterator; otherwise return the
3419 | /// default case iterator to indicate that it is handled by the default
3420 | /// handler. |
3421 | CaseIt findCaseValue(const ConstantInt *C) { |
3422 | CaseIt I = llvm::find_if( |
3423 | cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; }); |
3424 | if (I != case_end()) |
3425 | return I; |
3426 | |
3427 | return case_default(); |
3428 | } |
3429 | ConstCaseIt findCaseValue(const ConstantInt *C) const { |
3430 | ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) { |
3431 | return Case.getCaseValue() == C; |
3432 | }); |
3433 | if (I != case_end()) |
3434 | return I; |
3435 | |
3436 | return case_default(); |
3437 | } |
3438 | |
3439 | /// Finds the unique case value for a given successor. Returns null if the |
3440 | /// successor is not found, not unique, or is the default case. |
3441 | ConstantInt *findCaseDest(BasicBlock *BB) { |
3442 | if (BB == getDefaultDest()) |
3443 | return nullptr; |
3444 | |
3445 | ConstantInt *CI = nullptr; |
3446 | for (auto Case : cases()) { |
3447 | if (Case.getCaseSuccessor() != BB) |
3448 | continue; |
3449 | |
3450 | if (CI) |
3451 | return nullptr; // Multiple cases lead to BB. |
3452 | |
3453 | CI = Case.getCaseValue(); |
3454 | } |
3455 | |
3456 | return CI; |
3457 | } |
3458 | |
3459 | /// Add an entry to the switch instruction. |
3460 | /// Note: |
3461 | /// This action invalidates case_end(); the old case_end() iterator will
3462 | /// point to the added case. |
3463 | void addCase(ConstantInt *OnVal, BasicBlock *Dest); |
3464 | |
3465 | /// This method removes the specified case and its successor from the switch |
3466 | /// instruction. Note that this operation may reorder the remaining cases at |
3467 | /// index idx and above. |
3468 | /// Note: |
3469 | /// This action invalidates iterators for all cases following the one removed, |
3470 | /// including the case_end() iterator. It returns an iterator for the next |
3471 | /// case. |
3472 | CaseIt removeCase(CaseIt I); |
3473 | |
3474 | unsigned getNumSuccessors() const { return getNumOperands()/2; } |
3475 | BasicBlock *getSuccessor(unsigned idx) const { |
3476 | assert(idx < getNumSuccessors() && "Successor idx out of range for switch!");
3477 | return cast<BasicBlock>(getOperand(idx*2+1)); |
3478 | } |
3479 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
3480 | assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3481 | setOperand(idx * 2 + 1, NewSucc); |
3482 | } |
3483 | |
3484 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3485 | static bool classof(const Instruction *I) { |
3486 | return I->getOpcode() == Instruction::Switch; |
3487 | } |
3488 | static bool classof(const Value *V) { |
3489 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3490 | } |
3491 | }; |
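// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): building a small switch and
// walking its cases. OnVal, SwitchBB, DefaultBB, CaseBB0 and CaseBB1 are
// hypothetical names; OnVal is assumed to be an i32 value.
static void buildSwitchSketch(llvm::LLVMContext &Ctx, llvm::Value *OnVal,
                              llvm::BasicBlock *SwitchBB,
                              llvm::BasicBlock *DefaultBB,
                              llvm::BasicBlock *CaseBB0,
                              llvm::BasicBlock *CaseBB1) {
  using namespace llvm;
  IntegerType *I32 = Type::getInt32Ty(Ctx);
  SwitchInst *SI =
      SwitchInst::Create(OnVal, DefaultBB, /*NumCases=*/2, SwitchBB);
  SI->addCase(ConstantInt::get(I32, 0), CaseBB0);
  SI->addCase(ConstantInt::get(I32, 1), CaseBB1);

  // Range-for over the non-default cases.
  for (const auto &Case : SI->cases())
    assert(Case.getCaseSuccessor() != nullptr);

  // findCaseValue() returns the default pseudo-case iterator when the value
  // is not explicitly handled.
  assert(SI->findCaseValue(ConstantInt::get(I32, 7)) == SI->case_default());
}
// ---------------------------------------------------------------------------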
3492 | |
3493 | /// A wrapper class to simplify modification of SwitchInst cases along with |
3494 | /// their prof branch_weights metadata. |
3495 | class SwitchInstProfUpdateWrapper { |
3496 | SwitchInst &SI; |
3497 | Optional<SmallVector<uint32_t, 8> > Weights = None; |
3498 | bool Changed = false; |
3499 | |
3500 | protected: |
3501 | static MDNode *getProfBranchWeightsMD(const SwitchInst &SI); |
3502 | |
3503 | MDNode *buildProfBranchWeightsMD(); |
3504 | |
3505 | void init(); |
3506 | |
3507 | public: |
3508 | using CaseWeightOpt = Optional<uint32_t>; |
3509 | SwitchInst *operator->() { return &SI; } |
3510 | SwitchInst &operator*() { return SI; } |
3511 | operator SwitchInst *() { return &SI; } |
3512 | |
3513 | SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } |
3514 | |
3515 | ~SwitchInstProfUpdateWrapper() { |
3516 | if (Changed) |
3517 | SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); |
3518 | } |
3519 | |
3520 | /// Delegate the call to the underlying SwitchInst::removeCase() and remove |
3521 | /// the corresponding branch weight.
3522 | SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); |
3523 | |
3524 | /// Delegate the call to the underlying SwitchInst::addCase() and set the |
3525 | /// specified branch weight for the added case. |
3526 | void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); |
3527 | |
3528 | /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark |
3529 | /// this object so its destructor does not touch the underlying SwitchInst.
3530 | SymbolTableList<Instruction>::iterator eraseFromParent(); |
3531 | |
3532 | void setSuccessorWeight(unsigned idx, CaseWeightOpt W); |
3533 | CaseWeightOpt getSuccessorWeight(unsigned idx); |
3534 | |
3535 | static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); |
3536 | }; |
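// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): removing a case through the
// wrapper so any !prof branch_weights metadata stays consistent. SI is a
// hypothetical SwitchInst that may carry branch weights.
static void removeFirstCaseSketch(llvm::SwitchInst &SI) {
  llvm::SwitchInstProfUpdateWrapper SIW(SI);
  if (SIW->getNumCases() != 0)
    SIW.removeCase(SIW->case_begin()); // also drops the matching weight
  // If anything changed, the wrapper's destructor writes the updated
  // branch_weights metadata back onto the switch.
}
// ---------------------------------------------------------------------------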
3537 | |
3538 | template <> |
3539 | struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { |
3540 | }; |
3541 | |
3542 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3543 | |
3544 | //===----------------------------------------------------------------------===// |
3545 | // IndirectBrInst Class |
3546 | //===----------------------------------------------------------------------===// |
3547 | |
3548 | //===--------------------------------------------------------------------------- |
3549 | /// Indirect Branch Instruction. |
3550 | /// |
3551 | class IndirectBrInst : public Instruction { |
3552 | unsigned ReservedSpace; |
3553 | |
3554 | // Operand[0] = Address to jump to |
3555 | // Operand[n+1] = n-th destination |
3556 | IndirectBrInst(const IndirectBrInst &IBI); |
3557 | |
3558 | /// Create a new indirectbr instruction, specifying an |
3559 | /// Address to jump to. The number of expected destinations can be specified |
3560 | /// here to make memory allocation more efficient. This constructor can also |
3561 | /// autoinsert before another instruction. |
3562 | IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); |
3563 | |
3564 | /// Create a new indirectbr instruction, specifying an |
3565 | /// Address to jump to. The number of expected destinations can be specified |
3566 | /// here to make memory allocation more efficient. This constructor also |
3567 | /// autoinserts at the end of the specified BasicBlock. |
3568 | IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); |
3569 | |
3570 | // allocate space for exactly zero operands |
3571 | void *operator new(size_t s) { |
3572 | return User::operator new(s); |
3573 | } |
3574 | |
3575 | void init(Value *Address, unsigned NumDests); |
3576 | void growOperands(); |
3577 | |
3578 | protected: |
3579 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3580 | friend class Instruction; |
3581 | |
3582 | IndirectBrInst *cloneImpl() const; |
3583 | |
3584 | public: |
3585 | /// Iterator type that casts an operand to a basic block. |
3586 | /// |
3587 | /// This only makes sense because the successors are stored as adjacent |
3588 | /// operands for indirectbr instructions. |
3589 | struct succ_op_iterator |
3590 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, |
3591 | std::random_access_iterator_tag, BasicBlock *, |
3592 | ptrdiff_t, BasicBlock *, BasicBlock *> { |
3593 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} |
3594 | |
3595 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3596 | BasicBlock *operator->() const { return operator*(); } |
3597 | }; |
3598 | |
3599 | /// The const version of `succ_op_iterator`. |
3600 | struct const_succ_op_iterator |
3601 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, |
3602 | std::random_access_iterator_tag, |
3603 | const BasicBlock *, ptrdiff_t, const BasicBlock *, |
3604 | const BasicBlock *> { |
3605 | explicit const_succ_op_iterator(const_value_op_iterator I) |
3606 | : iterator_adaptor_base(I) {} |
3607 | |
3608 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3609 | const BasicBlock *operator->() const { return operator*(); } |
3610 | }; |
3611 | |
3612 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3613 | Instruction *InsertBefore = nullptr) { |
3614 | return new IndirectBrInst(Address, NumDests, InsertBefore); |
3615 | } |
3616 | |
3617 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3618 | BasicBlock *InsertAtEnd) { |
3619 | return new IndirectBrInst(Address, NumDests, InsertAtEnd); |
3620 | } |
3621 | |
3622 | /// Provide fast operand accessors. |
3623 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3624 | |
3625 | // Accessor Methods for IndirectBrInst instruction. |
3626 | Value *getAddress() { return getOperand(0); } |
3627 | const Value *getAddress() const { return getOperand(0); } |
3628 | void setAddress(Value *V) { setOperand(0, V); } |
3629 | |
3630 | /// return the number of possible destinations in this |
3631 | /// indirectbr instruction. |
3632 | unsigned getNumDestinations() const { return getNumOperands()-1; } |
3633 | |
3634 | /// Return the specified destination. |
3635 | BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } |
3636 | const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } |
3637 | |
3638 | /// Add a destination. |
3639 | /// |
3640 | void addDestination(BasicBlock *Dest); |
3641 | |
3642 | /// This method removes the specified successor from the |
3643 | /// indirectbr instruction. |
3644 | void removeDestination(unsigned i); |
3645 | |
3646 | unsigned getNumSuccessors() const { return getNumOperands()-1; } |
3647 | BasicBlock *getSuccessor(unsigned i) const { |
3648 | return cast<BasicBlock>(getOperand(i+1)); |
3649 | } |
3650 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3651 | setOperand(i + 1, NewSucc); |
3652 | } |
3653 | |
3654 | iterator_range<succ_op_iterator> successors() { |
3655 | return make_range(succ_op_iterator(std::next(value_op_begin())), |
3656 | succ_op_iterator(value_op_end())); |
3657 | } |
3658 | |
3659 | iterator_range<const_succ_op_iterator> successors() const { |
3660 | return make_range(const_succ_op_iterator(std::next(value_op_begin())), |
3661 | const_succ_op_iterator(value_op_end())); |
3662 | } |
3663 | |
3664 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3665 | static bool classof(const Instruction *I) { |
3666 | return I->getOpcode() == Instruction::IndirectBr; |
3667 | } |
3668 | static bool classof(const Value *V) { |
3669 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3670 | } |
3671 | }; |
3672 | |
3673 | template <> |
3674 | struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { |
3675 | }; |
3676 | |
3677 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
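// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): creating an indirectbr from
// a blockaddress constant. F, Pred and Target are hypothetical names; Target
// is assumed to belong to F and not be its entry block.
static void buildIndirectBrSketch(llvm::Function *F, llvm::BasicBlock *Pred,
                                  llvm::BasicBlock *Target) {
  using namespace llvm;
  Value *Addr = BlockAddress::get(F, Target);
  IndirectBrInst *IBI =
      IndirectBrInst::Create(Addr, /*NumDests=*/1, /*InsertAtEnd=*/Pred);
  // Every block the address might point to must be listed as a destination.
  IBI->addDestination(Target);
  assert(IBI->getNumDestinations() == 1 && IBI->getDestination(0) == Target);
}
// ---------------------------------------------------------------------------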
3678 | |
3679 | //===----------------------------------------------------------------------===// |
3680 | // InvokeInst Class |
3681 | //===----------------------------------------------------------------------===// |
3682 | |
3683 | /// Invoke instruction. The SubclassData field is used to hold the |
3684 | /// calling convention of the call. |
3685 | /// |
3686 | class InvokeInst : public CallBase { |
3687 | /// The number of operands for this call beyond the called function, |
3688 | /// arguments, and operand bundles. |
3689 | static constexpr int NumExtraOperands = 2; |
3690 | |
3691 | /// The index from the end of the operand array to the normal destination. |
3692 | static constexpr int NormalDestOpEndIdx = -3; |
3693 | |
3694 | /// The index from the end of the operand array to the unwind destination. |
3695 | static constexpr int UnwindDestOpEndIdx = -2; |
3696 | |
3697 | InvokeInst(const InvokeInst &BI); |
3698 | |
3699 | /// Construct an InvokeInst given a range of arguments. |
3700 | /// |
3701 | /// Construct an InvokeInst from a range of arguments |
3702 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3703 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3704 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3705 | const Twine &NameStr, Instruction *InsertBefore); |
3706 | |
3707 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3708 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3709 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3710 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
3711 | |
3712 | void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3713 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3714 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
3715 | |
3716 | /// Compute the number of operands to allocate. |
3717 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
3718 | // We need one operand for the called function, plus our extra operands and |
3719 | // the input operand counts provided. |
3720 | return 1 + NumExtraOperands + NumArgs + NumBundleInputs; |
3721 | } |
3722 | |
3723 | protected: |
3724 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3725 | friend class Instruction; |
3726 | |
3727 | InvokeInst *cloneImpl() const; |
3728 | |
3729 | public: |
3730 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3731 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3732 | const Twine &NameStr, |
3733 | Instruction *InsertBefore = nullptr) { |
3734 | int NumOperands = ComputeNumOperands(Args.size()); |
3735 | return new (NumOperands) |
3736 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3737 | NameStr, InsertBefore); |
3738 | } |
3739 | |
3740 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3741 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3742 | ArrayRef<OperandBundleDef> Bundles = None, |
3743 | const Twine &NameStr = "", |
3744 | Instruction *InsertBefore = nullptr) { |
3745 | int NumOperands = |
3746 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3747 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3748 | |
3749 | return new (NumOperands, DescriptorBytes) |
3750 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3751 | NameStr, InsertBefore); |
3752 | } |
3753 | |
3754 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3755 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3756 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3757 | int NumOperands = ComputeNumOperands(Args.size()); |
3758 | return new (NumOperands) |
3759 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3760 | NameStr, InsertAtEnd); |
3761 | } |
3762 | |
3763 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3764 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3765 | ArrayRef<OperandBundleDef> Bundles, |
3766 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3767 | int NumOperands = |
3768 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3769 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3770 | |
3771 | return new (NumOperands, DescriptorBytes) |
3772 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3773 | NameStr, InsertAtEnd); |
3774 | } |
3775 | |
3776 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3777 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3778 | const Twine &NameStr, |
3779 | Instruction *InsertBefore = nullptr) { |
3780 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3781 | IfException, Args, None, NameStr, InsertBefore); |
3782 | } |
3783 | |
3784 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3785 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3786 | ArrayRef<OperandBundleDef> Bundles = None, |
3787 | const Twine &NameStr = "", |
3788 | Instruction *InsertBefore = nullptr) { |
3789 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3790 | IfException, Args, Bundles, NameStr, InsertBefore); |
3791 | } |
3792 | |
3793 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3794 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3795 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3796 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3797 | IfException, Args, NameStr, InsertAtEnd); |
3798 | } |
3799 | |
3800 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3801 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3802 | ArrayRef<OperandBundleDef> Bundles, |
3803 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3804 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3805 | IfException, Args, Bundles, NameStr, InsertAtEnd); |
3806 | } |
3807 | |
3808 | /// Create a clone of \p II with a different set of operand bundles and |
3809 | /// insert it before \p InsertPt. |
3810 | /// |
3811 | /// The returned invoke instruction is identical to \p II in every way except |
3812 | /// that the operand bundles for the new instruction are set to the operand |
3813 | /// bundles in \p Bundles. |
3814 | static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, |
3815 | Instruction *InsertPt = nullptr); |
3816 | |
3817 | // get*Dest - Return the destination basic blocks... |
3818 | BasicBlock *getNormalDest() const { |
3819 | return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); |
3820 | } |
3821 | BasicBlock *getUnwindDest() const { |
3822 | return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); |
3823 | } |
3824 | void setNormalDest(BasicBlock *B) { |
3825 | Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3826 | } |
3827 | void setUnwindDest(BasicBlock *B) { |
3828 | Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3829 | } |
3830 | |
3831 | /// Get the landingpad instruction from the landing pad |
3832 | /// block (the unwind destination). |
3833 | LandingPadInst *getLandingPadInst() const; |
3834 | |
3835 | BasicBlock *getSuccessor(unsigned i) const { |
3836 | assert(i < 2 && "Successor # out of range for invoke!");
3837 | return i == 0 ? getNormalDest() : getUnwindDest(); |
3838 | } |
3839 | |
3840 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3841 | assert(i < 2 && "Successor # out of range for invoke!");
3842 | if (i == 0) |
3843 | setNormalDest(NewSucc); |
3844 | else |
3845 | setUnwindDest(NewSucc); |
3846 | } |
3847 | |
3848 | unsigned getNumSuccessors() const { return 2; } |
3849 | |
3850 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3851 | static bool classof(const Instruction *I) { |
3852 | return (I->getOpcode() == Instruction::Invoke); |
3853 | } |
3854 | static bool classof(const Value *V) { |
3855 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3856 | } |
3857 | |
3858 | private: |
3859 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
3860 | // method so that subclasses cannot accidentally use it. |
3861 | template <typename Bitfield> |
3862 | void setSubclassData(typename Bitfield::Type Value) { |
3863 | Instruction::setSubclassData<Bitfield>(Value); |
3864 | } |
3865 | }; |
3866 | |
3867 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3868 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3869 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3870 | const Twine &NameStr, Instruction *InsertBefore) |
3871 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3872 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3873 | InsertBefore) { |
3874 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3875 | } |
3876 | |
3877 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3878 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3879 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3880 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
3881 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3882 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3883 | InsertAtEnd) { |
3884 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3885 | } |
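// Illustrative sketch (hypothetical helper, not part of this header): one way
// a front end might drive the InvokeInst::Create factories declared above.
// Only the overloads and accessors shown in this file are assumed; the helper
// name and its parameters are invented for the example.
static InvokeInst *emitInvokeSketch(FunctionCallee Callee, ArrayRef<Value *> Args,
                                    BasicBlock *EmitInto, BasicBlock *Cont,
                                    BasicBlock *LPad) {
  // Terminate EmitInto with an invoke: control resumes in Cont after a normal
  // return and transfers to LPad (a block starting with a landingpad) on unwind.
  InvokeInst *II = InvokeInst::Create(Callee, Cont, LPad, Args, "", EmitInto);
  // Both destinations are visible through the generic successor interface as
  // well as the dedicated accessors.
  assert(II->getSuccessor(0) == II->getNormalDest());
  assert(II->getSuccessor(1) == II->getUnwindDest());
  return II;
}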
3886 | |
3887 | //===----------------------------------------------------------------------===// |
3888 | // CallBrInst Class |
3889 | //===----------------------------------------------------------------------===// |
3890 | |
3891 | /// CallBr instruction, tracking function calls that may not return control but |
3892 | /// instead transfer it to a third location. The SubclassData field is used to |
3893 | /// hold the calling convention of the call. |
3894 | /// |
3895 | class CallBrInst : public CallBase { |
3896 | |
3897 | unsigned NumIndirectDests; |
3898 | |
3899 | CallBrInst(const CallBrInst &BI); |
3900 | |
3901 | /// Construct a CallBrInst given a range of arguments; callers should use
3902 | /// one of the Create factory methods rather than invoking this constructor
3903 | /// directly.
3904 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
3905 | ArrayRef<BasicBlock *> IndirectDests, |
3906 | ArrayRef<Value *> Args, |
3907 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3908 | const Twine &NameStr, Instruction *InsertBefore); |
3909 | |
3910 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
3911 | ArrayRef<BasicBlock *> IndirectDests, |
3912 | ArrayRef<Value *> Args, |
3913 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3914 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
3915 | |
3916 | void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, |
3917 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, |
3918 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
3919 | |
3920 | /// Should the Indirect Destinations change, scan + update the Arg list. |
3921 | void updateArgBlockAddresses(unsigned i, BasicBlock *B); |
3922 | |
3923 | /// Compute the number of operands to allocate. |
3924 | static int ComputeNumOperands(int NumArgs, int NumIndirectDests, |
3925 | int NumBundleInputs = 0) { |
3926 | // We need one operand for the called function, plus our extra operands and |
3927 | // the input operand counts provided. |
3928 | return 2 + NumIndirectDests + NumArgs + NumBundleInputs; |
3929 | } |
3930 | |
3931 | protected: |
3932 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3933 | friend class Instruction; |
3934 | |
3935 | CallBrInst *cloneImpl() const; |
3936 | |
3937 | public: |
3938 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3939 | BasicBlock *DefaultDest, |
3940 | ArrayRef<BasicBlock *> IndirectDests, |
3941 | ArrayRef<Value *> Args, const Twine &NameStr, |
3942 | Instruction *InsertBefore = nullptr) { |
3943 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
3944 | return new (NumOperands) |
3945 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
3946 | NumOperands, NameStr, InsertBefore); |
3947 | } |
3948 | |
3949 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3950 | BasicBlock *DefaultDest, |
3951 | ArrayRef<BasicBlock *> IndirectDests, |
3952 | ArrayRef<Value *> Args, |
3953 | ArrayRef<OperandBundleDef> Bundles = None, |
3954 | const Twine &NameStr = "", |
3955 | Instruction *InsertBefore = nullptr) { |
3956 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
3957 | CountBundleInputs(Bundles)); |
3958 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3959 | |
3960 | return new (NumOperands, DescriptorBytes) |
3961 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
3962 | NumOperands, NameStr, InsertBefore); |
3963 | } |
3964 | |
3965 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3966 | BasicBlock *DefaultDest, |
3967 | ArrayRef<BasicBlock *> IndirectDests, |
3968 | ArrayRef<Value *> Args, const Twine &NameStr, |
3969 | BasicBlock *InsertAtEnd) { |
3970 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
3971 | return new (NumOperands) |
3972 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
3973 | NumOperands, NameStr, InsertAtEnd); |
3974 | } |
3975 | |
3976 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3977 | BasicBlock *DefaultDest, |
3978 | ArrayRef<BasicBlock *> IndirectDests, |
3979 | ArrayRef<Value *> Args, |
3980 | ArrayRef<OperandBundleDef> Bundles, |
3981 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3982 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
3983 | CountBundleInputs(Bundles)); |
3984 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3985 | |
3986 | return new (NumOperands, DescriptorBytes) |
3987 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
3988 | NumOperands, NameStr, InsertAtEnd); |
3989 | } |
3990 | |
3991 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
3992 | ArrayRef<BasicBlock *> IndirectDests, |
3993 | ArrayRef<Value *> Args, const Twine &NameStr, |
3994 | Instruction *InsertBefore = nullptr) { |
3995 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
3996 | IndirectDests, Args, NameStr, InsertBefore); |
3997 | } |
3998 | |
3999 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4000 | ArrayRef<BasicBlock *> IndirectDests, |
4001 | ArrayRef<Value *> Args, |
4002 | ArrayRef<OperandBundleDef> Bundles = None, |
4003 | const Twine &NameStr = "", |
4004 | Instruction *InsertBefore = nullptr) { |
4005 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4006 | IndirectDests, Args, Bundles, NameStr, InsertBefore); |
4007 | } |
4008 | |
4009 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4010 | ArrayRef<BasicBlock *> IndirectDests, |
4011 | ArrayRef<Value *> Args, const Twine &NameStr, |
4012 | BasicBlock *InsertAtEnd) { |
4013 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4014 | IndirectDests, Args, NameStr, InsertAtEnd); |
4015 | } |
4016 | |
4017 | static CallBrInst *Create(FunctionCallee Func, |
4018 | BasicBlock *DefaultDest, |
4019 | ArrayRef<BasicBlock *> IndirectDests, |
4020 | ArrayRef<Value *> Args, |
4021 | ArrayRef<OperandBundleDef> Bundles, |
4022 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4023 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4024 | IndirectDests, Args, Bundles, NameStr, InsertAtEnd); |
4025 | } |
4026 | |
4027 | /// Create a clone of \p CBI with a different set of operand bundles and |
4028 | /// insert it before \p InsertPt. |
4029 | /// |
4030 | /// The returned callbr instruction is identical to \p CBI in every way |
4031 | /// except that the operand bundles for the new instruction are set to the |
4032 | /// operand bundles in \p Bundles. |
4033 | static CallBrInst *Create(CallBrInst *CBI, |
4034 | ArrayRef<OperandBundleDef> Bundles, |
4035 | Instruction *InsertPt = nullptr); |
4036 | |
4037 | /// Return the number of callbr indirect dest labels. |
4038 | /// |
4039 | unsigned getNumIndirectDests() const { return NumIndirectDests; } |
4040 | |
4041 | /// getIndirectDestLabel - Return the i-th indirect dest label. |
4042 | /// |
4043 | Value *getIndirectDestLabel(unsigned i) const { |
4044 | assert(i < getNumIndirectDests() && "Out of bounds!");
4045 | return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() + |
4046 | 1); |
4047 | } |
4048 | |
4049 | Value *getIndirectDestLabelUse(unsigned i) const { |
4050 | assert(i < getNumIndirectDests() && "Out of bounds!");
4051 | return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() + |
4052 | 1); |
4053 | } |
4054 | |
4055 | // Return the destination basic blocks... |
4056 | BasicBlock *getDefaultDest() const { |
4057 | return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); |
4058 | } |
4059 | BasicBlock *getIndirectDest(unsigned i) const { |
4060 | return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); |
4061 | } |
4062 | SmallVector<BasicBlock *, 16> getIndirectDests() const { |
4063 | SmallVector<BasicBlock *, 16> IndirectDests; |
4064 | for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) |
4065 | IndirectDests.push_back(getIndirectDest(i)); |
4066 | return IndirectDests; |
4067 | } |
4068 | void setDefaultDest(BasicBlock *B) { |
4069 | *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); |
4070 | } |
4071 | void setIndirectDest(unsigned i, BasicBlock *B) { |
4072 | updateArgBlockAddresses(i, B); |
4073 | *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); |
4074 | } |
4075 | |
4076 | BasicBlock *getSuccessor(unsigned i) const { |
4077 | assert(i < getNumSuccessors() + 1 &&
4078 |        "Successor # out of range for callbr!");
4079 | return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); |
4080 | } |
4081 | |
4082 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
4083 | assert(i < getNumIndirectDests() + 1 &&
4084 |        "Successor # out of range for callbr!");
4085 | return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); |
4086 | } |
4087 | |
4088 | unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } |
4089 | |
4090 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4091 | static bool classof(const Instruction *I) { |
4092 | return (I->getOpcode() == Instruction::CallBr); |
4093 | } |
4094 | static bool classof(const Value *V) { |
4095 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4096 | } |
4097 | |
4098 | private: |
4099 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4100 | // method so that subclasses cannot accidentally use it. |
4101 | template <typename Bitfield> |
4102 | void setSubclassData(typename Bitfield::Type Value) { |
4103 | Instruction::setSubclassData<Bitfield>(Value); |
4104 | } |
4105 | }; |
4106 | |
4107 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4108 | ArrayRef<BasicBlock *> IndirectDests, |
4109 | ArrayRef<Value *> Args, |
4110 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4111 | const Twine &NameStr, Instruction *InsertBefore) |
4112 | : CallBase(Ty->getReturnType(), Instruction::CallBr, |
4113 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4114 | InsertBefore) { |
4115 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4116 | } |
4117 | |
4118 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4119 | ArrayRef<BasicBlock *> IndirectDests, |
4120 | ArrayRef<Value *> Args, |
4121 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4122 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
4123 | : CallBase(Ty->getReturnType(), Instruction::CallBr, |
4124 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4125 | InsertAtEnd) { |
4126 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4127 | } |
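// Illustrative sketch (hypothetical helper): building a callbr, the terminator
// used chiefly for 'asm goto', with a single indirect destination.  The helper
// name and parameters are invented; only the Create overloads above are assumed.
static CallBrInst *emitCallBrSketch(FunctionCallee Callee, ArrayRef<Value *> Args,
                                    BasicBlock *Fallthrough, BasicBlock *IndirectBB,
                                    BasicBlock *EmitInto) {
  CallBrInst *CBI = CallBrInst::Create(Callee, Fallthrough, {IndirectBB}, Args,
                                       "", EmitInto);
  // Successor 0 is the default (fallthrough) destination; successors 1..N are
  // the indirect destinations, in the order they were supplied.
  assert(CBI->getNumSuccessors() == 2);
  assert(CBI->getDefaultDest() == Fallthrough);
  assert(CBI->getIndirectDest(0) == IndirectBB);
  return CBI;
}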
4128 | |
4129 | //===----------------------------------------------------------------------===// |
4130 | // ResumeInst Class |
4131 | //===----------------------------------------------------------------------===// |
4132 | |
4133 | //===--------------------------------------------------------------------------- |
4134 | /// Resume the propagation of an exception. |
4135 | /// |
4136 | class ResumeInst : public Instruction { |
4137 | ResumeInst(const ResumeInst &RI); |
4138 | |
4139 | explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); |
4140 | ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); |
4141 | |
4142 | protected: |
4143 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4144 | friend class Instruction; |
4145 | |
4146 | ResumeInst *cloneImpl() const; |
4147 | |
4148 | public: |
4149 | static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { |
4150 | return new(1) ResumeInst(Exn, InsertBefore); |
4151 | } |
4152 | |
4153 | static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { |
4154 | return new(1) ResumeInst(Exn, InsertAtEnd); |
4155 | } |
4156 | |
4157 | /// Provide fast operand accessors |
4158 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4159 | |
4160 | /// Convenience accessor. |
4161 | Value *getValue() const { return Op<0>(); } |
4162 | |
4163 | unsigned getNumSuccessors() const { return 0; } |
4164 | |
4165 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4166 | static bool classof(const Instruction *I) { |
4167 | return I->getOpcode() == Instruction::Resume; |
4168 | } |
4169 | static bool classof(const Value *V) { |
4170 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4171 | } |
4172 | |
4173 | private: |
4174 | BasicBlock *getSuccessor(unsigned idx) const { |
4175 | llvm_unreachable("ResumeInst has no successors!");
4176 | } |
4177 | |
4178 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
4179 | llvm_unreachable("ResumeInst has no successors!");
4180 | } |
4181 | }; |
4182 | |
4183 | template <> |
4184 | struct OperandTraits<ResumeInst> : |
4185 | public FixedNumOperandTraits<ResumeInst, 1> { |
4186 | }; |
4187 | |
4188 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
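// Illustrative sketch (hypothetical helper): re-raising an in-flight exception
// from a cleanup path.  ExnAggregate is typically the value produced by the
// enclosing landingpad; the helper name is invented for the example.
static ResumeInst *emitResumeSketch(Value *ExnAggregate, BasicBlock *CleanupBB) {
  // resume terminates CleanupBB and continues unwinding with the given value.
  return ResumeInst::Create(ExnAggregate, CleanupBB);
}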
4189 | |
4190 | //===----------------------------------------------------------------------===// |
4191 | // CatchSwitchInst Class |
4192 | //===----------------------------------------------------------------------===// |
4193 | class CatchSwitchInst : public Instruction { |
4194 | using UnwindDestField = BoolBitfieldElementT<0>; |
4195 | |
4196 | /// The number of operands actually allocated. NumOperands is |
4197 | /// the number actually in use. |
4198 | unsigned ReservedSpace; |
4199 | |
4200 | // Operand[0] = Outer scope |
4201 | // Operand[1] = Unwind block destination |
4202 | // Operand[n] = BasicBlock to go to on match |
4203 | CatchSwitchInst(const CatchSwitchInst &CSI); |
4204 | |
4205 | /// Create a new catchswitch instruction, specifying the parent pad and an
4206 | /// optional unwind destination. The number of handlers can be specified
4207 | /// here to make memory allocation more efficient.
4208 | /// This constructor can also autoinsert before another instruction.
4209 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4210 | unsigned NumHandlers, const Twine &NameStr, |
4211 | Instruction *InsertBefore); |
4212 | |
4213 | /// Create a new catchswitch instruction, specifying the parent pad and an
4214 | /// optional unwind destination. The number of handlers can be specified
4215 | /// here to make memory allocation more efficient.
4216 | /// This constructor also autoinserts at the end of the specified BasicBlock.
4217 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4218 | unsigned NumHandlers, const Twine &NameStr, |
4219 | BasicBlock *InsertAtEnd); |
4220 | |
4221 | // allocate space for exactly zero operands |
4222 | void *operator new(size_t s) { return User::operator new(s); } |
4223 | |
4224 | void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); |
4225 | void growOperands(unsigned Size); |
4226 | |
4227 | protected: |
4228 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4229 | friend class Instruction; |
4230 | |
4231 | CatchSwitchInst *cloneImpl() const; |
4232 | |
4233 | public: |
4234 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4235 | unsigned NumHandlers, |
4236 | const Twine &NameStr = "", |
4237 | Instruction *InsertBefore = nullptr) { |
4238 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4239 | InsertBefore); |
4240 | } |
4241 | |
4242 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4243 | unsigned NumHandlers, const Twine &NameStr, |
4244 | BasicBlock *InsertAtEnd) { |
4245 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4246 | InsertAtEnd); |
4247 | } |
4248 | |
4249 | /// Provide fast operand accessors |
4250 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4251 | |
4252 | // Accessors for the parent pad of this catchswitch.
4253 | Value *getParentPad() const { return getOperand(0); } |
4254 | void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } |
4255 | |
4256 | // Accessors for the unwind destination of this catchswitch.
4257 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } |
4258 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4259 | BasicBlock *getUnwindDest() const { |
4260 | if (hasUnwindDest()) |
4261 | return cast<BasicBlock>(getOperand(1)); |
4262 | return nullptr; |
4263 | } |
4264 | void setUnwindDest(BasicBlock *UnwindDest) { |
4265 | assert(UnwindDest);
4266 | assert(hasUnwindDest());
4267 | setOperand(1, UnwindDest); |
4268 | } |
4269 | |
4270 | /// Return the number of 'handlers' in this catchswitch
4271 | /// instruction, not counting the unwind destination.
4272 | unsigned getNumHandlers() const { |
4273 | if (hasUnwindDest()) |
4274 | return getNumOperands() - 2; |
4275 | return getNumOperands() - 1; |
4276 | } |
4277 | |
4278 | private: |
4279 | static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } |
4280 | static const BasicBlock *handler_helper(const Value *V) { |
4281 | return cast<BasicBlock>(V); |
4282 | } |
4283 | |
4284 | public: |
4285 | using DerefFnTy = BasicBlock *(*)(Value *); |
4286 | using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; |
4287 | using handler_range = iterator_range<handler_iterator>; |
4288 | using ConstDerefFnTy = const BasicBlock *(*)(const Value *); |
4289 | using const_handler_iterator = |
4290 | mapped_iterator<const_op_iterator, ConstDerefFnTy>; |
4291 | using const_handler_range = iterator_range<const_handler_iterator>; |
4292 | |
4293 | /// Returns an iterator that points to the first handler in CatchSwitchInst. |
4294 | handler_iterator handler_begin() { |
4295 | op_iterator It = op_begin() + 1; |
4296 | if (hasUnwindDest()) |
4297 | ++It; |
4298 | return handler_iterator(It, DerefFnTy(handler_helper)); |
4299 | } |
4300 | |
4301 | /// Returns an iterator that points to the first handler in the |
4302 | /// CatchSwitchInst. |
4303 | const_handler_iterator handler_begin() const { |
4304 | const_op_iterator It = op_begin() + 1; |
4305 | if (hasUnwindDest()) |
4306 | ++It; |
4307 | return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); |
4308 | } |
4309 | |
4310 | /// Returns a read-only iterator that points one past the last |
4311 | /// handler in the CatchSwitchInst. |
4312 | handler_iterator handler_end() { |
4313 | return handler_iterator(op_end(), DerefFnTy(handler_helper)); |
4314 | } |
4315 | |
4316 | /// Returns an iterator that points one past the last handler in the |
4317 | /// CatchSwitchInst. |
4318 | const_handler_iterator handler_end() const { |
4319 | return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); |
4320 | } |
4321 | |
4322 | /// iteration adapter for range-for loops. |
4323 | handler_range handlers() { |
4324 | return make_range(handler_begin(), handler_end()); |
4325 | } |
4326 | |
4327 | /// iteration adapter for range-for loops. |
4328 | const_handler_range handlers() const { |
4329 | return make_range(handler_begin(), handler_end()); |
4330 | } |
4331 | |
4332 | /// Add an entry to the catchswitch instruction.
4333 | /// Note:
4334 | /// This action invalidates handler_end(). The old handler_end() iterator
4335 | /// will point to the added handler.
4336 | void addHandler(BasicBlock *Dest); |
4337 | |
4338 | void removeHandler(handler_iterator HI); |
4339 | |
4340 | unsigned getNumSuccessors() const { return getNumOperands() - 1; } |
4341 | BasicBlock *getSuccessor(unsigned Idx) const { |
4342 | assert(Idx < getNumSuccessors() &&
4343 |        "Successor # out of range for catchswitch!");
4344 | return cast<BasicBlock>(getOperand(Idx + 1)); |
4345 | } |
4346 | void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { |
4347 | assert(Idx < getNumSuccessors() &&
4348 |        "Successor # out of range for catchswitch!");
4349 | setOperand(Idx + 1, NewSucc); |
4350 | } |
4351 | |
4352 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4353 | static bool classof(const Instruction *I) { |
4354 | return I->getOpcode() == Instruction::CatchSwitch; |
4355 | } |
4356 | static bool classof(const Value *V) { |
4357 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4358 | } |
4359 | }; |
4360 | |
4361 | template <> |
4362 | struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; |
4363 | |
4364 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
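// Illustrative sketch (hypothetical helper): assembling a catchswitch with one
// handler.  ParentPad is ConstantTokenNone for a function-level catchswitch or
// the enclosing funclet pad otherwise; UnwindDest may be null to unwind to the
// caller.  The helper name and parameters are invented for the example.
static CatchSwitchInst *emitCatchSwitchSketch(Value *ParentPad,
                                              BasicBlock *UnwindDest,
                                              BasicBlock *HandlerBB,
                                              BasicBlock *DispatchBB) {
  // Reserve room for one handler up front; addHandler grows the hung-off
  // operand list if more handlers are appended later.
  CatchSwitchInst *CS = CatchSwitchInst::Create(ParentPad, UnwindDest,
                                                /*NumHandlers=*/1, "dispatch",
                                                DispatchBB);
  CS->addHandler(HandlerBB);
  assert(CS->getNumHandlers() == 1);
  return CS;
}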
4365 | |
4366 | //===----------------------------------------------------------------------===// |
4367 | // CleanupPadInst Class |
4368 | //===----------------------------------------------------------------------===// |
4369 | class CleanupPadInst : public FuncletPadInst { |
4370 | private: |
4371 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4372 | unsigned Values, const Twine &NameStr, |
4373 | Instruction *InsertBefore) |
4374 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4375 | NameStr, InsertBefore) {} |
4376 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4377 | unsigned Values, const Twine &NameStr, |
4378 | BasicBlock *InsertAtEnd) |
4379 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4380 | NameStr, InsertAtEnd) {} |
4381 | |
4382 | public: |
4383 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, |
4384 | const Twine &NameStr = "", |
4385 | Instruction *InsertBefore = nullptr) { |
4386 | unsigned Values = 1 + Args.size(); |
4387 | return new (Values) |
4388 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); |
4389 | } |
4390 | |
4391 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, |
4392 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4393 | unsigned Values = 1 + Args.size(); |
4394 | return new (Values) |
4395 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); |
4396 | } |
4397 | |
4398 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4399 | static bool classof(const Instruction *I) { |
4400 | return I->getOpcode() == Instruction::CleanupPad; |
4401 | } |
4402 | static bool classof(const Value *V) { |
4403 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4404 | } |
4405 | }; |
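// Illustrative sketch (hypothetical helper): opening a cleanup funclet.  The
// parent pad is ConstantTokenNone when the cleanup is not nested inside another
// funclet; the argument list is usually empty.  Names here are invented.
static CleanupPadInst *emitCleanupPadSketch(Value *ParentPad, BasicBlock *CleanupBB) {
  // A cleanuppad must be the first non-PHI instruction of its block, so this
  // assumes CleanupBB is still empty.
  return CleanupPadInst::Create(ParentPad, /*Args=*/None, "cleanup", CleanupBB);
}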
4406 | |
4407 | //===----------------------------------------------------------------------===// |
4408 | // CatchPadInst Class |
4409 | //===----------------------------------------------------------------------===// |
4410 | class CatchPadInst : public FuncletPadInst { |
4411 | private: |
4412 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4413 | unsigned Values, const Twine &NameStr, |
4414 | Instruction *InsertBefore) |
4415 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4416 | NameStr, InsertBefore) {} |
4417 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4418 | unsigned Values, const Twine &NameStr, |
4419 | BasicBlock *InsertAtEnd) |
4420 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4421 | NameStr, InsertAtEnd) {} |
4422 | |
4423 | public: |
4424 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4425 | const Twine &NameStr = "", |
4426 | Instruction *InsertBefore = nullptr) { |
4427 | unsigned Values = 1 + Args.size(); |
4428 | return new (Values) |
4429 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); |
4430 | } |
4431 | |
4432 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4433 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4434 | unsigned Values = 1 + Args.size(); |
4435 | return new (Values) |
4436 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); |
4437 | } |
4438 | |
4439 | /// Convenience accessors |
4440 | CatchSwitchInst *getCatchSwitch() const { |
4441 | return cast<CatchSwitchInst>(Op<-1>()); |
4442 | } |
4443 | void setCatchSwitch(Value *CatchSwitch) { |
4444 | assert(CatchSwitch);
4445 | Op<-1>() = CatchSwitch; |
4446 | } |
4447 | |
4448 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4449 | static bool classof(const Instruction *I) { |
4450 | return I->getOpcode() == Instruction::CatchPad; |
4451 | } |
4452 | static bool classof(const Value *V) { |
4453 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4454 | } |
4455 | }; |
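// Illustrative sketch (hypothetical helper): attaching a catchpad to an
// existing catchswitch.  The Args describe the catch clause to the personality
// function and are passed through uninterpreted; names here are invented.
static CatchPadInst *emitCatchPadSketch(CatchSwitchInst *CS, ArrayRef<Value *> Args,
                                        BasicBlock *HandlerBB) {
  CatchPadInst *CPI = CatchPadInst::Create(CS, Args, "catch", HandlerBB);
  assert(CPI->getCatchSwitch() == CS);
  return CPI;
}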
4456 | |
4457 | //===----------------------------------------------------------------------===// |
4458 | // CatchReturnInst Class |
4459 | //===----------------------------------------------------------------------===// |
4460 | |
4461 | class CatchReturnInst : public Instruction { |
4462 | CatchReturnInst(const CatchReturnInst &RI); |
4463 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); |
4464 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); |
4465 | |
4466 | void init(Value *CatchPad, BasicBlock *BB); |
4467 | |
4468 | protected: |
4469 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4470 | friend class Instruction; |
4471 | |
4472 | CatchReturnInst *cloneImpl() const; |
4473 | |
4474 | public: |
4475 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4476 | Instruction *InsertBefore = nullptr) { |
4477 | assert(CatchPad);
4478 | assert(BB);
4479 | return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); |
4480 | } |
4481 | |
4482 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4483 | BasicBlock *InsertAtEnd) { |
4484 | assert(CatchPad);
4485 | assert(BB);
4486 | return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); |
4487 | } |
4488 | |
4489 | /// Provide fast operand accessors |
4490 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4491 | |
4492 | /// Convenience accessors. |
4493 | CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } |
4494 | void setCatchPad(CatchPadInst *CatchPad) { |
4495 | assert(CatchPad);
4496 | Op<0>() = CatchPad; |
4497 | } |
4498 | |
4499 | BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } |
4500 | void setSuccessor(BasicBlock *NewSucc) { |
4501 | assert(NewSucc);
4502 | Op<1>() = NewSucc; |
4503 | } |
4504 | unsigned getNumSuccessors() const { return 1; } |
4505 | |
4506 | /// Get the parentPad of this catchret's catchpad's catchswitch. |
4507 | /// The successor block is implicitly a member of this funclet. |
4508 | Value *getCatchSwitchParentPad() const { |
4509 | return getCatchPad()->getCatchSwitch()->getParentPad(); |
4510 | } |
4511 | |
4512 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4513 | static bool classof(const Instruction *I) { |
4514 | return (I->getOpcode() == Instruction::CatchRet); |
4515 | } |
4516 | static bool classof(const Value *V) { |
4517 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4518 | } |
4519 | |
4520 | private: |
4521 | BasicBlock *getSuccessor(unsigned Idx) const { |
4522 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4523 | return getSuccessor(); |
4524 | } |
4525 | |
4526 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4527 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4528 | setSuccessor(B); |
4529 | } |
4530 | }; |
4531 | |
4532 | template <> |
4533 | struct OperandTraits<CatchReturnInst> |
4534 | : public FixedNumOperandTraits<CatchReturnInst, 2> {}; |
4535 | |
4536 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
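// Illustrative sketch (hypothetical helper): leaving a catch handler.  catchret
// names the catchpad being exited and the block where normal execution resumes;
// the helper name and parameters are invented for the example.
static CatchReturnInst *emitCatchRetSketch(CatchPadInst *CPI, BasicBlock *ResumeBB,
                                           BasicBlock *HandlerEndBB) {
  CatchReturnInst *CRI = CatchReturnInst::Create(CPI, ResumeBB, HandlerEndBB);
  assert(CRI->getCatchPad() == CPI && CRI->getSuccessor() == ResumeBB);
  return CRI;
}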
4537 | |
4538 | //===----------------------------------------------------------------------===// |
4539 | // CleanupReturnInst Class |
4540 | //===----------------------------------------------------------------------===// |
4541 | |
4542 | class CleanupReturnInst : public Instruction { |
4543 | using UnwindDestField = BoolBitfieldElementT<0>; |
4544 | |
4545 | private: |
4546 | CleanupReturnInst(const CleanupReturnInst &RI); |
4547 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4548 | Instruction *InsertBefore = nullptr); |
4549 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4550 | BasicBlock *InsertAtEnd); |
4551 | |
4552 | void init(Value *CleanupPad, BasicBlock *UnwindBB); |
4553 | |
4554 | protected: |
4555 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4556 | friend class Instruction; |
4557 | |
4558 | CleanupReturnInst *cloneImpl() const; |
4559 | |
4560 | public: |
4561 | static CleanupReturnInst *Create(Value *CleanupPad, |
4562 | BasicBlock *UnwindBB = nullptr, |
4563 | Instruction *InsertBefore = nullptr) { |
4564 | assert(CleanupPad);
4565 | unsigned Values = 1; |
4566 | if (UnwindBB) |
4567 | ++Values; |
4568 | return new (Values) |
4569 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); |
4570 | } |
4571 | |
4572 | static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, |
4573 | BasicBlock *InsertAtEnd) { |
4574 | assert(CleanupPad);
4575 | unsigned Values = 1; |
4576 | if (UnwindBB) |
4577 | ++Values; |
4578 | return new (Values) |
4579 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); |
4580 | } |
4581 | |
4582 | /// Provide fast operand accessors |
4583 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4584 | |
4585 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } |
4586 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4587 | |
4588 | /// Convenience accessor. |
4589 | CleanupPadInst *getCleanupPad() const { |
4590 | return cast<CleanupPadInst>(Op<0>()); |
4591 | } |
4592 | void setCleanupPad(CleanupPadInst *CleanupPad) { |
4593 | assert(CleanupPad);
4594 | Op<0>() = CleanupPad; |
4595 | } |
4596 | |
4597 | unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } |
4598 | |
4599 | BasicBlock *getUnwindDest() const { |
4600 | return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; |
4601 | } |
4602 | void setUnwindDest(BasicBlock *NewDest) { |
4603 | assert(NewDest);
4604 | assert(hasUnwindDest());
4605 | Op<1>() = NewDest; |
4606 | } |
4607 | |
4608 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4609 | static bool classof(const Instruction *I) { |
4610 | return (I->getOpcode() == Instruction::CleanupRet); |
4611 | } |
4612 | static bool classof(const Value *V) { |
4613 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4614 | } |
4615 | |
4616 | private: |
4617 | BasicBlock *getSuccessor(unsigned Idx) const { |
4618 | assert(Idx == 0);
4619 | return getUnwindDest(); |
4620 | } |
4621 | |
4622 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4623 | assert(Idx == 0);
4624 | setUnwindDest(B); |
4625 | } |
4626 | |
4627 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4628 | // method so that subclasses cannot accidentally use it. |
4629 | template <typename Bitfield> |
4630 | void setSubclassData(typename Bitfield::Type Value) { |
4631 | Instruction::setSubclassData<Bitfield>(Value); |
4632 | } |
4633 | }; |
4634 | |
4635 | template <> |
4636 | struct OperandTraits<CleanupReturnInst> |
4637 | : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; |
4638 | |
4639 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
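// Illustrative sketch (hypothetical helper): terminating a cleanup funclet.
// A null unwind block yields a cleanupret that unwinds to the caller and has
// no successors; a non-null block becomes the single successor.
static CleanupReturnInst *emitCleanupRetSketch(CleanupPadInst *Pad,
                                               BasicBlock *UnwindBB,
                                               BasicBlock *CleanupEndBB) {
  CleanupReturnInst *CRI = CleanupReturnInst::Create(Pad, UnwindBB, CleanupEndBB);
  assert(CRI->getNumSuccessors() == (UnwindBB ? 1u : 0u));
  return CRI;
}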
4640 | |
4641 | //===----------------------------------------------------------------------===// |
4642 | // UnreachableInst Class |
4643 | //===----------------------------------------------------------------------===// |
4644 | |
4645 | //===--------------------------------------------------------------------------- |
4646 | /// This function has undefined behavior. In particular, the |
4647 | /// presence of this instruction indicates some higher level knowledge that the |
4648 | /// end of the block cannot be reached. |
4649 | /// |
4650 | class UnreachableInst : public Instruction { |
4651 | protected: |
4652 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4653 | friend class Instruction; |
4654 | |
4655 | UnreachableInst *cloneImpl() const; |
4656 | |
4657 | public: |
4658 | explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); |
4659 | explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); |
4660 | |
4661 | // allocate space for exactly zero operands |
4662 | void *operator new(size_t s) { |
4663 | return User::operator new(s, 0); |
4664 | } |
4665 | |
4666 | unsigned getNumSuccessors() const { return 0; } |
4667 | |
4668 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4669 | static bool classof(const Instruction *I) { |
4670 | return I->getOpcode() == Instruction::Unreachable; |
4671 | } |
4672 | static bool classof(const Value *V) { |
4673 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4674 | } |
4675 | |
4676 | private: |
4677 | BasicBlock *getSuccessor(unsigned idx) const { |
4678 | llvm_unreachable("UnreachableInst has no successors!");
4679 | } |
4680 | |
4681 | void setSuccessor(unsigned idx, BasicBlock *B) { |
4682 | llvm_unreachable("UnreachableInst has no successors!");
4683 | } |
4684 | }; |
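// Illustrative sketch (hypothetical helper): marking a block that can never be
// reached, for example after a call known not to return.
static UnreachableInst *emitUnreachableSketch(LLVMContext &Ctx, BasicBlock *DeadBB) {
  // UnreachableInst is allocated with zero operands and simply terminates DeadBB.
  return new UnreachableInst(Ctx, DeadBB);
}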
4685 | |
4686 | //===----------------------------------------------------------------------===// |
4687 | // TruncInst Class |
4688 | //===----------------------------------------------------------------------===// |
4689 | |
4690 | /// This class represents a truncation of integer types. |
4691 | class TruncInst : public CastInst { |
4692 | protected: |
4693 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4694 | friend class Instruction; |
4695 | |
4696 | /// Clone an identical TruncInst |
4697 | TruncInst *cloneImpl() const; |
4698 | |
4699 | public: |
4700 | /// Constructor with insert-before-instruction semantics |
4701 | TruncInst( |
4702 | Value *S, ///< The value to be truncated |
4703 | Type *Ty, ///< The (smaller) type to truncate to |
4704 | const Twine &NameStr = "", ///< A name for the new instruction |
4705 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4706 | ); |
4707 | |
4708 | /// Constructor with insert-at-end-of-block semantics |
4709 | TruncInst( |
4710 | Value *S, ///< The value to be truncated |
4711 | Type *Ty, ///< The (smaller) type to truncate to |
4712 | const Twine &NameStr, ///< A name for the new instruction |
4713 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4714 | ); |
4715 | |
4716 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4717 | static bool classof(const Instruction *I) { |
4718 | return I->getOpcode() == Trunc; |
4719 | } |
4720 | static bool classof(const Value *V) { |
4721 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4722 | } |
4723 | }; |
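
A hedged sketch of constructing one of these cast instructions directly; the value, name, and insertion point below are illustrative, not taken from this header.

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    // Hypothetical helper: truncate an i64 value to i32 just before InsertPt.
    Value *truncateToI32(Value *Wide, Instruction *InsertPt) {
      Type *I32Ty = Type::getInt32Ty(Wide->getContext());
      return new TruncInst(Wide, I32Ty, "narrow", InsertPt);
    }

The classof() methods above are what let isa<TruncInst>(V) and dyn_cast<TruncInst>(V) recover the concrete cast kind later; the same pattern applies to the ZExt/SExt/FP cast classes that follow.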
4724 | |
4725 | //===----------------------------------------------------------------------===// |
4726 | // ZExtInst Class |
4727 | //===----------------------------------------------------------------------===// |
4728 | |
4729 | /// This class represents zero extension of integer types. |
4730 | class ZExtInst : public CastInst { |
4731 | protected: |
4732 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4733 | friend class Instruction; |
4734 | |
4735 | /// Clone an identical ZExtInst |
4736 | ZExtInst *cloneImpl() const; |
4737 | |
4738 | public: |
4739 | /// Constructor with insert-before-instruction semantics |
4740 | ZExtInst( |
4741 | Value *S, ///< The value to be zero extended |
4742 | Type *Ty, ///< The type to zero extend to |
4743 | const Twine &NameStr = "", ///< A name for the new instruction |
4744 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4745 | ); |
4746 | |
4747 | /// Constructor with insert-at-end semantics. |
4748 | ZExtInst( |
4749 | Value *S, ///< The value to be zero extended |
4750 | Type *Ty, ///< The type to zero extend to |
4751 | const Twine &NameStr, ///< A name for the new instruction |
4752 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4753 | ); |
4754 | |
4755 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4756 | static bool classof(const Instruction *I) { |
4757 | return I->getOpcode() == ZExt; |
4758 | } |
4759 | static bool classof(const Value *V) { |
4760 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4761 | } |
4762 | }; |
4763 | |
4764 | //===----------------------------------------------------------------------===// |
4765 | // SExtInst Class |
4766 | //===----------------------------------------------------------------------===// |
4767 | |
4768 | /// This class represents a sign extension of integer types. |
4769 | class SExtInst : public CastInst { |
4770 | protected: |
4771 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4772 | friend class Instruction; |
4773 | |
4774 | /// Clone an identical SExtInst |
4775 | SExtInst *cloneImpl() const; |
4776 | |
4777 | public: |
4778 | /// Constructor with insert-before-instruction semantics |
4779 | SExtInst( |
4780 | Value *S, ///< The value to be sign extended |
4781 | Type *Ty, ///< The type to sign extend to |
4782 | const Twine &NameStr = "", ///< A name for the new instruction |
4783 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4784 | ); |
4785 | |
4786 | /// Constructor with insert-at-end-of-block semantics |
4787 | SExtInst( |
4788 | Value *S, ///< The value to be sign extended |
4789 | Type *Ty, ///< The type to sign extend to |
4790 | const Twine &NameStr, ///< A name for the new instruction |
4791 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4792 | ); |
4793 | |
4794 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4795 | static bool classof(const Instruction *I) { |
4796 | return I->getOpcode() == SExt; |
4797 | } |
4798 | static bool classof(const Value *V) { |
4799 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4800 | } |
4801 | }; |
4802 | |
4803 | //===----------------------------------------------------------------------===// |
4804 | // FPTruncInst Class |
4805 | //===----------------------------------------------------------------------===// |
4806 | |
4807 | /// This class represents a truncation of floating point types. |
4808 | class FPTruncInst : public CastInst { |
4809 | protected: |
4810 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4811 | friend class Instruction; |
4812 | |
4813 | /// Clone an identical FPTruncInst |
4814 | FPTruncInst *cloneImpl() const; |
4815 | |
4816 | public: |
4817 | /// Constructor with insert-before-instruction semantics |
4818 | FPTruncInst( |
4819 | Value *S, ///< The value to be truncated |
4820 | Type *Ty, ///< The type to truncate to |
4821 | const Twine &NameStr = "", ///< A name for the new instruction |
4822 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4823 | ); |
4824 | |
4825 | /// Constructor with insert-at-end-of-block semantics
4826 | FPTruncInst( |
4827 | Value *S, ///< The value to be truncated |
4828 | Type *Ty, ///< The type to truncate to |
4829 | const Twine &NameStr, ///< A name for the new instruction |
4830 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4831 | ); |
4832 | |
4833 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4834 | static bool classof(const Instruction *I) { |
4835 | return I->getOpcode() == FPTrunc; |
4836 | } |
4837 | static bool classof(const Value *V) { |
4838 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4839 | } |
4840 | }; |
4841 | |
4842 | //===----------------------------------------------------------------------===// |
4843 | // FPExtInst Class |
4844 | //===----------------------------------------------------------------------===// |
4845 | |
4846 | /// This class represents an extension of floating point types. |
4847 | class FPExtInst : public CastInst { |
4848 | protected: |
4849 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4850 | friend class Instruction; |
4851 | |
4852 | /// Clone an identical FPExtInst |
4853 | FPExtInst *cloneImpl() const; |
4854 | |
4855 | public: |
4856 | /// Constructor with insert-before-instruction semantics |
4857 | FPExtInst( |
4858 | Value *S, ///< The value to be extended |
4859 | Type *Ty, ///< The type to extend to |
4860 | const Twine &NameStr = "", ///< A name for the new instruction |
4861 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4862 | ); |
4863 | |
4864 | /// Constructor with insert-at-end-of-block semantics |
4865 | FPExtInst( |
4866 | Value *S, ///< The value to be extended |
4867 | Type *Ty, ///< The type to extend to |
4868 | const Twine &NameStr, ///< A name for the new instruction |
4869 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4870 | ); |
4871 | |
4872 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4873 | static bool classof(const Instruction *I) { |
4874 | return I->getOpcode() == FPExt; |
4875 | } |
4876 | static bool classof(const Value *V) { |
4877 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4878 | } |
4879 | }; |
4880 | |
4881 | //===----------------------------------------------------------------------===// |
4882 | // UIToFPInst Class |
4883 | //===----------------------------------------------------------------------===// |
4884 | |
4885 | /// This class represents a cast unsigned integer to floating point. |
4886 | class UIToFPInst : public CastInst { |
4887 | protected: |
4888 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4889 | friend class Instruction; |
4890 | |
4891 | /// Clone an identical UIToFPInst |
4892 | UIToFPInst *cloneImpl() const; |
4893 | |
4894 | public: |
4895 | /// Constructor with insert-before-instruction semantics |
4896 | UIToFPInst( |
4897 | Value *S, ///< The value to be converted |
4898 | Type *Ty, ///< The type to convert to |
4899 | const Twine &NameStr = "", ///< A name for the new instruction |
4900 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4901 | ); |
4902 | |
4903 | /// Constructor with insert-at-end-of-block semantics |
4904 | UIToFPInst( |
4905 | Value *S, ///< The value to be converted |
4906 | Type *Ty, ///< The type to convert to |
4907 | const Twine &NameStr, ///< A name for the new instruction |
4908 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4909 | ); |
4910 | |
4911 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4912 | static bool classof(const Instruction *I) { |
4913 | return I->getOpcode() == UIToFP; |
4914 | } |
4915 | static bool classof(const Value *V) { |
4916 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4917 | } |
4918 | }; |
4919 | |
4920 | //===----------------------------------------------------------------------===// |
4921 | // SIToFPInst Class |
4922 | //===----------------------------------------------------------------------===// |
4923 | |
4924 | /// This class represents a cast from signed integer to floating point. |
4925 | class SIToFPInst : public CastInst { |
4926 | protected: |
4927 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4928 | friend class Instruction; |
4929 | |
4930 | /// Clone an identical SIToFPInst |
4931 | SIToFPInst *cloneImpl() const; |
4932 | |
4933 | public: |
4934 | /// Constructor with insert-before-instruction semantics |
4935 | SIToFPInst( |
4936 | Value *S, ///< The value to be converted |
4937 | Type *Ty, ///< The type to convert to |
4938 | const Twine &NameStr = "", ///< A name for the new instruction |
4939 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4940 | ); |
4941 | |
4942 | /// Constructor with insert-at-end-of-block semantics |
4943 | SIToFPInst( |
4944 | Value *S, ///< The value to be converted |
4945 | Type *Ty, ///< The type to convert to |
4946 | const Twine &NameStr, ///< A name for the new instruction |
4947 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4948 | ); |
4949 | |
4950 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4951 | static bool classof(const Instruction *I) { |
4952 | return I->getOpcode() == SIToFP; |
4953 | } |
4954 | static bool classof(const Value *V) { |
4955 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4956 | } |
4957 | }; |
4958 | |
4959 | //===----------------------------------------------------------------------===// |
4960 | // FPToUIInst Class |
4961 | //===----------------------------------------------------------------------===// |
4962 | |
4963 | /// This class represents a cast from floating point to unsigned integer |
4964 | class FPToUIInst : public CastInst { |
4965 | protected: |
4966 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4967 | friend class Instruction; |
4968 | |
4969 | /// Clone an identical FPToUIInst |
4970 | FPToUIInst *cloneImpl() const; |
4971 | |
4972 | public: |
4973 | /// Constructor with insert-before-instruction semantics |
4974 | FPToUIInst( |
4975 | Value *S, ///< The value to be converted |
4976 | Type *Ty, ///< The type to convert to |
4977 | const Twine &NameStr = "", ///< A name for the new instruction |
4978 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4979 | ); |
4980 | |
4981 | /// Constructor with insert-at-end-of-block semantics |
4982 | FPToUIInst( |
4983 | Value *S, ///< The value to be converted |
4984 | Type *Ty, ///< The type to convert to |
4985 | const Twine &NameStr, ///< A name for the new instruction |
4986 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4987 | ); |
4988 | |
4989 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4990 | static bool classof(const Instruction *I) { |
4991 | return I->getOpcode() == FPToUI; |
4992 | } |
4993 | static bool classof(const Value *V) { |
4994 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4995 | } |
4996 | }; |
4997 | |
4998 | //===----------------------------------------------------------------------===// |
4999 | // FPToSIInst Class |
5000 | //===----------------------------------------------------------------------===// |
5001 | |
5002 | /// This class represents a cast from floating point to signed integer. |
5003 | class FPToSIInst : public CastInst { |
5004 | protected: |
5005 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5006 | friend class Instruction; |
5007 | |
5008 | /// Clone an identical FPToSIInst |
5009 | FPToSIInst *cloneImpl() const; |
5010 | |
5011 | public: |
5012 | /// Constructor with insert-before-instruction semantics |
5013 | FPToSIInst( |
5014 | Value *S, ///< The value to be converted |
5015 | Type *Ty, ///< The type to convert to |
5016 | const Twine &NameStr = "", ///< A name for the new instruction |
5017 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5018 | ); |
5019 | |
5020 | /// Constructor with insert-at-end-of-block semantics |
5021 | FPToSIInst( |
5022 | Value *S, ///< The value to be converted |
5023 | Type *Ty, ///< The type to convert to |
5024 | const Twine &NameStr, ///< A name for the new instruction |
5025 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5026 | ); |
5027 | |
5028 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
5029 | static bool classof(const Instruction *I) { |
5030 | return I->getOpcode() == FPToSI; |
5031 | } |
5032 | static bool classof(const Value *V) { |
5033 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5034 | } |
5035 | }; |
5036 | |
5037 | //===----------------------------------------------------------------------===// |
5038 | // IntToPtrInst Class |
5039 | //===----------------------------------------------------------------------===// |
5040 | |
5041 | /// This class represents a cast from an integer to a pointer. |
5042 | class IntToPtrInst : public CastInst { |
5043 | public: |
5044 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5045 | friend class Instruction; |
5046 | |
5047 | /// Constructor with insert-before-instruction semantics |
5048 | IntToPtrInst( |
5049 | Value *S, ///< The value to be converted |
5050 | Type *Ty, ///< The type to convert to |
5051 | const Twine &NameStr = "", ///< A name for the new instruction |
5052 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5053 | ); |
5054 | |
5055 | /// Constructor with insert-at-end-of-block semantics |
5056 | IntToPtrInst( |
5057 | Value *S, ///< The value to be converted |
5058 | Type *Ty, ///< The type to convert to |
5059 | const Twine &NameStr, ///< A name for the new instruction |
5060 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5061 | ); |
5062 | |
5063 | /// Clone an identical IntToPtrInst. |
5064 | IntToPtrInst *cloneImpl() const; |
5065 | |
5066 | /// Returns the address space of this instruction's pointer type. |
5067 | unsigned getAddressSpace() const { |
5068 | return getType()->getPointerAddressSpace(); |
5069 | } |
5070 | |
5071 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5072 | static bool classof(const Instruction *I) { |
5073 | return I->getOpcode() == IntToPtr; |
5074 | } |
5075 | static bool classof(const Value *V) { |
5076 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5077 | } |
5078 | }; |
5079 | |
5080 | //===----------------------------------------------------------------------===// |
5081 | // PtrToIntInst Class |
5082 | //===----------------------------------------------------------------------===// |
5083 | |
5084 | /// This class represents a cast from a pointer to an integer. |
5085 | class PtrToIntInst : public CastInst { |
5086 | protected: |
5087 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5088 | friend class Instruction; |
5089 | |
5090 | /// Clone an identical PtrToIntInst. |
5091 | PtrToIntInst *cloneImpl() const; |
5092 | |
5093 | public: |
5094 | /// Constructor with insert-before-instruction semantics |
5095 | PtrToIntInst( |
5096 | Value *S, ///< The value to be converted |
5097 | Type *Ty, ///< The type to convert to |
5098 | const Twine &NameStr = "", ///< A name for the new instruction |
5099 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5100 | ); |
5101 | |
5102 | /// Constructor with insert-at-end-of-block semantics |
5103 | PtrToIntInst( |
5104 | Value *S, ///< The value to be converted |
5105 | Type *Ty, ///< The type to convert to |
5106 | const Twine &NameStr, ///< A name for the new instruction |
5107 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5108 | ); |
5109 | |
5110 | /// Gets the pointer operand. |
5111 | Value *getPointerOperand() { return getOperand(0); } |
5112 | /// Gets the pointer operand. |
5113 | const Value *getPointerOperand() const { return getOperand(0); } |
5114 | /// Gets the operand index of the pointer operand. |
5115 | static unsigned getPointerOperandIndex() { return 0U; } |
5116 | |
5117 | /// Returns the address space of the pointer operand. |
5118 | unsigned getPointerAddressSpace() const { |
5119 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
5120 | } |
5121 | |
5122 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5123 | static bool classof(const Instruction *I) { |
5124 | return I->getOpcode() == PtrToInt; |
5125 | } |
5126 | static bool classof(const Value *V) { |
5127 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5128 | } |
5129 | }; |
5130 | |
5131 | //===----------------------------------------------------------------------===// |
5132 | // BitCastInst Class |
5133 | //===----------------------------------------------------------------------===// |
5134 | |
5135 | /// This class represents a no-op cast from one type to another. |
5136 | class BitCastInst : public CastInst { |
5137 | protected: |
5138 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5139 | friend class Instruction; |
5140 | |
5141 | /// Clone an identical BitCastInst. |
5142 | BitCastInst *cloneImpl() const; |
5143 | |
5144 | public: |
5145 | /// Constructor with insert-before-instruction semantics |
5146 | BitCastInst( |
5147 | Value *S, ///< The value to be cast
5148 | Type *Ty, ///< The type to cast to
5149 | const Twine &NameStr = "", ///< A name for the new instruction |
5150 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5151 | ); |
5152 | |
5153 | /// Constructor with insert-at-end-of-block semantics |
5154 | BitCastInst( |
5155 | Value *S, ///< The value to be cast
5156 | Type *Ty, ///< The type to cast to
5157 | const Twine &NameStr, ///< A name for the new instruction |
5158 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5159 | ); |
5160 | |
5161 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5162 | static bool classof(const Instruction *I) { |
5163 | return I->getOpcode() == BitCast; |
5164 | } |
5165 | static bool classof(const Value *V) { |
5166 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5167 | } |
5168 | }; |
5169 | |
5170 | //===----------------------------------------------------------------------===// |
5171 | // AddrSpaceCastInst Class |
5172 | //===----------------------------------------------------------------------===// |
5173 | |
5174 | /// This class represents a conversion between pointers from one address space |
5175 | /// to another. |
5176 | class AddrSpaceCastInst : public CastInst { |
5177 | protected: |
5178 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5179 | friend class Instruction; |
5180 | |
5181 | /// Clone an identical AddrSpaceCastInst. |
5182 | AddrSpaceCastInst *cloneImpl() const; |
5183 | |
5184 | public: |
5185 | /// Constructor with insert-before-instruction semantics |
5186 | AddrSpaceCastInst( |
5187 | Value *S, ///< The value to be cast
5188 | Type *Ty, ///< The type to cast to
5189 | const Twine &NameStr = "", ///< A name for the new instruction |
5190 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5191 | ); |
5192 | |
5193 | /// Constructor with insert-at-end-of-block semantics |
5194 | AddrSpaceCastInst( |
5195 | Value *S, ///< The value to be cast
5196 | Type *Ty, ///< The type to cast to
5197 | const Twine &NameStr, ///< A name for the new instruction |
5198 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5199 | ); |
5200 | |
5201 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5202 | static bool classof(const Instruction *I) { |
5203 | return I->getOpcode() == AddrSpaceCast; |
5204 | } |
5205 | static bool classof(const Value *V) { |
5206 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5207 | } |
5208 | |
5209 | /// Gets the pointer operand. |
5210 | Value *getPointerOperand() { |
5211 | return getOperand(0); |
5212 | } |
5213 | |
5214 | /// Gets the pointer operand. |
5215 | const Value *getPointerOperand() const { |
5216 | return getOperand(0); |
5217 | } |
5218 | |
5219 | /// Gets the operand index of the pointer operand. |
5220 | static unsigned getPointerOperandIndex() { |
5221 | return 0U; |
5222 | } |
5223 | |
5224 | /// Returns the address space of the pointer operand. |
5225 | unsigned getSrcAddressSpace() const { |
5226 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
5227 | } |
5228 | |
5229 | /// Returns the address space of the result. |
5230 | unsigned getDestAddressSpace() const { |
5231 | return getType()->getPointerAddressSpace(); |
5232 | } |
5233 | }; |
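
A small illustrative sketch of the address-space accessors just defined (the helper name is hypothetical):

    #include "llvm/IR/Instructions.h"
    #include <utility>
    using namespace llvm;

    // Report the source and destination address spaces of an addrspacecast.
    std::pair<unsigned, unsigned> castTransition(const AddrSpaceCastInst *ASC) {
      return {ASC->getSrcAddressSpace(), ASC->getDestAddressSpace()};
    }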
5234 | |
5235 | /// A helper function that returns the pointer operand of a load or store |
5236 | /// instruction. Returns nullptr if not a load or store.
5237 | inline const Value *getLoadStorePointerOperand(const Value *V) { |
5238 | if (auto *Load = dyn_cast<LoadInst>(V)) |
5239 | return Load->getPointerOperand(); |
5240 | if (auto *Store = dyn_cast<StoreInst>(V)) |
5241 | return Store->getPointerOperand(); |
5242 | return nullptr; |
5243 | } |
5244 | inline Value *getLoadStorePointerOperand(Value *V) { |
5245 | return const_cast<Value *>( |
5246 | getLoadStorePointerOperand(static_cast<const Value *>(V))); |
5247 | } |
5248 | |
5249 | /// A helper function that returns the pointer operand of a load, store |
5250 | /// or GEP instruction. Returns nullptr if not a load, store, or GEP.
5251 | inline const Value *getPointerOperand(const Value *V) { |
5252 | if (auto *Ptr = getLoadStorePointerOperand(V)) |
5253 | return Ptr; |
5254 | if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) |
5255 | return Gep->getPointerOperand(); |
5256 | return nullptr; |
5257 | } |
5258 | inline Value *getPointerOperand(Value *V) { |
5259 | return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); |
5260 | } |
5261 | |
5262 | /// A helper function that returns the alignment of a load or store instruction.
5263 | inline Align getLoadStoreAlignment(Value *I) { |
5264 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5265 | "Expected Load or Store instruction");
5266 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5267 | return LI->getAlign(); |
5268 | return cast<StoreInst>(I)->getAlign(); |
5269 | } |
5270 | |
5271 | /// A helper function that returns the address space of the pointer operand of |
5272 | /// a load or store instruction.
5273 | inline unsigned getLoadStoreAddressSpace(Value *I) { |
5274 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5275 | "Expected Load or Store instruction");
5276 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5277 | return LI->getPointerAddressSpace(); |
5278 | return cast<StoreInst>(I)->getPointerAddressSpace(); |
5279 | } |
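
A hedged example of how these helpers are commonly combined: use the nullable getLoadStorePointerOperand as the guard, then the asserting accessors. The function name is hypothetical.

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    void describeMemAccess(Instruction *I) {
      if (Value *Ptr = getLoadStorePointerOperand(I)) {
        Align A = getLoadStoreAlignment(I);        // safe: I is a load or store here
        unsigned AS = getLoadStoreAddressSpace(I); // likewise
        (void)Ptr; (void)A; (void)AS;
      }
    }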
5280 | |
5281 | //===----------------------------------------------------------------------===// |
5282 | // FreezeInst Class |
5283 | //===----------------------------------------------------------------------===// |
5284 | |
5285 | /// This class represents a freeze instruction, which returns an arbitrary but
5286 | /// fixed concrete value if its operand is either a poison value or an undef value
5287 | class FreezeInst : public UnaryInstruction { |
5288 | protected: |
5289 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5290 | friend class Instruction; |
5291 | |
5292 | /// Clone an identical FreezeInst |
5293 | FreezeInst *cloneImpl() const; |
5294 | |
5295 | public: |
5296 | explicit FreezeInst(Value *S, |
5297 | const Twine &NameStr = "", |
5298 | Instruction *InsertBefore = nullptr); |
5299 | FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); |
5300 | |
5301 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5302 | static inline bool classof(const Instruction *I) { |
5303 | return I->getOpcode() == Freeze; |
5304 | } |
5305 | static inline bool classof(const Value *V) { |
5306 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5307 | } |
5308 | }; |
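
A minimal sketch of creating a freeze (the helper name and insertion point are illustrative):

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Freeze a possibly-poison/undef value so all later uses observe one
    // consistent concrete value.
    Value *freezeBefore(Value *V, Instruction *InsertPt) {
      return new FreezeInst(V, "frozen", InsertPt);
    }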
5309 | |
5310 | } // end namespace llvm |
5311 | |
5312 | #endif // LLVM_IR_INSTRUCTIONS_H |
1 | //===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #ifndef LLVM_ADT_ILIST_ITERATOR_H |
10 | #define LLVM_ADT_ILIST_ITERATOR_H |
11 | |
12 | #include "llvm/ADT/ilist_node.h" |
13 | #include <cassert> |
14 | #include <cstddef> |
15 | #include <iterator> |
16 | #include <type_traits> |
17 | |
18 | namespace llvm { |
19 | |
20 | namespace ilist_detail { |
21 | |
22 | /// Find const-correct node types. |
23 | template <class OptionsT, bool IsConst> struct IteratorTraits; |
24 | template <class OptionsT> struct IteratorTraits<OptionsT, false> { |
25 | using value_type = typename OptionsT::value_type; |
26 | using pointer = typename OptionsT::pointer; |
27 | using reference = typename OptionsT::reference; |
28 | using node_pointer = ilist_node_impl<OptionsT> *; |
29 | using node_reference = ilist_node_impl<OptionsT> &; |
30 | }; |
31 | template <class OptionsT> struct IteratorTraits<OptionsT, true> { |
32 | using value_type = const typename OptionsT::value_type; |
33 | using pointer = typename OptionsT::const_pointer; |
34 | using reference = typename OptionsT::const_reference; |
35 | using node_pointer = const ilist_node_impl<OptionsT> *; |
36 | using node_reference = const ilist_node_impl<OptionsT> &; |
37 | }; |
38 | |
39 | template <bool IsReverse> struct IteratorHelper; |
40 | template <> struct IteratorHelper<false> : ilist_detail::NodeAccess { |
41 | using Access = ilist_detail::NodeAccess; |
42 | |
43 | template <class T> static void increment(T *&I) { I = Access::getNext(*I); } |
44 | template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); } |
45 | }; |
46 | template <> struct IteratorHelper<true> : ilist_detail::NodeAccess { |
47 | using Access = ilist_detail::NodeAccess; |
48 | |
49 | template <class T> static void increment(T *&I) { I = Access::getPrev(*I); } |
50 | template <class T> static void decrement(T *&I) { I = Access::getNext(*I); } |
51 | }; |
52 | |
53 | } // end namespace ilist_detail |
54 | |
55 | /// Iterator for intrusive lists based on ilist_node. |
56 | template <class OptionsT, bool IsReverse, bool IsConst> |
57 | class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> { |
58 | friend ilist_iterator<OptionsT, IsReverse, !IsConst>; |
59 | friend ilist_iterator<OptionsT, !IsReverse, IsConst>; |
60 | friend ilist_iterator<OptionsT, !IsReverse, !IsConst>; |
61 | |
62 | using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>; |
63 | using Access = ilist_detail::SpecificNodeAccess<OptionsT>; |
64 | |
65 | public: |
66 | using value_type = typename Traits::value_type; |
67 | using pointer = typename Traits::pointer; |
68 | using reference = typename Traits::reference; |
69 | using difference_type = ptrdiff_t; |
70 | using iterator_category = std::bidirectional_iterator_tag; |
71 | using const_pointer = typename OptionsT::const_pointer; |
72 | using const_reference = typename OptionsT::const_reference; |
73 | |
74 | private: |
75 | using node_pointer = typename Traits::node_pointer; |
76 | using node_reference = typename Traits::node_reference; |
77 | |
78 | node_pointer NodePtr = nullptr; |
79 | |
80 | public: |
81 | /// Create from an ilist_node. |
82 | explicit ilist_iterator(node_reference N) : NodePtr(&N) {} |
83 | |
84 | explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {} |
85 | explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {} |
86 | ilist_iterator() = default; |
87 | |
88 | // This is templated so that we can allow constructing a const iterator from |
89 | // a nonconst iterator... |
90 | template <bool RHSIsConst> |
91 | ilist_iterator(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS, |
92 | std::enable_if_t<IsConst || !RHSIsConst, void *> = nullptr) |
93 | : NodePtr(RHS.NodePtr) {} |
94 | |
95 | // This is templated so that we can allow assigning to a const iterator from |
96 | // a nonconst iterator... |
97 | template <bool RHSIsConst> |
98 | std::enable_if_t<IsConst || !RHSIsConst, ilist_iterator &> |
99 | operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) { |
100 | NodePtr = RHS.NodePtr; |
101 | return *this; |
102 | } |
103 | |
104 | /// Explicit conversion between forward/reverse iterators. |
105 | /// |
106 | /// Translate between forward and reverse iterators without changing range |
107 | /// boundaries. The resulting iterator will dereference (and have a handle) |
108 | /// to the previous node, which is somewhat unexpected; but converting the |
109 | /// two endpoints in a range will give the same range in reverse. |
110 | /// |
111 | /// This matches std::reverse_iterator conversions. |
112 | explicit ilist_iterator( |
113 | const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS) |
114 | : ilist_iterator(++RHS.getReverse()) {} |
115 | |
116 | /// Get a reverse iterator to the same node. |
117 | /// |
118 | /// Gives a reverse iterator that will dereference (and have a handle) to the |
119 | /// same node. Converting the endpoint iterators in a range will give a |
120 | /// different range; for range operations, use the explicit conversions. |
121 | ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const { |
122 | if (NodePtr) |
123 | return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr); |
124 | return ilist_iterator<OptionsT, !IsReverse, IsConst>(); |
125 | } |
126 | |
127 | /// Const-cast. |
128 | ilist_iterator<OptionsT, IsReverse, false> getNonConst() const { |
129 | if (NodePtr) |
130 | return ilist_iterator<OptionsT, IsReverse, false>( |
131 | const_cast<typename ilist_iterator<OptionsT, IsReverse, |
132 | false>::node_reference>(*NodePtr)); |
133 | return ilist_iterator<OptionsT, IsReverse, false>(); |
134 | } |
135 | |
136 | // Accessors... |
137 | reference operator*() const { |
138 | assert(!NodePtr->isKnownSentinel());
139 | return *Access::getValuePtr(NodePtr); |
140 | } |
141 | pointer operator->() const { return &operator*(); } |
142 | |
143 | // Comparison operators |
144 | friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) { |
145 | return LHS.NodePtr == RHS.NodePtr; |
146 | } |
147 | friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) { |
148 | return LHS.NodePtr != RHS.NodePtr; |
149 | } |
150 | |
151 | // Increment and decrement operators... |
152 | ilist_iterator &operator--() { |
153 | NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev(); |
154 | return *this; |
155 | } |
156 | ilist_iterator &operator++() { |
157 | NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext(); |
158 | return *this; |
159 | } |
160 | ilist_iterator operator--(int) { |
161 | ilist_iterator tmp = *this; |
162 | --*this; |
163 | return tmp; |
164 | } |
165 | ilist_iterator operator++(int) { |
166 | ilist_iterator tmp = *this; |
167 | ++*this; |
168 | return tmp; |
169 | } |
170 | |
171 | /// Get the underlying ilist_node. |
172 | node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); } |
173 | |
174 | /// Check for end. Only valid if ilist_sentinel_tracking<true>. |
175 | bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; } |
176 | }; |
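
A brief illustrative sketch of reverse traversal over an intrusive list, here a BasicBlock's instruction list; converting the endpoints of a range (as the explicit constructor above does) preserves the range, while getReverse() keeps a handle on the same node:

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    void walkBackwards(BasicBlock &BB) {
      for (auto RI = BB.rbegin(), RE = BB.rend(); RI != RE; ++RI) {
        Instruction &I = *RI; // visits instructions from last to first
        (void)I;
      }
    }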
177 | |
178 | template <typename From> struct simplify_type; |
179 | |
180 | /// Allow ilist_iterators to convert into pointers to a node automatically when |
181 | /// used by the dyn_cast, cast, isa mechanisms... |
182 | /// |
183 | /// FIXME: remove this, since there is no implicit conversion to NodeTy. |
184 | template <class OptionsT, bool IsConst> |
185 | struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> { |
186 | using iterator = ilist_iterator<OptionsT, false, IsConst>; |
187 | using SimpleType = typename iterator::pointer; |
188 | |
189 | static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; } |
190 | }; |
191 | template <class OptionsT, bool IsConst> |
192 | struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>> |
193 | : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {}; |
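
Because of this simplify_type specialization, an instruction iterator can be handed straight to isa<>/dyn_cast<> without dereferencing it first. An illustrative sketch (assumes the block is non-empty):

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    bool startsWithPHI(BasicBlock &BB) {
      return isa<PHINode>(BB.begin()); // the iterator is simplified to a pointer
    }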
194 | |
195 | } // end namespace llvm |
196 | |
197 | #endif // LLVM_ADT_ILIST_ITERATOR_H |
1 | //===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the SmallVector class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_ADT_SMALLVECTOR_H |
14 | #define LLVM_ADT_SMALLVECTOR_H |
15 | |
16 | #include "llvm/ADT/iterator_range.h" |
17 | #include "llvm/Support/Compiler.h" |
18 | #include "llvm/Support/ErrorHandling.h" |
19 | #include "llvm/Support/MathExtras.h" |
20 | #include "llvm/Support/MemAlloc.h" |
21 | #include "llvm/Support/type_traits.h" |
22 | #include <algorithm> |
23 | #include <cassert> |
24 | #include <cstddef> |
25 | #include <cstdlib> |
26 | #include <cstring> |
27 | #include <initializer_list> |
28 | #include <iterator> |
29 | #include <limits> |
30 | #include <memory> |
31 | #include <new> |
32 | #include <type_traits> |
33 | #include <utility> |
34 | |
35 | namespace llvm { |
36 | |
37 | /// This is all the stuff common to all SmallVectors. |
38 | /// |
39 | /// The template parameter specifies the type which should be used to hold the |
40 | /// Size and Capacity of the SmallVector, so it can be adjusted. |
41 | /// Using 32 bit size is desirable to shrink the size of the SmallVector. |
42 | /// Using 64 bit size is desirable for cases like SmallVector<char>, where a |
43 | /// 32 bit size would limit the vector to ~4GB. SmallVectors are used for |
44 | /// buffering bitcode output - which can exceed 4GB. |
45 | template <class Size_T> class SmallVectorBase { |
46 | protected: |
47 | void *BeginX; |
48 | Size_T Size = 0, Capacity; |
49 | |
50 | /// The maximum value of the Size_T used. |
51 | static constexpr size_t SizeTypeMax() { |
52 | return std::numeric_limits<Size_T>::max(); |
53 | } |
54 | |
55 | SmallVectorBase() = delete; |
56 | SmallVectorBase(void *FirstEl, size_t TotalCapacity) |
57 | : BeginX(FirstEl), Capacity(TotalCapacity) {} |
58 | |
59 | /// This is a helper for \a grow() that's out of line to reduce code |
60 | /// duplication. This function will report a fatal error if it can't grow at |
61 | /// least to \p MinSize. |
62 | void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity); |
63 | |
64 | /// This is an implementation of the grow() method which only works |
65 | /// on POD-like data types and is out of line to reduce code duplication. |
66 | /// This function will report a fatal error if it cannot increase capacity. |
67 | void grow_pod(void *FirstEl, size_t MinSize, size_t TSize); |
68 | |
69 | public: |
70 | size_t size() const { return Size; } |
71 | size_t capacity() const { return Capacity; } |
72 | |
73 | LLVM_NODISCARD bool empty() const { return !Size; }
74 | |
75 | /// Set the array size to \p N, which the current array must have enough |
76 | /// capacity for. |
77 | /// |
78 | /// This does not construct or destroy any elements in the vector. |
79 | /// |
80 | /// Clients can use this in conjunction with capacity() to write past the end |
81 | /// of the buffer when they know that more elements are available, and only |
82 | /// update the size later. This avoids the cost of value initializing elements |
83 | /// which will only be overwritten. |
84 | void set_size(size_t N) { |
85 | assert(N <= capacity());
86 | Size = N; |
87 | } |
88 | }; |
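
A hedged sketch of the set_size() pattern described above: write into spare capacity first, then publish the size, skipping value-initialization. The helper name and inputs are hypothetical; Len must fit in the reserved capacity.

    #include "llvm/ADT/SmallVector.h"
    #include <cstring>
    using namespace llvm;

    void appendRaw(SmallVectorImpl<char> &Buf, const char *Src, size_t Len) {
      Buf.reserve(Buf.size() + Len);    // ensure capacity up front
      std::memcpy(Buf.end(), Src, Len); // fill past-the-end storage directly
      Buf.set_size(Buf.size() + Len);   // then account for the new elements
    }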
89 | |
90 | template <class T> |
91 | using SmallVectorSizeType = |
92 | typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t, |
93 | uint32_t>::type; |
94 | |
95 | /// Figure out the offset of the first element. |
96 | template <class T, typename = void> struct SmallVectorAlignmentAndSize { |
97 | alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof( |
98 | SmallVectorBase<SmallVectorSizeType<T>>)]; |
99 | alignas(T) char FirstEl[sizeof(T)]; |
100 | }; |
101 | |
102 | /// This is the part of SmallVectorTemplateBase which does not depend on whether |
103 | /// the type T is a POD. The extra dummy template argument is used by ArrayRef |
104 | /// to avoid unnecessarily requiring T to be complete. |
105 | template <typename T, typename = void> |
106 | class SmallVectorTemplateCommon |
107 | : public SmallVectorBase<SmallVectorSizeType<T>> { |
108 | using Base = SmallVectorBase<SmallVectorSizeType<T>>; |
109 | |
110 | /// Find the address of the first element. For this pointer math to be valid |
111 | /// with small-size of 0 for T with lots of alignment, it's important that |
112 | /// SmallVectorStorage is properly-aligned even for small-size of 0. |
113 | void *getFirstEl() const { |
114 | return const_cast<void *>(reinterpret_cast<const void *>( |
115 | reinterpret_cast<const char *>(this) + |
116 | offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
117 | } |
118 | // Space after 'FirstEl' is clobbered, do not add any instance vars after it. |
119 | |
120 | protected: |
121 | SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {} |
122 | |
123 | void grow_pod(size_t MinSize, size_t TSize) { |
124 | Base::grow_pod(getFirstEl(), MinSize, TSize); |
125 | } |
126 | |
127 | /// Return true if this is a smallvector which has not had dynamic |
128 | /// memory allocated for it. |
129 | bool isSmall() const { return this->BeginX == getFirstEl(); } |
130 | |
131 | /// Put this vector in a state of being small. |
132 | void resetToSmall() { |
133 | this->BeginX = getFirstEl(); |
134 | this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect. |
135 | } |
136 | |
137 | /// Return true if V is an internal reference to the given range. |
138 | bool isReferenceToRange(const void *V, const void *First, const void *Last) const { |
139 | // Use std::less to avoid UB. |
140 | std::less<> LessThan; |
141 | return !LessThan(V, First) && LessThan(V, Last); |
142 | } |
143 | |
144 | /// Return true if V is an internal reference to this vector. |
145 | bool isReferenceToStorage(const void *V) const { |
146 | return isReferenceToRange(V, this->begin(), this->end()); |
147 | } |
148 | |
149 | /// Return true if First and Last form a valid (possibly empty) range in this |
150 | /// vector's storage. |
151 | bool isRangeInStorage(const void *First, const void *Last) const { |
152 | // Use std::less to avoid UB. |
153 | std::less<> LessThan; |
154 | return !LessThan(First, this->begin()) && !LessThan(Last, First) && |
155 | !LessThan(this->end(), Last); |
156 | } |
157 | |
158 | /// Return true unless Elt will be invalidated by resizing the vector to |
159 | /// NewSize. |
160 | bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { |
161 | // Past the end. |
162 | if (LLVM_LIKELY(!isReferenceToStorage(Elt)))
163 | return true; |
164 | |
165 | // Return false if Elt will be destroyed by shrinking. |
166 | if (NewSize <= this->size()) |
167 | return Elt < this->begin() + NewSize; |
168 | |
169 | // Return false if we need to grow. |
170 | return NewSize <= this->capacity(); |
171 | } |
172 | |
173 | /// Check whether Elt will be invalidated by resizing the vector to NewSize. |
174 | void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { |
175 | assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
176 | "Attempting to reference an element of the vector in an operation "
177 | "that invalidates it");
178 | } |
179 | |
180 | /// Check whether Elt will be invalidated by increasing the size of the |
181 | /// vector by N. |
182 | void assertSafeToAdd(const void *Elt, size_t N = 1) { |
183 | this->assertSafeToReferenceAfterResize(Elt, this->size() + N); |
184 | } |
185 | |
186 | /// Check whether any part of the range will be invalidated by clearing. |
187 | void assertSafeToReferenceAfterClear(const T *From, const T *To) { |
188 | if (From == To) |
189 | return; |
190 | this->assertSafeToReferenceAfterResize(From, 0); |
191 | this->assertSafeToReferenceAfterResize(To - 1, 0); |
192 | } |
193 | template < |
194 | class ItTy, |
195 | std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value, |
196 | bool> = false> |
197 | void assertSafeToReferenceAfterClear(ItTy, ItTy) {} |
198 | |
199 | /// Check whether any part of the range will be invalidated by growing. |
200 | void assertSafeToAddRange(const T *From, const T *To) { |
201 | if (From == To) |
202 | return; |
203 | this->assertSafeToAdd(From, To - From); |
204 | this->assertSafeToAdd(To - 1, To - From); |
205 | } |
206 | template < |
207 | class ItTy, |
208 | std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value, |
209 | bool> = false> |
210 | void assertSafeToAddRange(ItTy, ItTy) {} |
211 | |
212 | /// Reserve enough space to add one element, and return the updated element |
213 | /// pointer in case it was a reference to the storage. |
214 | template <class U> |
215 | static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt, |
216 | size_t N) { |
217 | size_t NewSize = This->size() + N; |
218 | if (LLVM_LIKELY(NewSize <= This->capacity()))
219 | return &Elt; |
220 | |
221 | bool ReferencesStorage = false; |
222 | int64_t Index = -1; |
223 | if (!U::TakesParamByValue) { |
224 | if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
225 | ReferencesStorage = true; |
226 | Index = &Elt - This->begin(); |
227 | } |
228 | } |
229 | This->grow(NewSize); |
230 | return ReferencesStorage ? This->begin() + Index : &Elt; |
231 | } |
232 | |
233 | public: |
234 | using size_type = size_t; |
235 | using difference_type = ptrdiff_t; |
236 | using value_type = T; |
237 | using iterator = T *; |
238 | using const_iterator = const T *; |
239 | |
240 | using const_reverse_iterator = std::reverse_iterator<const_iterator>; |
241 | using reverse_iterator = std::reverse_iterator<iterator>; |
242 | |
243 | using reference = T &; |
244 | using const_reference = const T &; |
245 | using pointer = T *; |
246 | using const_pointer = const T *; |
247 | |
248 | using Base::capacity; |
249 | using Base::empty; |
250 | using Base::size; |
251 | |
252 | // forward iterator creation methods. |
253 | iterator begin() { return (iterator)this->BeginX; } |
254 | const_iterator begin() const { return (const_iterator)this->BeginX; } |
255 | iterator end() { return begin() + size(); } |
256 | const_iterator end() const { return begin() + size(); } |
257 | |
258 | // reverse iterator creation methods. |
259 | reverse_iterator rbegin() { return reverse_iterator(end()); } |
260 | const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); } |
261 | reverse_iterator rend() { return reverse_iterator(begin()); } |
262 | const_reverse_iterator rend() const { return const_reverse_iterator(begin());} |
263 | |
264 | size_type size_in_bytes() const { return size() * sizeof(T); } |
265 | size_type max_size() const { |
266 | return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T)); |
267 | } |
268 | |
269 | size_t capacity_in_bytes() const { return capacity() * sizeof(T); } |
270 | |
271 | /// Return a pointer to the vector's buffer, even if empty(). |
272 | pointer data() { return pointer(begin()); } |
273 | /// Return a pointer to the vector's buffer, even if empty(). |
274 | const_pointer data() const { return const_pointer(begin()); } |
275 | |
276 | reference operator[](size_type idx) { |
277 | assert(idx < size());
278 | return begin()[idx]; |
279 | } |
280 | const_reference operator[](size_type idx) const { |
281 | assert(idx < size());
282 | return begin()[idx]; |
283 | } |
284 | |
285 | reference front() { |
286 | assert(!empty());
287 | return begin()[0]; |
288 | } |
289 | const_reference front() const { |
290 | assert(!empty());
291 | return begin()[0]; |
292 | } |
293 | |
294 | reference back() { |
295 | assert(!empty());
296 | return end()[-1]; |
297 | } |
298 | const_reference back() const { |
299 | assert(!empty());
300 | return end()[-1]; |
301 | } |
302 | }; |
303 | |
304 | /// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put |
305 | /// method implementations that are designed to work with non-trivial T's. |
306 | /// |
307 | /// We approximate is_trivially_copyable with trivial move/copy construction and |
308 | /// trivial destruction. While the standard doesn't specify that you're allowed to
309 | /// copy these types with memcpy, there is no way for the type to observe this. |
310 | /// This catches the important case of std::pair<POD, POD>, which is not |
311 | /// trivially assignable. |
312 | template <typename T, bool = (is_trivially_copy_constructible<T>::value) && |
313 | (is_trivially_move_constructible<T>::value) && |
314 | std::is_trivially_destructible<T>::value> |
315 | class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> { |
316 | friend class SmallVectorTemplateCommon<T>; |
317 | |
318 | protected: |
319 | static constexpr bool TakesParamByValue = false; |
320 | using ValueParamT = const T &; |
321 | |
322 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
323 | |
324 | static void destroy_range(T *S, T *E) { |
325 | while (S != E) { |
326 | --E; |
327 | E->~T(); |
328 | } |
329 | } |
330 | |
331 | /// Move the range [I, E) into the uninitialized memory starting with "Dest", |
332 | /// constructing elements as needed. |
333 | template<typename It1, typename It2> |
334 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
335 | std::uninitialized_copy(std::make_move_iterator(I), |
336 | std::make_move_iterator(E), Dest); |
337 | } |
338 | |
339 | /// Copy the range [I, E) onto the uninitialized memory starting with "Dest", |
340 | /// constructing elements as needed. |
341 | template<typename It1, typename It2> |
342 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
343 | std::uninitialized_copy(I, E, Dest); |
344 | } |
345 | |
346 | /// Grow the allocated memory (without initializing new elements), doubling |
347 | /// the size of the allocated memory. Guarantees space for at least one more |
348 | /// element, or MinSize more elements if specified. |
349 | void grow(size_t MinSize = 0); |
350 | |
351 | /// Create a new allocation big enough for \p MinSize and pass back its size |
352 | /// in \p NewCapacity. This is the first section of \a grow(). |
353 | T *mallocForGrow(size_t MinSize, size_t &NewCapacity) { |
354 | return static_cast<T *>( |
355 | SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow( |
356 | MinSize, sizeof(T), NewCapacity)); |
357 | } |
358 | |
359 | /// Move existing elements over to the new allocation \p NewElts, the middle |
360 | /// section of \a grow(). |
361 | void moveElementsForGrow(T *NewElts); |
362 | |
363 | /// Transfer ownership of the allocation, finishing up \a grow(). |
364 | void takeAllocationForGrow(T *NewElts, size_t NewCapacity); |
365 | |
366 | /// Reserve enough space to add one element, and return the updated element |
367 | /// pointer in case it was a reference to the storage. |
368 | const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) { |
369 | return this->reserveForParamAndGetAddressImpl(this, Elt, N); |
370 | } |
371 | |
372 | /// Reserve enough space to add one element, and return the updated element |
373 | /// pointer in case it was a reference to the storage. |
374 | T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) { |
375 | return const_cast<T *>( |
376 | this->reserveForParamAndGetAddressImpl(this, Elt, N)); |
377 | } |
378 | |
379 | static T &&forward_value_param(T &&V) { return std::move(V); } |
380 | static const T &forward_value_param(const T &V) { return V; } |
381 | |
382 | void growAndAssign(size_t NumElts, const T &Elt) { |
383 | // Grow manually in case Elt is an internal reference. |
384 | size_t NewCapacity; |
385 | T *NewElts = mallocForGrow(NumElts, NewCapacity); |
386 | std::uninitialized_fill_n(NewElts, NumElts, Elt); |
387 | this->destroy_range(this->begin(), this->end()); |
388 | takeAllocationForGrow(NewElts, NewCapacity); |
389 | this->set_size(NumElts); |
390 | } |
391 | |
392 | template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) { |
393 | // Grow manually in case one of Args is an internal reference. |
394 | size_t NewCapacity; |
395 | T *NewElts = mallocForGrow(0, NewCapacity); |
396 | ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...); |
397 | moveElementsForGrow(NewElts); |
398 | takeAllocationForGrow(NewElts, NewCapacity); |
399 | this->set_size(this->size() + 1); |
400 | return this->back(); |
401 | } |
402 | |
403 | public: |
404 | void push_back(const T &Elt) { |
405 | const T *EltPtr = reserveForParamAndGetAddress(Elt); |
406 | ::new ((void *)this->end()) T(*EltPtr); |
407 | this->set_size(this->size() + 1); |
408 | } |
409 | |
410 | void push_back(T &&Elt) { |
411 | T *EltPtr = reserveForParamAndGetAddress(Elt); |
412 | ::new ((void *)this->end()) T(::std::move(*EltPtr)); |
413 | this->set_size(this->size() + 1); |
414 | } |
415 | |
416 | void pop_back() { |
417 | this->set_size(this->size() - 1); |
418 | this->end()->~T(); |
419 | } |
420 | }; |
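
A short sketch of what reserveForParamAndGetAddress buys: pushing an element of the vector onto itself stays well-defined even when the push_back triggers a reallocation (names illustrative):

    #include "llvm/ADT/SmallVector.h"
    #include <string>
    using namespace llvm;

    void duplicateFront(SmallVector<std::string, 2> &V) {
      if (!V.empty())
        V.push_back(V[0]); // the internal reference is re-derived after any grow
    }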
421 | |
422 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
423 | template <typename T, bool TriviallyCopyable> |
424 | void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) { |
425 | size_t NewCapacity; |
426 | T *NewElts = mallocForGrow(MinSize, NewCapacity); |
427 | moveElementsForGrow(NewElts); |
428 | takeAllocationForGrow(NewElts, NewCapacity); |
429 | } |
430 | |
431 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
432 | template <typename T, bool TriviallyCopyable> |
433 | void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow( |
434 | T *NewElts) { |
435 | // Move the elements over. |
436 | this->uninitialized_move(this->begin(), this->end(), NewElts); |
437 | |
438 | // Destroy the original elements. |
439 | destroy_range(this->begin(), this->end()); |
440 | } |
441 | |
442 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
443 | template <typename T, bool TriviallyCopyable> |
444 | void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow( |
445 | T *NewElts, size_t NewCapacity) { |
446 | // If this wasn't grown from the inline copy, deallocate the old space. |
447 | if (!this->isSmall()) |
448 | free(this->begin()); |
449 | |
450 | this->BeginX = NewElts; |
451 | this->Capacity = NewCapacity; |
452 | } |
453 | |
454 | /// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put |
455 | /// method implementations that are designed to work with trivially copyable |
456 | /// T's. This allows using memcpy in place of copy/move construction and |
457 | /// skipping destruction. |
458 | template <typename T> |
459 | class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> { |
460 | friend class SmallVectorTemplateCommon<T>; |
461 | |
462 | protected: |
463 | /// True if it's cheap enough to take parameters by value. Doing so avoids |
464 | /// overhead related to mitigations for reference invalidation. |
465 | static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *); |
466 | |
467 | /// Either const T& or T, depending on whether it's cheap enough to take |
468 | /// parameters by value. |
469 | using ValueParamT = |
470 | typename std::conditional<TakesParamByValue, T, const T &>::type; |
471 | |
472 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
473 | |
474 | // No need to do a destroy loop for PODs. |
475 | static void destroy_range(T *, T *) {} |
476 | |
477 | /// Move the range [I, E) onto the uninitialized memory |
478 | /// starting with "Dest", constructing elements into it as needed. |
479 | template<typename It1, typename It2> |
480 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
481 | // Just do a copy. |
482 | uninitialized_copy(I, E, Dest); |
483 | } |
484 | |
485 | /// Copy the range [I, E) onto the uninitialized memory |
486 | /// starting with "Dest", constructing elements into it as needed. |
487 | template<typename It1, typename It2> |
488 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
489 | // Arbitrary iterator types; just use the basic implementation. |
490 | std::uninitialized_copy(I, E, Dest); |
491 | } |
492 | |
493 | /// Copy the range [I, E) onto the uninitialized memory |
494 | /// starting with "Dest", constructing elements into it as needed. |
495 | template <typename T1, typename T2> |
496 | static void uninitialized_copy( |
497 | T1 *I, T1 *E, T2 *Dest, |
498 | std::enable_if_t<std::is_same<typename std::remove_const<T1>::type, |
499 | T2>::value> * = nullptr) { |
500 | // Use memcpy for PODs iterated by pointers (which includes SmallVector |
501 | // iterators): std::uninitialized_copy optimizes to memmove, but we can |
502 | // use memcpy here. Note that I and E are iterators and thus might be |
503 | // invalid for memcpy if they are equal. |
504 | if (I != E) |
505 | memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T)); |
506 | } |
507 | |
508 | /// Double the size of the allocated memory, guaranteeing space for at |
509 | /// least one more element or MinSize if specified. |
510 | void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); } |
511 | |
512 | /// Reserve enough space to add one element, and return the updated element |
513 | /// pointer in case it was a reference to the storage. |
514 | const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) { |
515 | return this->reserveForParamAndGetAddressImpl(this, Elt, N); |
516 | } |
517 | |
518 | /// Reserve enough space to add one element, and return the updated element |
519 | /// pointer in case it was a reference to the storage. |
520 | T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) { |
521 | return const_cast<T *>( |
522 | this->reserveForParamAndGetAddressImpl(this, Elt, N)); |
523 | } |
524 | |
525 | /// Copy \p V or return a reference, depending on \a ValueParamT. |
526 | static ValueParamT forward_value_param(ValueParamT V) { return V; } |
527 | |
528 | void growAndAssign(size_t NumElts, T Elt) { |
529 | // Elt has been copied in case it's an internal reference, side-stepping |
530 | // reference invalidation problems without losing the realloc optimization. |
531 | this->set_size(0); |
532 | this->grow(NumElts); |
533 | std::uninitialized_fill_n(this->begin(), NumElts, Elt); |
534 | this->set_size(NumElts); |
535 | } |
536 | |
537 | template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) { |
538 | // Use push_back with a copy in case Args has an internal reference, |
539 | // side-stepping reference invalidation problems without losing the realloc |
540 | // optimization. |
541 | push_back(T(std::forward<ArgTypes>(Args)...)); |
542 | return this->back(); |
543 | } |
544 | |
545 | public: |
546 | void push_back(ValueParamT Elt) { |
547 | const T *EltPtr = reserveForParamAndGetAddress(Elt); |
548 | memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T)); |
549 | this->set_size(this->size() + 1); |
550 | } |
551 | |
552 | void pop_back() { this->set_size(this->size() - 1); } |
553 | }; |
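// Worked example of the TakesParamByValue cutoff above, assuming a typical
// 64-bit host where sizeof(void *) == 8, i.e. a 16-byte threshold:
//
//   T = int                  : sizeof(T) == 4  <= 16 -> ValueParamT is T
//   T = a 32-byte POD struct : sizeof(T) == 32 >  16 -> ValueParamT is const T &
//
// When T is passed by value, push_back(V[0]) needs no aliasing handling at all
// in this specialization, because the argument is already a copy.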
554 | |
555 | /// This class consists of common code factored out of the SmallVector class to |
556 | /// reduce code duplication based on the SmallVector 'N' template parameter. |
557 | template <typename T> |
558 | class SmallVectorImpl : public SmallVectorTemplateBase<T> { |
559 | using SuperClass = SmallVectorTemplateBase<T>; |
560 | |
561 | public: |
562 | using iterator = typename SuperClass::iterator; |
563 | using const_iterator = typename SuperClass::const_iterator; |
564 | using reference = typename SuperClass::reference; |
565 | using size_type = typename SuperClass::size_type; |
566 | |
567 | protected: |
568 | using SmallVectorTemplateBase<T>::TakesParamByValue; |
569 | using ValueParamT = typename SuperClass::ValueParamT; |
570 | |
571 | // Default ctor - Initialize to empty. |
572 | explicit SmallVectorImpl(unsigned N) |
573 | : SmallVectorTemplateBase<T>(N) {} |
574 | |
575 | public: |
576 | SmallVectorImpl(const SmallVectorImpl &) = delete; |
577 | |
578 | ~SmallVectorImpl() { |
579 | // Subclass has already destructed this vector's elements. |
580 | // If this wasn't grown from the inline copy, deallocate the old space. |
581 | if (!this->isSmall()) |
582 | free(this->begin()); |
583 | } |
584 | |
585 | void clear() { |
586 | this->destroy_range(this->begin(), this->end()); |
587 | this->Size = 0; |
588 | } |
589 | |
590 | private: |
591 | template <bool ForOverwrite> void resizeImpl(size_type N) { |
592 | if (N < this->size()) { |
593 | this->pop_back_n(this->size() - N); |
594 | } else if (N > this->size()) { |
595 | this->reserve(N); |
596 | for (auto I = this->end(), E = this->begin() + N; I != E; ++I) |
597 | if (ForOverwrite) |
598 | new (&*I) T; |
599 | else |
600 | new (&*I) T(); |
601 | this->set_size(N); |
602 | } |
603 | } |
604 | |
605 | public: |
606 | void resize(size_type N) { resizeImpl<false>(N); } |
607 | |
608 | /// Like resize, but if \ref T is POD the new values won't be initialized. |
609 | void resize_for_overwrite(size_type N) { resizeImpl<true>(N); } |
610 | |
611 | void resize(size_type N, ValueParamT NV) { |
612 | if (N == this->size()) |
613 | return; |
614 | |
615 | if (N < this->size()) { |
616 | this->pop_back_n(this->size() - N); |
617 | return; |
618 | } |
619 | |
620 | // N > this->size(). Defer to append. |
621 | this->append(N - this->size(), NV); |
622 | } |
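// Usage sketch for the resize flavours above (hypothetical values):
//
//   llvm::SmallVector<int> V;
//   V.resize(4);                // new ints are value-initialized to 0
//   V.resize_for_overwrite(8);  // new ints are default-initialized (garbage)
//   V.resize(2, 7);             // shrinking, so the fill value is ignored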
623 | |
624 | void reserve(size_type N) { |
625 | if (this->capacity() < N) |
626 | this->grow(N); |
627 | } |
628 | |
629 | void pop_back_n(size_type NumItems) { |
630 | assert(this->size() >= NumItems); |
631 | this->destroy_range(this->end() - NumItems, this->end()); |
632 | this->set_size(this->size() - NumItems); |
633 | } |
634 | |
635 | LLVM_NODISCARD T pop_back_val() { |
636 | T Result = ::std::move(this->back()); |
637 | this->pop_back(); |
638 | return Result; |
639 | } |
640 | |
641 | void swap(SmallVectorImpl &RHS); |
642 | |
643 | /// Add the specified range to the end of the SmallVector. |
644 | template <typename in_iter, |
645 | typename = std::enable_if_t<std::is_convertible< |
646 | typename std::iterator_traits<in_iter>::iterator_category, |
647 | std::input_iterator_tag>::value>> |
648 | void append(in_iter in_start, in_iter in_end) { |
649 | this->assertSafeToAddRange(in_start, in_end); |
650 | size_type NumInputs = std::distance(in_start, in_end); |
651 | this->reserve(this->size() + NumInputs); |
652 | this->uninitialized_copy(in_start, in_end, this->end()); |
653 | this->set_size(this->size() + NumInputs); |
654 | } |
655 | |
656 | /// Append \p NumInputs copies of \p Elt to the end. |
657 | void append(size_type NumInputs, ValueParamT Elt) { |
658 | const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs); |
659 | std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr); |
660 | this->set_size(this->size() + NumInputs); |
661 | } |
662 | |
663 | void append(std::initializer_list<T> IL) { |
664 | append(IL.begin(), IL.end()); |
665 | } |
666 | |
667 | void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); } |
668 | |
669 | void assign(size_type NumElts, ValueParamT Elt) { |
670 | // Note that Elt could be an internal reference. |
671 | if (NumElts > this->capacity()) { |
672 | this->growAndAssign(NumElts, Elt); |
673 | return; |
674 | } |
675 | |
676 | // Assign over existing elements. |
677 | std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt); |
678 | if (NumElts > this->size()) |
679 | std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt); |
680 | else if (NumElts < this->size()) |
681 | this->destroy_range(this->begin() + NumElts, this->end()); |
682 | this->set_size(NumElts); |
683 | } |
684 | |
685 | // FIXME: Consider assigning over existing elements, rather than clearing & |
686 | // re-initializing them - for all assign(...) variants. |
687 | |
688 | template <typename in_iter, |
689 | typename = std::enable_if_t<std::is_convertible< |
690 | typename std::iterator_traits<in_iter>::iterator_category, |
691 | std::input_iterator_tag>::value>> |
692 | void assign(in_iter in_start, in_iter in_end) { |
693 | this->assertSafeToReferenceAfterClear(in_start, in_end); |
694 | clear(); |
695 | append(in_start, in_end); |
696 | } |
697 | |
698 | void assign(std::initializer_list<T> IL) { |
699 | clear(); |
700 | append(IL); |
701 | } |
702 | |
703 | void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); } |
704 | |
705 | iterator erase(const_iterator CI) { |
706 | // Just cast away constness because this is a non-const member function. |
707 | iterator I = const_cast<iterator>(CI); |
708 | |
709 | assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds."); |
710 | |
711 | iterator N = I; |
712 | // Shift all elts down one. |
713 | std::move(I+1, this->end(), I); |
714 | // Drop the last elt. |
715 | this->pop_back(); |
716 | return N; |
717 | } |
718 | |
719 | iterator erase(const_iterator CS, const_iterator CE) { |
720 | // Just cast away constness because this is a non-const member function. |
721 | iterator S = const_cast<iterator>(CS); |
722 | iterator E = const_cast<iterator>(CE); |
723 | |
724 | assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds."); |
725 | |
726 | iterator N = S; |
727 | // Shift all elts down. |
728 | iterator I = std::move(E, this->end(), S); |
729 | // Drop the last elts. |
730 | this->destroy_range(I, this->end()); |
731 | this->set_size(I - this->begin()); |
732 | return N; |
733 | } |
734 | |
735 | private: |
736 | template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) { |
737 | // Callers ensure that ArgType is derived from T. |
738 | static_assert( |
739 | std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>, |
740 | T>::value, |
741 | "ArgType must be derived from T!"); |
742 | |
743 | if (I == this->end()) { // Important special case for empty vector. |
744 | this->push_back(::std::forward<ArgType>(Elt)); |
745 | return this->end()-1; |
746 | } |
747 | |
748 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); |
749 | |
750 | // Grow if necessary. |
751 | size_t Index = I - this->begin(); |
752 | std::remove_reference_t<ArgType> *EltPtr = |
753 | this->reserveForParamAndGetAddress(Elt); |
754 | I = this->begin() + Index; |
755 | |
756 | ::new ((void*) this->end()) T(::std::move(this->back())); |
757 | // Push everything else over. |
758 | std::move_backward(I, this->end()-1, this->end()); |
759 | this->set_size(this->size() + 1); |
760 | |
761 | // If we just moved the element we're inserting, be sure to update |
762 | // the reference (never happens if TakesParamByValue). |
763 | static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value, |
764 | "ArgType must be 'T' when taking by value!"); |
765 | if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end())) |
766 | ++EltPtr; |
767 | |
768 | *I = ::std::forward<ArgType>(*EltPtr); |
769 | return I; |
770 | } |
771 | |
772 | public: |
773 | iterator insert(iterator I, T &&Elt) { |
774 | return insert_one_impl(I, this->forward_value_param(std::move(Elt))); |
775 | } |
776 | |
777 | iterator insert(iterator I, const T &Elt) { |
778 | return insert_one_impl(I, this->forward_value_param(Elt)); |
779 | } |
780 | |
781 | iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) { |
782 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
783 | size_t InsertElt = I - this->begin(); |
784 | |
785 | if (I == this->end()) { // Important special case for empty vector. |
786 | append(NumToInsert, Elt); |
787 | return this->begin()+InsertElt; |
788 | } |
789 | |
790 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); |
791 | |
792 | // Ensure there is enough space, and get the (maybe updated) address of |
793 | // Elt. |
794 | const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert); |
795 | |
796 | // Uninvalidate the iterator. |
797 | I = this->begin()+InsertElt; |
798 | |
799 | // If there are more elements between the insertion point and the end of the |
800 | // range than there are being inserted, we can use a simple approach to |
801 | // insertion. Since we already reserved space, we know that this won't |
802 | // reallocate the vector. |
803 | if (size_t(this->end()-I) >= NumToInsert) { |
804 | T *OldEnd = this->end(); |
805 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
806 | std::move_iterator<iterator>(this->end())); |
807 | |
808 | // Copy the existing elements that get replaced. |
809 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
810 | |
811 | // If we just moved the element we're inserting, be sure to update |
812 | // the reference (never happens if TakesParamByValue). |
813 | if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) |
814 | EltPtr += NumToInsert; |
815 | |
816 | std::fill_n(I, NumToInsert, *EltPtr); |
817 | return I; |
818 | } |
819 | |
820 | // Otherwise, we're inserting more elements than exist already, and we're |
821 | // not inserting at the end. |
822 | |
823 | // Move over the elements that we're about to overwrite. |
824 | T *OldEnd = this->end(); |
825 | this->set_size(this->size() + NumToInsert); |
826 | size_t NumOverwritten = OldEnd-I; |
827 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
828 | |
829 | // If we just moved the element we're inserting, be sure to update |
830 | // the reference (never happens if TakesParamByValue). |
831 | if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) |
832 | EltPtr += NumToInsert; |
833 | |
834 | // Replace the overwritten part. |
835 | std::fill_n(I, NumOverwritten, *EltPtr); |
836 | |
837 | // Insert the non-overwritten middle part. |
838 | std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr); |
839 | return I; |
840 | } |
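// The EltPtr adjustments above are what keep self-referential inserts safe
// when ValueParamT is const T &. A minimal sketch (hypothetical values):
//
//   llvm::SmallVector<std::string> V = {"x", "y", "z"};
//   V.insert(V.begin(), 2, V[2]);  // Elt refers into V's own storage
//   // V is now {"z", "z", "x", "y", "z"}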
841 | |
842 | template <typename ItTy, |
843 | typename = std::enable_if_t<std::is_convertible< |
844 | typename std::iterator_traits<ItTy>::iterator_category, |
845 | std::input_iterator_tag>::value>> |
846 | iterator insert(iterator I, ItTy From, ItTy To) { |
847 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
848 | size_t InsertElt = I - this->begin(); |
849 | |
850 | if (I == this->end()) { // Important special case for empty vector. |
851 | append(From, To); |
852 | return this->begin()+InsertElt; |
853 | } |
854 | |
855 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds."); |
856 | |
857 | // Check that the reserve that follows doesn't invalidate the iterators. |
858 | this->assertSafeToAddRange(From, To); |
859 | |
860 | size_t NumToInsert = std::distance(From, To); |
861 | |
862 | // Ensure there is enough space. |
863 | reserve(this->size() + NumToInsert); |
864 | |
865 | // Uninvalidate the iterator. |
866 | I = this->begin()+InsertElt; |
867 | |
868 | // If there are more elements between the insertion point and the end of the |
869 | // range than there are being inserted, we can use a simple approach to |
870 | // insertion. Since we already reserved space, we know that this won't |
871 | // reallocate the vector. |
872 | if (size_t(this->end()-I) >= NumToInsert) { |
873 | T *OldEnd = this->end(); |
874 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
875 | std::move_iterator<iterator>(this->end())); |
876 | |
877 | // Copy the existing elements that get replaced. |
878 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
879 | |
880 | std::copy(From, To, I); |
881 | return I; |
882 | } |
883 | |
884 | // Otherwise, we're inserting more elements than exist already, and we're |
885 | // not inserting at the end. |
886 | |
887 | // Move over the elements that we're about to overwrite. |
888 | T *OldEnd = this->end(); |
889 | this->set_size(this->size() + NumToInsert); |
890 | size_t NumOverwritten = OldEnd-I; |
891 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
892 | |
893 | // Replace the overwritten part. |
894 | for (T *J = I; NumOverwritten > 0; --NumOverwritten) { |
895 | *J = *From; |
896 | ++J; ++From; |
897 | } |
898 | |
899 | // Insert the non-overwritten middle part. |
900 | this->uninitialized_copy(From, To, OldEnd); |
901 | return I; |
902 | } |
903 | |
904 | void insert(iterator I, std::initializer_list<T> IL) { |
905 | insert(I, IL.begin(), IL.end()); |
906 | } |
907 | |
908 | template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) { |
909 | if (LLVM_UNLIKELY(this->size() >= this->capacity())) |
910 | return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...); |
911 | |
912 | ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...); |
913 | this->set_size(this->size() + 1); |
914 | return this->back(); |
915 | } |
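// emplace_back constructs directly in the vector's storage when capacity
// allows; otherwise growAndEmplaceBack takes over (for non-trivial T it
// constructs into the new buffer before the old elements are moved). Sketch:
//
//   llvm::SmallVector<std::string> V;
//   V.emplace_back(3, 'a');  // constructs "aaa" in place from (3, 'a')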
916 | |
917 | SmallVectorImpl &operator=(const SmallVectorImpl &RHS); |
918 | |
919 | SmallVectorImpl &operator=(SmallVectorImpl &&RHS); |
920 | |
921 | bool operator==(const SmallVectorImpl &RHS) const { |
922 | if (this->size() != RHS.size()) return false; |
923 | return std::equal(this->begin(), this->end(), RHS.begin()); |
924 | } |
925 | bool operator!=(const SmallVectorImpl &RHS) const { |
926 | return !(*this == RHS); |
927 | } |
928 | |
929 | bool operator<(const SmallVectorImpl &RHS) const { |
930 | return std::lexicographical_compare(this->begin(), this->end(), |
931 | RHS.begin(), RHS.end()); |
932 | } |
933 | }; |
934 | |
935 | template <typename T> |
936 | void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) { |
937 | if (this == &RHS) return; |
938 | |
939 | // We can only avoid copying elements if neither vector is small. |
940 | if (!this->isSmall() && !RHS.isSmall()) { |
941 | std::swap(this->BeginX, RHS.BeginX); |
942 | std::swap(this->Size, RHS.Size); |
943 | std::swap(this->Capacity, RHS.Capacity); |
944 | return; |
945 | } |
946 | this->reserve(RHS.size()); |
947 | RHS.reserve(this->size()); |
948 | |
949 | // Swap the shared elements. |
950 | size_t NumShared = this->size(); |
951 | if (NumShared > RHS.size()) NumShared = RHS.size(); |
952 | for (size_type i = 0; i != NumShared; ++i) |
953 | std::swap((*this)[i], RHS[i]); |
954 | |
955 | // Copy over the extra elts. |
956 | if (this->size() > RHS.size()) { |
957 | size_t EltDiff = this->size() - RHS.size(); |
958 | this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end()); |
959 | RHS.set_size(RHS.size() + EltDiff); |
960 | this->destroy_range(this->begin()+NumShared, this->end()); |
961 | this->set_size(NumShared); |
962 | } else if (RHS.size() > this->size()) { |
963 | size_t EltDiff = RHS.size() - this->size(); |
964 | this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end()); |
965 | this->set_size(this->size() + EltDiff); |
966 | this->destroy_range(RHS.begin()+NumShared, RHS.end()); |
967 | RHS.set_size(NumShared); |
968 | } |
969 | } |
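// Sketch of the two swap paths above (hypothetical sizes, inline N == 4):
//
//   llvm::SmallVector<int, 4> A(8), B(16);  // both spilled to the heap
//   std::swap(A, B);                        // O(1): buffers and sizes swapped
//
//   llvm::SmallVector<int, 4> C(2), D(3);   // both still in inline storage
//   std::swap(C, D);                        // element-wise swap plus copy-over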
970 | |
971 | template <typename T> |
972 | SmallVectorImpl<T> &SmallVectorImpl<T>:: |
973 | operator=(const SmallVectorImpl<T> &RHS) { |
974 | // Avoid self-assignment. |
975 | if (this == &RHS) return *this; |
976 | |
977 | // If we already have sufficient space, assign the common elements, then |
978 | // destroy any excess. |
979 | size_t RHSSize = RHS.size(); |
980 | size_t CurSize = this->size(); |
981 | if (CurSize >= RHSSize) { |
982 | // Assign common elements. |
983 | iterator NewEnd; |
984 | if (RHSSize) |
985 | NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin()); |
986 | else |
987 | NewEnd = this->begin(); |
988 | |
989 | // Destroy excess elements. |
990 | this->destroy_range(NewEnd, this->end()); |
991 | |
992 | // Trim. |
993 | this->set_size(RHSSize); |
994 | return *this; |
995 | } |
996 | |
997 | // If we have to grow to have enough elements, destroy the current elements. |
998 | // This allows us to avoid copying them during the grow. |
999 | // FIXME: don't do this if they're efficiently moveable. |
1000 | if (this->capacity() < RHSSize) { |
1001 | // Destroy current elements. |
1002 | this->clear(); |
1003 | CurSize = 0; |
1004 | this->grow(RHSSize); |
1005 | } else if (CurSize) { |
1006 | // Otherwise, use assignment for the already-constructed elements. |
1007 | std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
1008 | } |
1009 | |
1010 | // Copy construct the new elements in place. |
1011 | this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(), |
1012 | this->begin()+CurSize); |
1013 | |
1014 | // Set end. |
1015 | this->set_size(RHSSize); |
1016 | return *this; |
1017 | } |
1018 | |
1019 | template <typename T> |
1020 | SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) { |
1021 | // Avoid self-assignment. |
1022 | if (this == &RHS) return *this; |
1023 | |
1024 | // If the RHS isn't small, clear this vector and then steal its buffer. |
1025 | if (!RHS.isSmall()) { |
1026 | this->destroy_range(this->begin(), this->end()); |
1027 | if (!this->isSmall()) free(this->begin()); |
1028 | this->BeginX = RHS.BeginX; |
1029 | this->Size = RHS.Size; |
1030 | this->Capacity = RHS.Capacity; |
1031 | RHS.resetToSmall(); |
1032 | return *this; |
1033 | } |
1034 | |
1035 | // If we already have sufficient space, assign the common elements, then |
1036 | // destroy any excess. |
1037 | size_t RHSSize = RHS.size(); |
1038 | size_t CurSize = this->size(); |
1039 | if (CurSize >= RHSSize) { |
1040 | // Assign common elements. |
1041 | iterator NewEnd = this->begin(); |
1042 | if (RHSSize) |
1043 | NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd); |
1044 | |
1045 | // Destroy excess elements and trim the bounds. |
1046 | this->destroy_range(NewEnd, this->end()); |
1047 | this->set_size(RHSSize); |
1048 | |
1049 | // Clear the RHS. |
1050 | RHS.clear(); |
1051 | |
1052 | return *this; |
1053 | } |
1054 | |
1055 | // If we have to grow to have enough elements, destroy the current elements. |
1056 | // This allows us to avoid copying them during the grow. |
1057 | // FIXME: this may not actually make any sense if we can efficiently move |
1058 | // elements. |
1059 | if (this->capacity() < RHSSize) { |
1060 | // Destroy current elements. |
1061 | this->clear(); |
1062 | CurSize = 0; |
1063 | this->grow(RHSSize); |
1064 | } else if (CurSize) { |
1065 | // Otherwise, use assignment for the already-constructed elements. |
1066 | std::move(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
1067 | } |
1068 | |
1069 | // Move-construct the new elements in place. |
1070 | this->uninitialized_move(RHS.begin()+CurSize, RHS.end(), |
1071 | this->begin()+CurSize); |
1072 | |
1073 | // Set end. |
1074 | this->set_size(RHSSize); |
1075 | |
1076 | RHS.clear(); |
1077 | return *this; |
1078 | } |
1079 | |
1080 | /// Storage for the SmallVector elements. This is specialized for the N=0 case |
1081 | /// to avoid allocating unnecessary storage. |
1082 | template <typename T, unsigned N> |
1083 | struct SmallVectorStorage { |
1084 | alignas(T) char InlineElts[N * sizeof(T)]; |
1085 | }; |
1086 | |
1087 | /// We need the storage to be properly aligned even for a small size of 0 so that |
1088 | /// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is |
1089 | /// well-defined. |
1090 | template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {}; |
1091 | |
1092 | /// Forward declaration of SmallVector so that |
1093 | /// calculateSmallVectorDefaultInlinedElements can reference |
1094 | /// `sizeof(SmallVector<T, 0>)`. |
1095 | template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector; |
1096 | |
1097 | /// Helper class for calculating the default number of inline elements for |
1098 | /// `SmallVector<T>`. |
1099 | /// |
1100 | /// This should be migrated to a constexpr function when our minimum |
1101 | /// compiler support is enough for multi-statement constexpr functions. |
1102 | template <typename T> struct CalculateSmallVectorDefaultInlinedElements { |
1103 | // Parameter controlling the default number of inlined elements |
1104 | // for `SmallVector<T>`. |
1105 | // |
1106 | // The default number of inlined elements ensures that |
1107 | // 1. There is at least one inlined element. |
1108 | // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless |
1109 | // it contradicts 1. |
1110 | static constexpr size_t kPreferredSmallVectorSizeof = 64; |
1111 | |
1112 | // static_assert that sizeof(T) is not "too big". |
1113 | // |
1114 | // Because our policy guarantees at least one inlined element, it is possible |
1115 | // for an arbitrarily large inlined element to allocate an arbitrarily large |
1116 | // amount of inline storage. We generally consider it an antipattern for a |
1117 | // SmallVector to allocate an excessive amount of inline storage, so we want |
1118 | // to call attention to these cases and make sure that users are making an |
1119 | // intentional decision if they request a lot of inline storage. |
1120 | // |
1121 | // We want this assertion to trigger in pathological cases, but otherwise |
1122 | // not be too easy to hit. To accomplish that, the cutoff is actually somewhat |
1123 | // larger than kPreferredSmallVectorSizeof (otherwise, |
1124 | // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that |
1125 | // pattern seems useful in practice). |
1126 | // |
1127 | // One wrinkle is that this assertion is in theory non-portable, since |
1128 | // sizeof(T) is in general platform-dependent. However, we don't expect this |
1129 | // to be much of an issue, because most LLVM development happens on 64-bit |
1130 | // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for |
1131 | // 32-bit hosts, dodging the issue. The reverse situation, where development |
1132 | // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a |
1133 | // 64-bit host, is expected to be very rare. |
1134 | static_assert( |
1135 | sizeof(T) <= 256, |
1136 | "You are trying to use a default number of inlined elements for " |
1137 | "`SmallVector<T>` but `sizeof(T)` is really big! Please use an " |
1138 | "explicit number of inlined elements with `SmallVector<T, N>` to make " |
1139 | "sure you really want that much inline storage."); |
1140 | |
1141 | // Discount the size of the header itself when calculating the maximum inline |
1142 | // bytes. |
1143 | static constexpr size_t PreferredInlineBytes = |
1144 | kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>); |
1145 | static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T); |
1146 | static constexpr size_t value = |
1147 | NumElementsThatFit == 0 ? 1 : NumElementsThatFit; |
1148 | }; |
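// Worked example of the policy above, assuming a typical 64-bit host where
// sizeof(SmallVector<T, 0>) == 16 (one pointer plus 32-bit Size/Capacity):
//
//   T = void *           : PreferredInlineBytes = 64 - 16 = 48, N = 48 / 8 = 6
//   T = a 40-byte struct : NumElementsThatFit = 48 / 40 = 1,    N = 1
//   T = a 72-byte struct : NumElementsThatFit = 0, so rule 1 forces N = 1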
1149 | |
1150 | /// This is a 'vector' (really, a variable-sized array), optimized |
1151 | /// for the case when the array is small. It contains some number of elements |
1152 | /// in-place, which allows it to avoid heap allocation when the actual number of |
1153 | /// elements is below that threshold. This allows normal "small" cases to be |
1154 | /// fast without losing generality for large inputs. |
1155 | /// |
1156 | /// \note |
1157 | /// In the absence of a well-motivated choice for the number of inlined |
1158 | /// elements \p N, it is recommended to use \c SmallVector<T> (that is, |
1159 | /// omitting the \p N). This will choose a default number of inlined elements |
1160 | /// reasonable for allocation on the stack (for example, trying to keep \c |
1161 | /// sizeof(SmallVector<T>) around 64 bytes). |
1162 | /// |
1163 | /// \warning This does not attempt to be exception safe. |
1164 | /// |
1165 | /// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h |
1166 | template <typename T, |
1167 | unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value> |
1168 | class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>, |
1169 | SmallVectorStorage<T, N> { |
1170 | public: |
1171 | SmallVector() : SmallVectorImpl<T>(N) {} |
1172 | |
1173 | ~SmallVector() { |
1174 | // Destroy the constructed elements in the vector. |
1175 | this->destroy_range(this->begin(), this->end()); |
1176 | } |
1177 | |
1178 | explicit SmallVector(size_t Size, const T &Value = T()) |
1179 | : SmallVectorImpl<T>(N) { |
1180 | this->assign(Size, Value); |
1181 | } |
1182 | |
1183 | template <typename ItTy, |
1184 | typename = std::enable_if_t<std::is_convertible< |
1185 | typename std::iterator_traits<ItTy>::iterator_category, |
1186 | std::input_iterator_tag>::value>> |
1187 | SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) { |
1188 | this->append(S, E); |
1189 | } |
1190 | |
1191 | template <typename RangeTy> |
1192 | explicit SmallVector(const iterator_range<RangeTy> &R) |
1193 | : SmallVectorImpl<T>(N) { |
1194 | this->append(R.begin(), R.end()); |
1195 | } |
1196 | |
1197 | SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) { |
1198 | this->assign(IL); |
1199 | } |
1200 | |
1201 | SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) { |
1202 | if (!RHS.empty()) |
1203 | SmallVectorImpl<T>::operator=(RHS); |
1204 | } |
1205 | |
1206 | SmallVector &operator=(const SmallVector &RHS) { |
1207 | SmallVectorImpl<T>::operator=(RHS); |
1208 | return *this; |
1209 | } |
1210 | |
1211 | SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) { |
1212 | if (!RHS.empty()) |
1213 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1214 | } |
1215 | |
1216 | SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) { |
1217 | if (!RHS.empty()) |
1218 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1219 | } |
1220 | |
1221 | SmallVector &operator=(SmallVector &&RHS) { |
1222 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1223 | return *this; |
1224 | } |
1225 | |
1226 | SmallVector &operator=(SmallVectorImpl<T> &&RHS) { |
1227 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1228 | return *this; |
1229 | } |
1230 | |
1231 | SmallVector &operator=(std::initializer_list<T> IL) { |
1232 | this->assign(IL); |
1233 | return *this; |
1234 | } |
1235 | }; |
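// Basic usage sketch of the class above (values are hypothetical):
//
//   llvm::SmallVector<unsigned, 8> Worklist;  // up to 8 elements stored inline
//   Worklist.push_back(42);
//   llvm::SmallVector<unsigned> Results;      // N chosen by the policy above
//   while (!Worklist.empty())
//     Results.push_back(Worklist.pop_back_val());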
1236 | |
1237 | template <typename T, unsigned N> |
1238 | inline size_t capacity_in_bytes(const SmallVector<T, N> &X) { |
1239 | return X.capacity_in_bytes(); |
1240 | } |
1241 | |
1242 | /// Given a range of type R, iterate the entire range and return a |
1243 | /// SmallVector with the elements of the range. This is useful, for example, |
1244 | /// when you want to iterate a range and then sort the results. |
1245 | template <unsigned Size, typename R> |
1246 | SmallVector<typename std::remove_const<typename std::remove_reference< |
1247 | decltype(*std::begin(std::declval<R &>()))>::type>::type, |
1248 | Size> |
1249 | to_vector(R &&Range) { |
1250 | return {std::begin(Range), std::end(Range)}; |
1251 | } |
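// Typical use of to_vector above: materialize a range so it can be reordered.
// Sketch, assuming It1/It2 are iterators supplied by the caller (llvm::sort
// and llvm::make_range live in other LLVM headers):
//
//   auto Values = to_vector<8>(llvm::make_range(It1, It2));
//   llvm::sort(Values);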
1252 | |
1253 | } // end namespace llvm |
1254 | |
1255 | namespace std { |
1256 | |
1257 | /// Implement std::swap in terms of SmallVector swap. |
1258 | template<typename T> |
1259 | inline void |
1260 | swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) { |
1261 | LHS.swap(RHS); |
1262 | } |
1263 | |
1264 | /// Implement std::swap in terms of SmallVector swap. |
1265 | template<typename T, unsigned N> |
1266 | inline void |
1267 | swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) { |
1268 | LHS.swap(RHS); |
1269 | } |
1270 | |
1271 | } // end namespace std |
1272 | |
1273 | #endif // LLVM_ADT_SMALLVECTOR_H |