File: llvm/lib/Transforms/Utils/LoopUnroll.cpp
Analyzer warning: line 546, column 34 — "Called C++ object pointer is null"
1 | //===-- UnrollLoop.cpp - Loop unrolling utilities -------------------------===// | ||||||||
2 | // | ||||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||
6 | // | ||||||||
7 | //===----------------------------------------------------------------------===// | ||||||||
8 | // | ||||||||
9 | // This file implements some loop unrolling utilities. It does not define any | ||||||||
10 | // actual pass or policy, but provides a single function to perform loop | ||||||||
11 | // unrolling. | ||||||||
12 | // | ||||||||
13 | // The process of unrolling can produce extraneous basic blocks linked with | ||||||||
14 | // unconditional branches. This will be corrected in the future. | ||||||||
15 | // | ||||||||
16 | //===----------------------------------------------------------------------===// | ||||||||
17 | |||||||||
18 | #include "llvm/ADT/ArrayRef.h" | ||||||||
19 | #include "llvm/ADT/DenseMap.h" | ||||||||
20 | #include "llvm/ADT/Optional.h" | ||||||||
21 | #include "llvm/ADT/STLExtras.h" | ||||||||
22 | #include "llvm/ADT/SetVector.h" | ||||||||
23 | #include "llvm/ADT/SmallVector.h" | ||||||||
24 | #include "llvm/ADT/Statistic.h" | ||||||||
25 | #include "llvm/ADT/StringRef.h" | ||||||||
26 | #include "llvm/ADT/Twine.h" | ||||||||
27 | #include "llvm/ADT/ilist_iterator.h" | ||||||||
28 | #include "llvm/ADT/iterator_range.h" | ||||||||
29 | #include "llvm/Analysis/AssumptionCache.h" | ||||||||
30 | #include "llvm/Analysis/DomTreeUpdater.h" | ||||||||
31 | #include "llvm/Analysis/InstructionSimplify.h" | ||||||||
32 | #include "llvm/Analysis/LoopInfo.h" | ||||||||
33 | #include "llvm/Analysis/LoopIterator.h" | ||||||||
34 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" | ||||||||
35 | #include "llvm/Analysis/ScalarEvolution.h" | ||||||||
36 | #include "llvm/IR/BasicBlock.h" | ||||||||
37 | #include "llvm/IR/CFG.h" | ||||||||
38 | #include "llvm/IR/CallSite.h" | ||||||||
39 | #include "llvm/IR/Constants.h" | ||||||||
40 | #include "llvm/IR/DebugInfoMetadata.h" | ||||||||
41 | #include "llvm/IR/DebugLoc.h" | ||||||||
42 | #include "llvm/IR/DiagnosticInfo.h" | ||||||||
43 | #include "llvm/IR/Dominators.h" | ||||||||
44 | #include "llvm/IR/Function.h" | ||||||||
45 | #include "llvm/IR/Instruction.h" | ||||||||
46 | #include "llvm/IR/Instructions.h" | ||||||||
47 | #include "llvm/IR/IntrinsicInst.h" | ||||||||
48 | #include "llvm/IR/Metadata.h" | ||||||||
49 | #include "llvm/IR/Module.h" | ||||||||
50 | #include "llvm/IR/Use.h" | ||||||||
51 | #include "llvm/IR/User.h" | ||||||||
52 | #include "llvm/IR/ValueHandle.h" | ||||||||
53 | #include "llvm/IR/ValueMap.h" | ||||||||
54 | #include "llvm/Support/Casting.h" | ||||||||
55 | #include "llvm/Support/CommandLine.h" | ||||||||
56 | #include "llvm/Support/Debug.h" | ||||||||
57 | #include "llvm/Support/GenericDomTree.h" | ||||||||
58 | #include "llvm/Support/MathExtras.h" | ||||||||
59 | #include "llvm/Support/raw_ostream.h" | ||||||||
60 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | ||||||||
61 | #include "llvm/Transforms/Utils/Cloning.h" | ||||||||
62 | #include "llvm/Transforms/Utils/Local.h" | ||||||||
63 | #include "llvm/Transforms/Utils/LoopSimplify.h" | ||||||||
64 | #include "llvm/Transforms/Utils/LoopUtils.h" | ||||||||
65 | #include "llvm/Transforms/Utils/SimplifyIndVar.h" | ||||||||
66 | #include "llvm/Transforms/Utils/UnrollLoop.h" | ||||||||
67 | #include "llvm/Transforms/Utils/ValueMapper.h" | ||||||||
68 | #include <algorithm> | ||||||||
69 | #include <assert.h> | ||||||||
70 | #include <type_traits> | ||||||||
71 | #include <vector> | ||||||||
72 | |||||||||
73 | namespace llvm { | ||||||||
74 | class DataLayout; | ||||||||
75 | class Value; | ||||||||
76 | } // namespace llvm | ||||||||
77 | |||||||||
78 | using namespace llvm; | ||||||||
79 | |||||||||
80 | #define DEBUG_TYPE"loop-unroll" "loop-unroll" | ||||||||
81 | |||||||||
82 | // TODO: Should these be here or in LoopUnroll? | ||||||||
83 | STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled")static llvm::Statistic NumCompletelyUnrolled = {"loop-unroll" , "NumCompletelyUnrolled", "Number of loops completely unrolled" }; | ||||||||
84 | STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)")static llvm::Statistic NumUnrolled = {"loop-unroll", "NumUnrolled" , "Number of loops unrolled (completely or otherwise)"}; | ||||||||
85 | STATISTIC(NumUnrolledWithHeader, "Number of loops unrolled without a "static llvm::Statistic NumUnrolledWithHeader = {"loop-unroll" , "NumUnrolledWithHeader", "Number of loops unrolled without a " "conditional latch (completely or otherwise)"} | ||||||||
86 | "conditional latch (completely or otherwise)")static llvm::Statistic NumUnrolledWithHeader = {"loop-unroll" , "NumUnrolledWithHeader", "Number of loops unrolled without a " "conditional latch (completely or otherwise)"}; | ||||||||
87 | |||||||||
88 | static cl::opt<bool> | ||||||||
89 | UnrollRuntimeEpilog("unroll-runtime-epilog", cl::init(false), cl::Hidden, | ||||||||
90 | cl::desc("Allow runtime unrolled loops to be unrolled " | ||||||||
91 | "with epilog instead of prolog.")); | ||||||||
92 | |||||||||
93 | static cl::opt<bool> | ||||||||
94 | UnrollVerifyDomtree("unroll-verify-domtree", cl::Hidden, | ||||||||
95 | cl::desc("Verify domtree after unrolling"), | ||||||||
96 | #ifdef EXPENSIVE_CHECKS | ||||||||
97 | cl::init(true) | ||||||||
98 | #else | ||||||||
99 | cl::init(false) | ||||||||
100 | #endif | ||||||||
101 | ); | ||||||||
102 | |||||||||
103 | /// Check if unrolling created a situation where we need to insert phi nodes to | ||||||||
104 | /// preserve LCSSA form. | ||||||||
105 | /// \param Blocks is a vector of basic blocks representing unrolled loop. | ||||||||
106 | /// \param L is the outer loop. | ||||||||
107 | /// It's possible that some of the blocks are in L, and some are not. In this | ||||||||
108 | /// case, if there is a use is outside L, and definition is inside L, we need to | ||||||||
109 | /// insert a phi-node, otherwise LCSSA will be broken. | ||||||||
110 | /// The function is just a helper function for llvm::UnrollLoop that returns | ||||||||
111 | /// true if this situation occurs, indicating that LCSSA needs to be fixed. | ||||||||
112 | static bool needToInsertPhisForLCSSA(Loop *L, std::vector<BasicBlock *> Blocks, | ||||||||
113 | LoopInfo *LI) { | ||||||||
114 | for (BasicBlock *BB : Blocks) { | ||||||||
115 | if (LI->getLoopFor(BB) == L) | ||||||||
116 | continue; | ||||||||
117 | for (Instruction &I : *BB) { | ||||||||
118 | for (Use &U : I.operands()) { | ||||||||
119 | if (auto Def = dyn_cast<Instruction>(U)) { | ||||||||
120 | Loop *DefLoop = LI->getLoopFor(Def->getParent()); | ||||||||
121 | if (!DefLoop) | ||||||||
122 | continue; | ||||||||
123 | if (DefLoop->contains(L)) | ||||||||
124 | return true; | ||||||||
125 | } | ||||||||
126 | } | ||||||||
127 | } | ||||||||
128 | } | ||||||||
129 | return false; | ||||||||
130 | } | ||||||||
131 | |||||||||
132 | /// Adds ClonedBB to LoopInfo, creates a new loop for ClonedBB if necessary | ||||||||
133 | /// and adds a mapping from the original loop to the new loop to NewLoops. | ||||||||
134 | /// Returns nullptr if no new loop was created and a pointer to the | ||||||||
135 | /// original loop OriginalBB was part of otherwise. | ||||||||
136 | const Loop* llvm::addClonedBlockToLoopInfo(BasicBlock *OriginalBB, | ||||||||
137 | BasicBlock *ClonedBB, LoopInfo *LI, | ||||||||
138 | NewLoopsMap &NewLoops) { | ||||||||
139 | // Figure out which loop New is in. | ||||||||
140 | const Loop *OldLoop = LI->getLoopFor(OriginalBB); | ||||||||
141 | assert(OldLoop && "Should (at least) be in the loop being unrolled!")((OldLoop && "Should (at least) be in the loop being unrolled!" ) ? static_cast<void> (0) : __assert_fail ("OldLoop && \"Should (at least) be in the loop being unrolled!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 141, __PRETTY_FUNCTION__)); | ||||||||
142 | |||||||||
143 | Loop *&NewLoop = NewLoops[OldLoop]; | ||||||||
144 | if (!NewLoop) { | ||||||||
145 | // Found a new sub-loop. | ||||||||
146 | assert(OriginalBB == OldLoop->getHeader() &&((OriginalBB == OldLoop->getHeader() && "Header should be first in RPO" ) ? static_cast<void> (0) : __assert_fail ("OriginalBB == OldLoop->getHeader() && \"Header should be first in RPO\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 147, __PRETTY_FUNCTION__)) | ||||||||
147 | "Header should be first in RPO")((OriginalBB == OldLoop->getHeader() && "Header should be first in RPO" ) ? static_cast<void> (0) : __assert_fail ("OriginalBB == OldLoop->getHeader() && \"Header should be first in RPO\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 147, __PRETTY_FUNCTION__)); | ||||||||
148 | |||||||||
149 | NewLoop = LI->AllocateLoop(); | ||||||||
150 | Loop *NewLoopParent = NewLoops.lookup(OldLoop->getParentLoop()); | ||||||||
151 | |||||||||
152 | if (NewLoopParent) | ||||||||
153 | NewLoopParent->addChildLoop(NewLoop); | ||||||||
154 | else | ||||||||
155 | LI->addTopLevelLoop(NewLoop); | ||||||||
156 | |||||||||
157 | NewLoop->addBasicBlockToLoop(ClonedBB, *LI); | ||||||||
158 | return OldLoop; | ||||||||
159 | } else { | ||||||||
160 | NewLoop->addBasicBlockToLoop(ClonedBB, *LI); | ||||||||
161 | return nullptr; | ||||||||
162 | } | ||||||||
163 | } | ||||||||
164 | |||||||||
165 | /// The function chooses which type of unroll (epilog or prolog) is more | ||||||||
166 | /// profitabale. | ||||||||
167 | /// Epilog unroll is more profitable when there is PHI that starts from | ||||||||
168 | /// constant. In this case epilog will leave PHI start from constant, | ||||||||
169 | /// but prolog will convert it to non-constant. | ||||||||
170 | /// | ||||||||
171 | /// loop: | ||||||||
172 | /// PN = PHI [I, Latch], [CI, PreHeader] | ||||||||
173 | /// I = foo(PN) | ||||||||
174 | /// ... | ||||||||
175 | /// | ||||||||
176 | /// Epilog unroll case. | ||||||||
177 | /// loop: | ||||||||
178 | /// PN = PHI [I2, Latch], [CI, PreHeader] | ||||||||
179 | /// I1 = foo(PN) | ||||||||
180 | /// I2 = foo(I1) | ||||||||
181 | /// ... | ||||||||
182 | /// Prolog unroll case. | ||||||||
183 | /// NewPN = PHI [PrologI, Prolog], [CI, PreHeader] | ||||||||
184 | /// loop: | ||||||||
185 | /// PN = PHI [I2, Latch], [NewPN, PreHeader] | ||||||||
186 | /// I1 = foo(PN) | ||||||||
187 | /// I2 = foo(I1) | ||||||||
188 | /// ... | ||||||||
189 | /// | ||||||||
190 | static bool isEpilogProfitable(Loop *L) { | ||||||||
191 | BasicBlock *PreHeader = L->getLoopPreheader(); | ||||||||
192 | BasicBlock *Header = L->getHeader(); | ||||||||
193 | assert(PreHeader && Header)((PreHeader && Header) ? static_cast<void> (0) : __assert_fail ("PreHeader && Header", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 193, __PRETTY_FUNCTION__)); | ||||||||
194 | for (const PHINode &PN : Header->phis()) { | ||||||||
195 | if (isa<ConstantInt>(PN.getIncomingValueForBlock(PreHeader))) | ||||||||
196 | return true; | ||||||||
197 | } | ||||||||
198 | return false; | ||||||||
199 | } | ||||||||
200 | |||||||||
201 | /// Perform some cleanup and simplifications on loops after unrolling. It is | ||||||||
202 | /// useful to simplify the IV's in the new loop, as well as do a quick | ||||||||
203 | /// simplify/dce pass of the instructions. | ||||||||
204 | void llvm::simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI, | ||||||||
205 | ScalarEvolution *SE, DominatorTree *DT, | ||||||||
206 | AssumptionCache *AC, | ||||||||
207 | const TargetTransformInfo *TTI) { | ||||||||
208 | // Simplify any new induction variables in the partially unrolled loop. | ||||||||
209 | if (SE && SimplifyIVs) { | ||||||||
210 | SmallVector<WeakTrackingVH, 16> DeadInsts; | ||||||||
211 | simplifyLoopIVs(L, SE, DT, LI, TTI, DeadInsts); | ||||||||
212 | |||||||||
213 | // Aggressively clean up dead instructions that simplifyLoopIVs already | ||||||||
214 | // identified. Any remaining should be cleaned up below. | ||||||||
215 | while (!DeadInsts.empty()) { | ||||||||
216 | Value *V = DeadInsts.pop_back_val(); | ||||||||
217 | if (Instruction *Inst = dyn_cast_or_null<Instruction>(V)) | ||||||||
218 | RecursivelyDeleteTriviallyDeadInstructions(Inst); | ||||||||
219 | } | ||||||||
220 | } | ||||||||
221 | |||||||||
222 | // At this point, the code is well formed. We now do a quick sweep over the | ||||||||
223 | // inserted code, doing constant propagation and dead code elimination as we | ||||||||
224 | // go. | ||||||||
225 | const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); | ||||||||
226 | for (BasicBlock *BB : L->getBlocks()) { | ||||||||
227 | for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { | ||||||||
228 | Instruction *Inst = &*I++; | ||||||||
229 | |||||||||
230 | if (Value *V = SimplifyInstruction(Inst, {DL, nullptr, DT, AC})) | ||||||||
231 | if (LI->replacementPreservesLCSSAForm(Inst, V)) | ||||||||
232 | Inst->replaceAllUsesWith(V); | ||||||||
233 | if (isInstructionTriviallyDead(Inst)) | ||||||||
234 | BB->getInstList().erase(Inst); | ||||||||
235 | } | ||||||||
236 | } | ||||||||
237 | |||||||||
238 | // TODO: after peeling or unrolling, previously loop variant conditions are | ||||||||
239 | // likely to fold to constants, eagerly propagating those here will require | ||||||||
240 | // fewer cleanup passes to be run. Alternatively, a LoopEarlyCSE might be | ||||||||
241 | // appropriate. | ||||||||
242 | } | ||||||||
243 | |||||||||
244 | /// Unroll the given loop by Count. The loop must be in LCSSA form. Unrolling | ||||||||
245 | /// can only fail when the loop's latch block is not terminated by a conditional | ||||||||
246 | /// branch instruction. However, if the trip count (and multiple) are not known, | ||||||||
247 | /// loop unrolling will mostly produce more code that is no faster. | ||||||||
248 | /// | ||||||||
249 | /// TripCount is the upper bound of the iteration on which control exits | ||||||||
250 | /// LatchBlock. Control may exit the loop prior to TripCount iterations either | ||||||||
251 | /// via an early branch in other loop block or via LatchBlock terminator. This | ||||||||
252 | /// is relaxed from the general definition of trip count which is the number of | ||||||||
253 | /// times the loop header executes. Note that UnrollLoop assumes that the loop | ||||||||
254 | /// counter test is in LatchBlock in order to remove unnecesssary instances of | ||||||||
255 | /// the test. If control can exit the loop from the LatchBlock's terminator | ||||||||
256 | /// prior to TripCount iterations, flag PreserveCondBr needs to be set. | ||||||||
257 | /// | ||||||||
258 | /// PreserveCondBr indicates whether the conditional branch of the LatchBlock | ||||||||
259 | /// needs to be preserved. It is needed when we use trip count upper bound to | ||||||||
260 | /// fully unroll the loop. If PreserveOnlyFirst is also set then only the first | ||||||||
261 | /// conditional branch needs to be preserved. | ||||||||
262 | /// | ||||||||
263 | /// Similarly, TripMultiple divides the number of times that the LatchBlock may | ||||||||
264 | /// execute without exiting the loop. | ||||||||
265 | /// | ||||||||
266 | /// If AllowRuntime is true then UnrollLoop will consider unrolling loops that | ||||||||
267 | /// have a runtime (i.e. not compile time constant) trip count. Unrolling these | ||||||||
268 | /// loops require a unroll "prologue" that runs "RuntimeTripCount % Count" | ||||||||
269 | /// iterations before branching into the unrolled loop. UnrollLoop will not | ||||||||
270 | /// runtime-unroll the loop if computing RuntimeTripCount will be expensive and | ||||||||
271 | /// AllowExpensiveTripCount is false. | ||||||||
272 | /// | ||||||||
273 | /// If we want to perform PGO-based loop peeling, PeelCount is set to the | ||||||||
274 | /// number of iterations we want to peel off. | ||||||||
275 | /// | ||||||||
276 | /// The LoopInfo Analysis that is passed will be kept consistent. | ||||||||
277 | /// | ||||||||
278 | /// This utility preserves LoopInfo. It will also preserve ScalarEvolution and | ||||||||
279 | /// DominatorTree if they are non-null. | ||||||||
280 | /// | ||||||||
281 | /// If RemainderLoop is non-null, it will receive the remainder loop (if | ||||||||
282 | /// required and not fully unrolled). | ||||||||
283 | LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI, | ||||||||
284 | ScalarEvolution *SE, DominatorTree *DT, | ||||||||
285 | AssumptionCache *AC, | ||||||||
286 | const TargetTransformInfo *TTI, | ||||||||
287 | OptimizationRemarkEmitter *ORE, | ||||||||
288 | bool PreserveLCSSA, Loop **RemainderLoop) { | ||||||||
289 | |||||||||
290 | BasicBlock *Preheader = L->getLoopPreheader(); | ||||||||
291 | if (!Preheader) { | ||||||||
| |||||||||
292 | LLVM_DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " Can't unroll; loop preheader-insertion failed.\n" ; } } while (false); | ||||||||
293 | return LoopUnrollResult::Unmodified; | ||||||||
294 | } | ||||||||
295 | |||||||||
296 | BasicBlock *LatchBlock = L->getLoopLatch(); | ||||||||
297 | if (!LatchBlock) { | ||||||||
298 | LLVM_DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " Can't unroll; loop exit-block-insertion failed.\n" ; } } while (false); | ||||||||
299 | return LoopUnrollResult::Unmodified; | ||||||||
300 | } | ||||||||
301 | |||||||||
302 | // Loops with indirectbr cannot be cloned. | ||||||||
303 | if (!L->isSafeToClone()) { | ||||||||
304 | LLVM_DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " Can't unroll; Loop body cannot be cloned.\n" ; } } while (false); | ||||||||
305 | return LoopUnrollResult::Unmodified; | ||||||||
306 | } | ||||||||
307 | |||||||||
308 | // The current loop unroll pass can unroll loops with a single latch or header | ||||||||
309 | // that's a conditional branch exiting the loop. | ||||||||
310 | // FIXME: The implementation can be extended to work with more complicated | ||||||||
311 | // cases, e.g. loops with multiple latches. | ||||||||
312 | BasicBlock *Header = L->getHeader(); | ||||||||
313 | BranchInst *HeaderBI = dyn_cast<BranchInst>(Header->getTerminator()); | ||||||||
314 | BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator()); | ||||||||
315 | |||||||||
316 | // FIXME: Support loops without conditional latch and multiple exiting blocks. | ||||||||
317 | if (!BI
| ||||||||
318 | (BI->isUnconditional() && (!HeaderBI || HeaderBI->isUnconditional() || | ||||||||
319 | L->getExitingBlock() != Header))) { | ||||||||
320 | LLVM_DEBUG(dbgs() << " Can't unroll; loop not terminated by a conditional "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " Can't unroll; loop not terminated by a conditional " "branch in the latch or header.\n"; } } while (false) | ||||||||
321 | "branch in the latch or header.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " Can't unroll; loop not terminated by a conditional " "branch in the latch or header.\n"; } } while (false); | ||||||||
322 | return LoopUnrollResult::Unmodified; | ||||||||
323 | } | ||||||||
324 | |||||||||
325 | auto CheckLatchSuccessors = [&](unsigned S1, unsigned S2) { | ||||||||
326 | return BI->isConditional() && BI->getSuccessor(S1) == Header && | ||||||||
327 | !L->contains(BI->getSuccessor(S2)); | ||||||||
328 | }; | ||||||||
329 | |||||||||
330 | // If we have a conditional latch, it must exit the loop. | ||||||||
331 | if (BI
| ||||||||
332 | !CheckLatchSuccessors(1, 0)) { | ||||||||
333 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "Can't unroll; a conditional latch must exit the loop" ; } } while (false) | ||||||||
334 | dbgs() << "Can't unroll; a conditional latch must exit the loop")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "Can't unroll; a conditional latch must exit the loop" ; } } while (false); | ||||||||
335 | return LoopUnrollResult::Unmodified; | ||||||||
336 | } | ||||||||
337 | |||||||||
338 | auto CheckHeaderSuccessors = [&](unsigned S1, unsigned S2) { | ||||||||
339 | return HeaderBI && HeaderBI->isConditional() && | ||||||||
340 | L->contains(HeaderBI->getSuccessor(S1)) && | ||||||||
341 | !L->contains(HeaderBI->getSuccessor(S2)); | ||||||||
342 | }; | ||||||||
343 | |||||||||
344 | // If we do not have a conditional latch, the header must exit the loop. | ||||||||
345 | if (BI
| ||||||||
346 | !CheckHeaderSuccessors(0, 1) && !CheckHeaderSuccessors(1, 0)) { | ||||||||
347 | LLVM_DEBUG(dbgs() << "Can't unroll; conditional header must exit the loop")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "Can't unroll; conditional header must exit the loop" ; } } while (false); | ||||||||
348 | return LoopUnrollResult::Unmodified; | ||||||||
349 | } | ||||||||
350 | |||||||||
351 | if (Header->hasAddressTaken()) { | ||||||||
352 | // The loop-rotate pass can be helpful to avoid this in many cases. | ||||||||
353 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " Won't unroll loop: address of header block is taken.\n" ; } } while (false) | ||||||||
354 | dbgs() << " Won't unroll loop: address of header block is taken.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " Won't unroll loop: address of header block is taken.\n" ; } } while (false); | ||||||||
355 | return LoopUnrollResult::Unmodified; | ||||||||
356 | } | ||||||||
357 | |||||||||
358 | if (ULO.TripCount != 0) | ||||||||
359 | LLVM_DEBUG(dbgs() << " Trip Count = " << ULO.TripCount << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " Trip Count = " << ULO.TripCount << "\n"; } } while (false); | ||||||||
360 | if (ULO.TripMultiple != 1) | ||||||||
361 | LLVM_DEBUG(dbgs() << " Trip Multiple = " << ULO.TripMultiple << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " Trip Multiple = " << ULO.TripMultiple << "\n"; } } while (false); | ||||||||
362 | |||||||||
363 | // Effectively "DCE" unrolled iterations that are beyond the tripcount | ||||||||
364 | // and will never be executed. | ||||||||
365 | if (ULO.TripCount
| ||||||||
366 | ULO.Count = ULO.TripCount; | ||||||||
367 | |||||||||
368 | // Don't enter the unroll code if there is nothing to do. | ||||||||
369 | if (ULO.TripCount
| ||||||||
370 | LLVM_DEBUG(dbgs() << "Won't unroll; almost nothing to do\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "Won't unroll; almost nothing to do\n" ; } } while (false); | ||||||||
371 | return LoopUnrollResult::Unmodified; | ||||||||
372 | } | ||||||||
373 | |||||||||
374 | assert(ULO.Count > 0)((ULO.Count > 0) ? static_cast<void> (0) : __assert_fail ("ULO.Count > 0", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 374, __PRETTY_FUNCTION__)); | ||||||||
375 | assert(ULO.TripMultiple > 0)((ULO.TripMultiple > 0) ? static_cast<void> (0) : __assert_fail ("ULO.TripMultiple > 0", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 375, __PRETTY_FUNCTION__)); | ||||||||
376 | assert(ULO.TripCount == 0 || ULO.TripCount % ULO.TripMultiple == 0)((ULO.TripCount == 0 || ULO.TripCount % ULO.TripMultiple == 0 ) ? static_cast<void> (0) : __assert_fail ("ULO.TripCount == 0 || ULO.TripCount % ULO.TripMultiple == 0" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 376, __PRETTY_FUNCTION__)); | ||||||||
377 | |||||||||
378 | // Are we eliminating the loop control altogether? | ||||||||
379 | bool CompletelyUnroll = ULO.Count
| ||||||||
380 | SmallVector<BasicBlock *, 4> ExitBlocks; | ||||||||
381 | L->getExitBlocks(ExitBlocks); | ||||||||
382 | std::vector<BasicBlock*> OriginalLoopBlocks = L->getBlocks(); | ||||||||
383 | |||||||||
384 | // Go through all exits of L and see if there are any phi-nodes there. We just | ||||||||
385 | // conservatively assume that they're inserted to preserve LCSSA form, which | ||||||||
386 | // means that complete unrolling might break this form. We need to either fix | ||||||||
387 | // it in-place after the transformation, or entirely rebuild LCSSA. TODO: For | ||||||||
388 | // now we just recompute LCSSA for the outer loop, but it should be possible | ||||||||
389 | // to fix it in-place. | ||||||||
390 | bool NeedToFixLCSSA = PreserveLCSSA && CompletelyUnroll && | ||||||||
391 | any_of(ExitBlocks, [](const BasicBlock *BB) { | ||||||||
392 | return isa<PHINode>(BB->begin()); | ||||||||
393 | }); | ||||||||
394 | |||||||||
395 | // We assume a run-time trip count if the compiler cannot | ||||||||
396 | // figure out the loop trip count and the unroll-runtime | ||||||||
397 | // flag is specified. | ||||||||
398 | bool RuntimeTripCount = | ||||||||
399 | (ULO.TripCount
| ||||||||
400 | |||||||||
401 | assert((!RuntimeTripCount || !ULO.PeelCount) &&(((!RuntimeTripCount || !ULO.PeelCount) && "Did not expect runtime trip-count unrolling " "and peeling for the same loop") ? static_cast<void> ( 0) : __assert_fail ("(!RuntimeTripCount || !ULO.PeelCount) && \"Did not expect runtime trip-count unrolling \" \"and peeling for the same loop\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 403, __PRETTY_FUNCTION__)) | ||||||||
402 | "Did not expect runtime trip-count unrolling "(((!RuntimeTripCount || !ULO.PeelCount) && "Did not expect runtime trip-count unrolling " "and peeling for the same loop") ? static_cast<void> ( 0) : __assert_fail ("(!RuntimeTripCount || !ULO.PeelCount) && \"Did not expect runtime trip-count unrolling \" \"and peeling for the same loop\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 403, __PRETTY_FUNCTION__)) | ||||||||
403 | "and peeling for the same loop")(((!RuntimeTripCount || !ULO.PeelCount) && "Did not expect runtime trip-count unrolling " "and peeling for the same loop") ? static_cast<void> ( 0) : __assert_fail ("(!RuntimeTripCount || !ULO.PeelCount) && \"Did not expect runtime trip-count unrolling \" \"and peeling for the same loop\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 403, __PRETTY_FUNCTION__)); | ||||||||
404 | |||||||||
405 | bool Peeled = false; | ||||||||
406 | if (ULO.PeelCount
| ||||||||
407 | Peeled = peelLoop(L, ULO.PeelCount, LI, SE, DT, AC, PreserveLCSSA); | ||||||||
408 | |||||||||
409 | // Successful peeling may result in a change in the loop preheader/trip | ||||||||
410 | // counts. If we later unroll the loop, we want these to be updated. | ||||||||
411 | if (Peeled) { | ||||||||
412 | // According to our guards and profitability checks the only | ||||||||
413 | // meaningful exit should be latch block. Other exits go to deopt, | ||||||||
414 | // so we do not worry about them. | ||||||||
415 | BasicBlock *ExitingBlock = L->getLoopLatch(); | ||||||||
416 | assert(ExitingBlock && "Loop without exiting block?")((ExitingBlock && "Loop without exiting block?") ? static_cast <void> (0) : __assert_fail ("ExitingBlock && \"Loop without exiting block?\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 416, __PRETTY_FUNCTION__)); | ||||||||
417 | assert(L->isLoopExiting(ExitingBlock) && "Latch is not exiting?")((L->isLoopExiting(ExitingBlock) && "Latch is not exiting?" ) ? static_cast<void> (0) : __assert_fail ("L->isLoopExiting(ExitingBlock) && \"Latch is not exiting?\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 417, __PRETTY_FUNCTION__)); | ||||||||
418 | Preheader = L->getLoopPreheader(); | ||||||||
419 | ULO.TripCount = SE->getSmallConstantTripCount(L, ExitingBlock); | ||||||||
420 | ULO.TripMultiple = SE->getSmallConstantTripMultiple(L, ExitingBlock); | ||||||||
421 | } | ||||||||
422 | } | ||||||||
423 | |||||||||
424 | // Loops containing convergent instructions must have a count that divides | ||||||||
425 | // their TripMultiple. | ||||||||
426 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
427 | {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
428 | bool HasConvergent = false;do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
429 | for (auto &BB : L->blocks())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
430 | for (auto &I : *BB)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
431 | if (auto CS = CallSite(&I))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
432 | HasConvergent |= CS.isConvergent();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
433 | assert((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
434 | "Unroll count must divide trip multiple if loop contains a "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
435 | "convergent operation.");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false) | ||||||||
436 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { { bool HasConvergent = false; for (auto & BB : L->blocks()) for (auto &I : *BB) if (auto CS = CallSite (&I)) HasConvergent |= CS.isConvergent(); (((!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && "Unroll count must divide trip multiple if loop contains a " "convergent operation.") ? static_cast<void> (0) : __assert_fail ("(!HasConvergent || ULO.TripMultiple % ULO.Count == 0) && \"Unroll count must divide trip multiple if loop contains a \" \"convergent operation.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 435, __PRETTY_FUNCTION__)); }; } } while (false); | ||||||||
437 | |||||||||
438 | bool EpilogProfitability = | ||||||||
439 | UnrollRuntimeEpilog.getNumOccurrences() ? UnrollRuntimeEpilog | ||||||||
440 | : isEpilogProfitable(L); | ||||||||
441 | |||||||||
442 | if (RuntimeTripCount
| ||||||||
443 | !UnrollRuntimeLoopRemainder(L, ULO.Count, ULO.AllowExpensiveTripCount, | ||||||||
444 | EpilogProfitability, ULO.UnrollRemainder, | ||||||||
445 | ULO.ForgetAllSCEV, LI, SE, DT, AC, TTI, | ||||||||
446 | PreserveLCSSA, RemainderLoop)) { | ||||||||
447 | if (ULO.Force) | ||||||||
448 | RuntimeTripCount = false; | ||||||||
449 | else { | ||||||||
450 | LLVM_DEBUG(dbgs() << "Won't unroll; remainder loop could not be "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "Won't unroll; remainder loop could not be " "generated when assuming runtime trip count\n"; } } while (false ) | ||||||||
451 | "generated when assuming runtime trip count\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "Won't unroll; remainder loop could not be " "generated when assuming runtime trip count\n"; } } while (false ); | ||||||||
452 | return LoopUnrollResult::Unmodified; | ||||||||
453 | } | ||||||||
454 | } | ||||||||
455 | |||||||||
456 | // If we know the trip count, we know the multiple... | ||||||||
457 | unsigned BreakoutTrip = 0; | ||||||||
458 | if (ULO.TripCount
| ||||||||
459 | BreakoutTrip = ULO.TripCount % ULO.Count; | ||||||||
460 | ULO.TripMultiple = 0; | ||||||||
461 | } else { | ||||||||
462 | // Figure out what multiple to use. | ||||||||
463 | BreakoutTrip = ULO.TripMultiple = | ||||||||
464 | (unsigned)GreatestCommonDivisor64(ULO.Count, ULO.TripMultiple); | ||||||||
465 | } | ||||||||
466 | |||||||||
467 | using namespace ore; | ||||||||
468 | // Report the unrolling decision. | ||||||||
469 | if (CompletelyUnroll
| ||||||||
470 | LLVM_DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName() << " with trip count " << ULO.TripCount << "!\n"; } } while (false) | ||||||||
471 | << " with trip count " << ULO.TripCount << "!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName() << " with trip count " << ULO.TripCount << "!\n"; } } while (false); | ||||||||
472 | if (ORE) | ||||||||
473 | ORE->emit([&]() { | ||||||||
474 | return OptimizationRemark(DEBUG_TYPE"loop-unroll", "FullyUnrolled", L->getStartLoc(), | ||||||||
475 | L->getHeader()) | ||||||||
476 | << "completely unrolled loop with " | ||||||||
477 | << NV("UnrollCount", ULO.TripCount) << " iterations"; | ||||||||
478 | }); | ||||||||
479 | } else if (ULO.PeelCount
| ||||||||
480 | LLVM_DEBUG(dbgs() << "PEELING loop %" << Header->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "PEELING loop %" << Header ->getName() << " with iteration count " << ULO .PeelCount << "!\n"; } } while (false) | ||||||||
481 | << " with iteration count " << ULO.PeelCount << "!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "PEELING loop %" << Header ->getName() << " with iteration count " << ULO .PeelCount << "!\n"; } } while (false); | ||||||||
482 | if (ORE) | ||||||||
483 | ORE->emit([&]() { | ||||||||
484 | return OptimizationRemark(DEBUG_TYPE"loop-unroll", "Peeled", L->getStartLoc(), | ||||||||
485 | L->getHeader()) | ||||||||
486 | << " peeled loop by " << NV("PeelCount", ULO.PeelCount) | ||||||||
487 | << " iterations"; | ||||||||
488 | }); | ||||||||
489 | } else { | ||||||||
490 | auto DiagBuilder = [&]() { | ||||||||
491 | OptimizationRemark Diag(DEBUG_TYPE"loop-unroll", "PartialUnrolled", L->getStartLoc(), | ||||||||
492 | L->getHeader()); | ||||||||
493 | return Diag << "unrolled loop by a factor of " | ||||||||
494 | << NV("UnrollCount", ULO.Count); | ||||||||
495 | }; | ||||||||
496 | |||||||||
497 | LLVM_DEBUG(dbgs() << "UNROLLING loop %" << Header->getName() << " by "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "UNROLLING loop %" << Header->getName() << " by " << ULO.Count; } } while (false) | ||||||||
498 | << ULO.Count)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "UNROLLING loop %" << Header->getName() << " by " << ULO.Count; } } while (false); | ||||||||
499 | if (ULO.TripMultiple
| ||||||||
500 | LLVM_DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " with a breakout at trip " << BreakoutTrip; } } while (false); | ||||||||
501 | if (ORE) | ||||||||
502 | ORE->emit([&]() { | ||||||||
503 | return DiagBuilder() << " with a breakout at trip " | ||||||||
504 | << NV("BreakoutTrip", BreakoutTrip); | ||||||||
505 | }); | ||||||||
506 | } else if (ULO.TripMultiple
| ||||||||
507 | LLVM_DEBUG(dbgs() << " with " << ULO.TripMultiple << " trips per branch")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " with " << ULO.TripMultiple << " trips per branch"; } } while (false); | ||||||||
508 | if (ORE) | ||||||||
509 | ORE->emit([&]() { | ||||||||
510 | return DiagBuilder() | ||||||||
511 | << " with " << NV("TripMultiple", ULO.TripMultiple) | ||||||||
512 | << " trips per branch"; | ||||||||
513 | }); | ||||||||
514 | } else if (RuntimeTripCount
| ||||||||
515 | LLVM_DEBUG(dbgs() << " with run-time trip count")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << " with run-time trip count" ; } } while (false); | ||||||||
516 | if (ORE) | ||||||||
517 | ORE->emit( | ||||||||
518 | [&]() { return DiagBuilder() << " with run-time trip count"; }); | ||||||||
519 | } | ||||||||
520 | LLVM_DEBUG(dbgs() << "!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "!\n"; } } while (false); | ||||||||
521 | } | ||||||||
522 | |||||||||
523 | // We are going to make changes to this loop. SCEV may be keeping cached info | ||||||||
524 | // about it, in particular about backedge taken count. The changes we make | ||||||||
525 | // are guaranteed to invalidate this information for our loop. It is tempting | ||||||||
526 | // to only invalidate the loop being unrolled, but it is incorrect as long as | ||||||||
527 | // all exiting branches from all inner loops have impact on the outer loops, | ||||||||
528 | // and if something changes inside them then any of outer loops may also | ||||||||
529 | // change. When we forget outermost loop, we also forget all contained loops | ||||||||
530 | // and this is what we need here. | ||||||||
531 | if (SE) { | ||||||||
532 | if (ULO.ForgetAllSCEV) | ||||||||
533 | SE->forgetAllLoops(); | ||||||||
534 | else | ||||||||
535 | SE->forgetTopmostLoop(L); | ||||||||
536 | } | ||||||||
537 | |||||||||
538 | bool ContinueOnTrue; | ||||||||
539 | bool LatchIsExiting = BI->isConditional(); | ||||||||
540 | BasicBlock *LoopExit = nullptr; | ||||||||
541 | if (LatchIsExiting
| ||||||||
542 | ContinueOnTrue = L->contains(BI->getSuccessor(0)); | ||||||||
543 | LoopExit = BI->getSuccessor(ContinueOnTrue); | ||||||||
544 | } else { | ||||||||
545 | NumUnrolledWithHeader++; | ||||||||
546 | ContinueOnTrue = L->contains(HeaderBI->getSuccessor(0)); | ||||||||
| |||||||||
547 | LoopExit = HeaderBI->getSuccessor(ContinueOnTrue); | ||||||||
548 | } | ||||||||
549 | |||||||||
550 | // For the first iteration of the loop, we should use the precloned values for | ||||||||
551 | // PHI nodes. Insert associations now. | ||||||||
552 | ValueToValueMapTy LastValueMap; | ||||||||
553 | std::vector<PHINode*> OrigPHINode; | ||||||||
554 | for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) { | ||||||||
555 | OrigPHINode.push_back(cast<PHINode>(I)); | ||||||||
556 | } | ||||||||
557 | |||||||||
558 | std::vector<BasicBlock *> Headers; | ||||||||
559 | std::vector<BasicBlock *> HeaderSucc; | ||||||||
560 | std::vector<BasicBlock *> Latches; | ||||||||
561 | Headers.push_back(Header); | ||||||||
562 | Latches.push_back(LatchBlock); | ||||||||
563 | |||||||||
564 | if (!LatchIsExiting) { | ||||||||
565 | auto *Term = cast<BranchInst>(Header->getTerminator()); | ||||||||
566 | if (Term->isUnconditional() || L->contains(Term->getSuccessor(0))) { | ||||||||
567 | assert(L->contains(Term->getSuccessor(0)))((L->contains(Term->getSuccessor(0))) ? static_cast< void> (0) : __assert_fail ("L->contains(Term->getSuccessor(0))" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 567, __PRETTY_FUNCTION__)); | ||||||||
568 | HeaderSucc.push_back(Term->getSuccessor(0)); | ||||||||
569 | } else { | ||||||||
570 | assert(L->contains(Term->getSuccessor(1)))((L->contains(Term->getSuccessor(1))) ? static_cast< void> (0) : __assert_fail ("L->contains(Term->getSuccessor(1))" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 570, __PRETTY_FUNCTION__)); | ||||||||
571 | HeaderSucc.push_back(Term->getSuccessor(1)); | ||||||||
572 | } | ||||||||
573 | } | ||||||||
574 | |||||||||
575 | // The current on-the-fly SSA update requires blocks to be processed in | ||||||||
576 | // reverse postorder so that LastValueMap contains the correct value at each | ||||||||
577 | // exit. | ||||||||
578 | LoopBlocksDFS DFS(L); | ||||||||
579 | DFS.perform(LI); | ||||||||
580 | |||||||||
581 | // Stash the DFS iterators before adding blocks to the loop. | ||||||||
582 | LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO(); | ||||||||
583 | LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO(); | ||||||||
584 | |||||||||
585 | std::vector<BasicBlock*> UnrolledLoopBlocks = L->getBlocks(); | ||||||||
586 | |||||||||
587 | // Loop Unrolling might create new loops. While we do preserve LoopInfo, we | ||||||||
588 | // might break loop-simplified form for these loops (as they, e.g., would | ||||||||
589 | // share the same exit blocks). We'll keep track of loops for which we can | ||||||||
590 | // break this so that later we can re-simplify them. | ||||||||
591 | SmallSetVector<Loop *, 4> LoopsToSimplify; | ||||||||
592 | for (Loop *SubLoop : *L) | ||||||||
593 | LoopsToSimplify.insert(SubLoop); | ||||||||
594 | |||||||||
595 | if (Header->getParent()->isDebugInfoForProfiling()) | ||||||||
596 | for (BasicBlock *BB : L->getBlocks()) | ||||||||
597 | for (Instruction &I : *BB) | ||||||||
598 | if (!isa<DbgInfoIntrinsic>(&I)) | ||||||||
599 | if (const DILocation *DIL = I.getDebugLoc()) { | ||||||||
600 | auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(ULO.Count); | ||||||||
601 | if (NewDIL) | ||||||||
602 | I.setDebugLoc(NewDIL.getValue()); | ||||||||
603 | else | ||||||||
604 | LLVM_DEBUG(dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "Failed to create new discriminator: " << DIL->getFilename() << " Line: " << DIL ->getLine(); } } while (false) | ||||||||
605 | << "Failed to create new discriminator: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "Failed to create new discriminator: " << DIL->getFilename() << " Line: " << DIL ->getLine(); } } while (false) | ||||||||
606 | << DIL->getFilename() << " Line: " << DIL->getLine())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-unroll")) { dbgs() << "Failed to create new discriminator: " << DIL->getFilename() << " Line: " << DIL ->getLine(); } } while (false); | ||||||||
607 | } | ||||||||
608 | |||||||||
609 | for (unsigned It = 1; It != ULO.Count; ++It) { | ||||||||
610 | SmallVector<BasicBlock *, 8> NewBlocks; | ||||||||
611 | SmallDenseMap<const Loop *, Loop *, 4> NewLoops; | ||||||||
612 | NewLoops[L] = L; | ||||||||
613 | |||||||||
614 | for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) { | ||||||||
615 | ValueToValueMapTy VMap; | ||||||||
616 | BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It)); | ||||||||
617 | Header->getParent()->getBasicBlockList().push_back(New); | ||||||||
618 | |||||||||
619 | assert((*BB != Header || LI->getLoopFor(*BB) == L) &&(((*BB != Header || LI->getLoopFor(*BB) == L) && "Header should not be in a sub-loop" ) ? static_cast<void> (0) : __assert_fail ("(*BB != Header || LI->getLoopFor(*BB) == L) && \"Header should not be in a sub-loop\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 620, __PRETTY_FUNCTION__)) | ||||||||
620 | "Header should not be in a sub-loop")(((*BB != Header || LI->getLoopFor(*BB) == L) && "Header should not be in a sub-loop" ) ? static_cast<void> (0) : __assert_fail ("(*BB != Header || LI->getLoopFor(*BB) == L) && \"Header should not be in a sub-loop\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 620, __PRETTY_FUNCTION__)); | ||||||||
621 | // Tell LI about New. | ||||||||
622 | const Loop *OldLoop = addClonedBlockToLoopInfo(*BB, New, LI, NewLoops); | ||||||||
623 | if (OldLoop) | ||||||||
624 | LoopsToSimplify.insert(NewLoops[OldLoop]); | ||||||||
625 | |||||||||
626 | if (*BB == Header) | ||||||||
627 | // Loop over all of the PHI nodes in the block, changing them to use | ||||||||
628 | // the incoming values from the previous block. | ||||||||
629 | for (PHINode *OrigPHI : OrigPHINode) { | ||||||||
630 | PHINode *NewPHI = cast<PHINode>(VMap[OrigPHI]); | ||||||||
631 | Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock); | ||||||||
632 | if (Instruction *InValI = dyn_cast<Instruction>(InVal)) | ||||||||
633 | if (It > 1 && L->contains(InValI)) | ||||||||
634 | InVal = LastValueMap[InValI]; | ||||||||
635 | VMap[OrigPHI] = InVal; | ||||||||
636 | New->getInstList().erase(NewPHI); | ||||||||
637 | } | ||||||||
638 | |||||||||
639 | // Update our running map of newest clones | ||||||||
640 | LastValueMap[*BB] = New; | ||||||||
641 | for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end(); | ||||||||
642 | VI != VE; ++VI) | ||||||||
643 | LastValueMap[VI->first] = VI->second; | ||||||||
644 | |||||||||
645 | // Add phi entries for newly created values to all exit blocks. | ||||||||
646 | for (BasicBlock *Succ : successors(*BB)) { | ||||||||
647 | if (L->contains(Succ)) | ||||||||
648 | continue; | ||||||||
649 | for (PHINode &PHI : Succ->phis()) { | ||||||||
650 | Value *Incoming = PHI.getIncomingValueForBlock(*BB); | ||||||||
651 | ValueToValueMapTy::iterator It = LastValueMap.find(Incoming); | ||||||||
652 | if (It != LastValueMap.end()) | ||||||||
653 | Incoming = It->second; | ||||||||
654 | PHI.addIncoming(Incoming, New); | ||||||||
655 | } | ||||||||
656 | } | ||||||||
657 | // Keep track of new headers and latches as we create them, so that | ||||||||
658 | // we can insert the proper branches later. | ||||||||
659 | if (*BB == Header) | ||||||||
660 | Headers.push_back(New); | ||||||||
661 | if (*BB == LatchBlock) | ||||||||
662 | Latches.push_back(New); | ||||||||
663 | |||||||||
664 | // Keep track of the successor of the new header in the current iteration. | ||||||||
665 | for (auto *Pred : predecessors(*BB)) | ||||||||
666 | if (Pred == Header) { | ||||||||
667 | HeaderSucc.push_back(New); | ||||||||
668 | break; | ||||||||
669 | } | ||||||||
670 | |||||||||
671 | NewBlocks.push_back(New); | ||||||||
672 | UnrolledLoopBlocks.push_back(New); | ||||||||
673 | |||||||||
674 | // Update DomTree: since we just copy the loop body, and each copy has a | ||||||||
675 | // dedicated entry block (copy of the header block), this header's copy | ||||||||
676 | // dominates all copied blocks. That means, dominance relations in the | ||||||||
677 | // copied body are the same as in the original body. | ||||||||
678 | if (DT) { | ||||||||
679 | if (*BB == Header) | ||||||||
680 | DT->addNewBlock(New, Latches[It - 1]); | ||||||||
681 | else { | ||||||||
682 | auto BBDomNode = DT->getNode(*BB); | ||||||||
683 | auto BBIDom = BBDomNode->getIDom(); | ||||||||
684 | BasicBlock *OriginalBBIDom = BBIDom->getBlock(); | ||||||||
685 | DT->addNewBlock( | ||||||||
686 | New, cast<BasicBlock>(LastValueMap[cast<Value>(OriginalBBIDom)])); | ||||||||
687 | } | ||||||||
688 | } | ||||||||
689 | } | ||||||||
690 | |||||||||
691 | // Remap all instructions in the most recent iteration | ||||||||
692 | remapInstructionsInBlocks(NewBlocks, LastValueMap); | ||||||||
693 | for (BasicBlock *NewBlock : NewBlocks) { | ||||||||
694 | for (Instruction &I : *NewBlock) { | ||||||||
695 | if (auto *II = dyn_cast<IntrinsicInst>(&I)) | ||||||||
696 | if (II->getIntrinsicID() == Intrinsic::assume) | ||||||||
697 | AC->registerAssumption(II); | ||||||||
698 | } | ||||||||
699 | } | ||||||||
700 | } | ||||||||
701 | |||||||||
702 | // Loop over the PHI nodes in the original block, setting incoming values. | ||||||||
703 | for (PHINode *PN : OrigPHINode) { | ||||||||
704 | if (CompletelyUnroll) { | ||||||||
705 | PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader)); | ||||||||
706 | Header->getInstList().erase(PN); | ||||||||
707 | } else if (ULO.Count > 1) { | ||||||||
708 | Value *InVal = PN->removeIncomingValue(LatchBlock, false); | ||||||||
709 | // If this value was defined in the loop, take the value defined by the | ||||||||
710 | // last iteration of the loop. | ||||||||
711 | if (Instruction *InValI = dyn_cast<Instruction>(InVal)) { | ||||||||
712 | if (L->contains(InValI)) | ||||||||
713 | InVal = LastValueMap[InVal]; | ||||||||
714 | } | ||||||||
715 | assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch")((Latches.back() == LastValueMap[LatchBlock] && "bad last latch" ) ? static_cast<void> (0) : __assert_fail ("Latches.back() == LastValueMap[LatchBlock] && \"bad last latch\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 715, __PRETTY_FUNCTION__)); | ||||||||
716 | PN->addIncoming(InVal, Latches.back()); | ||||||||
717 | } | ||||||||
718 | } | ||||||||
719 | |||||||||
720 | auto setDest = [LoopExit, ContinueOnTrue](BasicBlock *Src, BasicBlock *Dest, | ||||||||
721 | ArrayRef<BasicBlock *> NextBlocks, | ||||||||
722 | BasicBlock *BlockInLoop, | ||||||||
723 | bool NeedConditional) { | ||||||||
724 | auto *Term = cast<BranchInst>(Src->getTerminator()); | ||||||||
725 | if (NeedConditional) { | ||||||||
726 | // Update the conditional branch's successor for the following | ||||||||
727 | // iteration. | ||||||||
728 | Term->setSuccessor(!ContinueOnTrue, Dest); | ||||||||
729 | } else { | ||||||||
730 | // Remove phi operands at this loop exit | ||||||||
731 | if (Dest != LoopExit) { | ||||||||
732 | BasicBlock *BB = Src; | ||||||||
733 | for (BasicBlock *Succ : successors(BB)) { | ||||||||
734 | // Preserve the incoming value from BB if we are jumping to the block | ||||||||
735 | // in the current loop. | ||||||||
736 | if (Succ == BlockInLoop) | ||||||||
737 | continue; | ||||||||
738 | for (PHINode &Phi : Succ->phis()) | ||||||||
739 | Phi.removeIncomingValue(BB, false); | ||||||||
740 | } | ||||||||
741 | } | ||||||||
742 | // Replace the conditional branch with an unconditional one. | ||||||||
743 | BranchInst::Create(Dest, Term); | ||||||||
744 | Term->eraseFromParent(); | ||||||||
745 | } | ||||||||
746 | }; | ||||||||
747 | |||||||||
748 | // Now that all the basic blocks for the unrolled iterations are in place, | ||||||||
749 | // set up the branches to connect them. | ||||||||
750 | if (LatchIsExiting) { | ||||||||
751 | // Set up latches to branch to the new header in the unrolled iterations or | ||||||||
752 | // the loop exit for the last latch in a fully unrolled loop. | ||||||||
753 | for (unsigned i = 0, e = Latches.size(); i != e; ++i) { | ||||||||
754 | // The branch destination. | ||||||||
755 | unsigned j = (i + 1) % e; | ||||||||
756 | BasicBlock *Dest = Headers[j]; | ||||||||
757 | bool NeedConditional = true; | ||||||||
758 | |||||||||
759 | if (RuntimeTripCount && j != 0) { | ||||||||
760 | NeedConditional = false; | ||||||||
761 | } | ||||||||
762 | |||||||||
763 | // For a complete unroll, make the last iteration end with a branch | ||||||||
764 | // to the exit block. | ||||||||
765 | if (CompletelyUnroll) { | ||||||||
766 | if (j == 0) | ||||||||
767 | Dest = LoopExit; | ||||||||
768 | // If using trip count upper bound to completely unroll, we need to keep | ||||||||
769 | // the conditional branch except the last one because the loop may exit | ||||||||
770 | // after any iteration. | ||||||||
771 | assert(NeedConditional &&((NeedConditional && "NeedCondition cannot be modified by both complete " "unrolling and runtime unrolling") ? static_cast<void> (0) : __assert_fail ("NeedConditional && \"NeedCondition cannot be modified by both complete \" \"unrolling and runtime unrolling\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 773, __PRETTY_FUNCTION__)) | ||||||||
772 | "NeedCondition cannot be modified by both complete "((NeedConditional && "NeedCondition cannot be modified by both complete " "unrolling and runtime unrolling") ? static_cast<void> (0) : __assert_fail ("NeedConditional && \"NeedCondition cannot be modified by both complete \" \"unrolling and runtime unrolling\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 773, __PRETTY_FUNCTION__)) | ||||||||
773 | "unrolling and runtime unrolling")((NeedConditional && "NeedCondition cannot be modified by both complete " "unrolling and runtime unrolling") ? static_cast<void> (0) : __assert_fail ("NeedConditional && \"NeedCondition cannot be modified by both complete \" \"unrolling and runtime unrolling\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 773, __PRETTY_FUNCTION__)); | ||||||||
774 | NeedConditional = | ||||||||
775 | (ULO.PreserveCondBr && j && !(ULO.PreserveOnlyFirst && i != 0)); | ||||||||
776 | } else if (j != BreakoutTrip && | ||||||||
777 | (ULO.TripMultiple == 0 || j % ULO.TripMultiple != 0)) { | ||||||||
778 | // If we know the trip count or a multiple of it, we can safely use an | ||||||||
779 | // unconditional branch for some iterations. | ||||||||
780 | NeedConditional = false; | ||||||||
781 | } | ||||||||
782 | |||||||||
783 | setDest(Latches[i], Dest, Headers, Headers[i], NeedConditional); | ||||||||
784 | } | ||||||||
785 | } else { | ||||||||
786 | // Setup headers to branch to their new successors in the unrolled | ||||||||
787 | // iterations. | ||||||||
788 | for (unsigned i = 0, e = Headers.size(); i != e; ++i) { | ||||||||
789 | // The branch destination. | ||||||||
790 | unsigned j = (i + 1) % e; | ||||||||
791 | BasicBlock *Dest = HeaderSucc[i]; | ||||||||
792 | bool NeedConditional = true; | ||||||||
793 | |||||||||
794 | if (RuntimeTripCount && j != 0) | ||||||||
795 | NeedConditional = false; | ||||||||
796 | |||||||||
797 | if (CompletelyUnroll) | ||||||||
798 | // We cannot drop the conditional branch for the last condition, as we | ||||||||
799 | // may have to execute the loop body depending on the condition. | ||||||||
800 | NeedConditional = j == 0 || ULO.PreserveCondBr; | ||||||||
801 | else if (j != BreakoutTrip && | ||||||||
802 | (ULO.TripMultiple == 0 || j % ULO.TripMultiple != 0)) | ||||||||
803 | // If we know the trip count or a multiple of it, we can safely use an | ||||||||
804 | // unconditional branch for some iterations. | ||||||||
805 | NeedConditional = false; | ||||||||
806 | |||||||||
807 | setDest(Headers[i], Dest, Headers, HeaderSucc[i], NeedConditional); | ||||||||
808 | } | ||||||||
809 | |||||||||
810 | // Set up latches to branch to the new header in the unrolled iterations or | ||||||||
811 | // the loop exit for the last latch in a fully unrolled loop. | ||||||||
812 | |||||||||
813 | for (unsigned i = 0, e = Latches.size(); i != e; ++i) { | ||||||||
814 | // The original branch was replicated in each unrolled iteration. | ||||||||
815 | BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator()); | ||||||||
816 | |||||||||
817 | // The branch destination. | ||||||||
818 | unsigned j = (i + 1) % e; | ||||||||
819 | BasicBlock *Dest = Headers[j]; | ||||||||
820 | |||||||||
821 | // When completely unrolling, the last latch becomes unreachable. | ||||||||
822 | if (CompletelyUnroll && j == 0) | ||||||||
823 | new UnreachableInst(Term->getContext(), Term); | ||||||||
824 | else | ||||||||
825 | // Replace the conditional branch with an unconditional one. | ||||||||
826 | BranchInst::Create(Dest, Term); | ||||||||
827 | |||||||||
828 | Term->eraseFromParent(); | ||||||||
829 | } | ||||||||
830 | } | ||||||||
831 | |||||||||
832 | // Update dominators of blocks we might reach through exits. | ||||||||
833 | // Immediate dominator of such block might change, because we add more | ||||||||
834 | // routes which can lead to the exit: we can now reach it from the copied | ||||||||
835 | // iterations too. | ||||||||
836 | if (DT && ULO.Count > 1) { | ||||||||
837 | for (auto *BB : OriginalLoopBlocks) { | ||||||||
838 | auto *BBDomNode = DT->getNode(BB); | ||||||||
839 | SmallVector<BasicBlock *, 16> ChildrenToUpdate; | ||||||||
840 | for (auto *ChildDomNode : BBDomNode->getChildren()) { | ||||||||
841 | auto *ChildBB = ChildDomNode->getBlock(); | ||||||||
842 | if (!L->contains(ChildBB)) | ||||||||
843 | ChildrenToUpdate.push_back(ChildBB); | ||||||||
844 | } | ||||||||
845 | BasicBlock *NewIDom; | ||||||||
846 | BasicBlock *&TermBlock = LatchIsExiting ? LatchBlock : Header; | ||||||||
847 | auto &TermBlocks = LatchIsExiting ? Latches : Headers; | ||||||||
848 | if (BB == TermBlock) { | ||||||||
849 | // The latch is special because we emit unconditional branches in | ||||||||
850 | // some cases where the original loop contained a conditional branch. | ||||||||
851 | // Since the latch is always at the bottom of the loop, if the latch | ||||||||
852 | // dominated an exit before unrolling, the new dominator of that exit | ||||||||
853 | // must also be a latch. Specifically, the dominator is the first | ||||||||
854 | // latch which ends in a conditional branch, or the last latch if | ||||||||
855 | // there is no such latch. | ||||||||
856 | // For loops exiting from the header, we limit the supported loops | ||||||||
857 | // to have a single exiting block. | ||||||||
858 | NewIDom = TermBlocks.back(); | ||||||||
859 | for (BasicBlock *Iter : TermBlocks) { | ||||||||
860 | Instruction *Term = Iter->getTerminator(); | ||||||||
861 | if (isa<BranchInst>(Term) && cast<BranchInst>(Term)->isConditional()) { | ||||||||
862 | NewIDom = Iter; | ||||||||
863 | break; | ||||||||
864 | } | ||||||||
865 | } | ||||||||
866 | } else { | ||||||||
867 | // The new idom of the block will be the nearest common dominator | ||||||||
868 | // of all copies of the previous idom. This is equivalent to the | ||||||||
869 | // nearest common dominator of the previous idom and the first latch, | ||||||||
870 | // which dominates all copies of the previous idom. | ||||||||
871 | NewIDom = DT->findNearestCommonDominator(BB, LatchBlock); | ||||||||
872 | } | ||||||||
873 | for (auto *ChildBB : ChildrenToUpdate) | ||||||||
874 | DT->changeImmediateDominator(ChildBB, NewIDom); | ||||||||
875 | } | ||||||||
876 | } | ||||||||
877 | |||||||||
878 | assert(!DT || !UnrollVerifyDomtree ||((!DT || !UnrollVerifyDomtree || DT->verify(DominatorTree:: VerificationLevel::Fast)) ? static_cast<void> (0) : __assert_fail ("!DT || !UnrollVerifyDomtree || DT->verify(DominatorTree::VerificationLevel::Fast)" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 879, __PRETTY_FUNCTION__)) | ||||||||
879 | DT->verify(DominatorTree::VerificationLevel::Fast))((!DT || !UnrollVerifyDomtree || DT->verify(DominatorTree:: VerificationLevel::Fast)) ? static_cast<void> (0) : __assert_fail ("!DT || !UnrollVerifyDomtree || DT->verify(DominatorTree::VerificationLevel::Fast)" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 879, __PRETTY_FUNCTION__)); | ||||||||
880 | |||||||||
881 | DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy); | ||||||||
882 | // Merge adjacent basic blocks, if possible. | ||||||||
883 | for (BasicBlock *Latch : Latches) { | ||||||||
884 | BranchInst *Term = dyn_cast<BranchInst>(Latch->getTerminator()); | ||||||||
885 | assert((Term ||(((Term || (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) && "Need a branch as terminator, except when fully unrolling with " "unconditional latch") ? static_cast<void> (0) : __assert_fail ("(Term || (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) && \"Need a branch as terminator, except when fully unrolling with \" \"unconditional latch\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 888, __PRETTY_FUNCTION__)) | ||||||||
886 | (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) &&(((Term || (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) && "Need a branch as terminator, except when fully unrolling with " "unconditional latch") ? static_cast<void> (0) : __assert_fail ("(Term || (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) && \"Need a branch as terminator, except when fully unrolling with \" \"unconditional latch\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 888, __PRETTY_FUNCTION__)) | ||||||||
887 | "Need a branch as terminator, except when fully unrolling with "(((Term || (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) && "Need a branch as terminator, except when fully unrolling with " "unconditional latch") ? static_cast<void> (0) : __assert_fail ("(Term || (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) && \"Need a branch as terminator, except when fully unrolling with \" \"unconditional latch\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 888, __PRETTY_FUNCTION__)) | ||||||||
888 | "unconditional latch")(((Term || (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) && "Need a branch as terminator, except when fully unrolling with " "unconditional latch") ? static_cast<void> (0) : __assert_fail ("(Term || (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) && \"Need a branch as terminator, except when fully unrolling with \" \"unconditional latch\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 888, __PRETTY_FUNCTION__)); | ||||||||
889 | if (Term && Term->isUnconditional()) { | ||||||||
890 | BasicBlock *Dest = Term->getSuccessor(0); | ||||||||
891 | BasicBlock *Fold = Dest->getUniquePredecessor(); | ||||||||
892 | if (MergeBlockIntoPredecessor(Dest, &DTU, LI)) { | ||||||||
893 | // Dest has been folded into Fold. Update our worklists accordingly. | ||||||||
894 | std::replace(Latches.begin(), Latches.end(), Dest, Fold); | ||||||||
895 | UnrolledLoopBlocks.erase(std::remove(UnrolledLoopBlocks.begin(), | ||||||||
896 | UnrolledLoopBlocks.end(), Dest), | ||||||||
897 | UnrolledLoopBlocks.end()); | ||||||||
898 | } | ||||||||
899 | } | ||||||||
900 | } | ||||||||
901 | // Apply updates to the DomTree. | ||||||||
902 | DT = &DTU.getDomTree(); | ||||||||
903 | |||||||||
904 | // At this point, the code is well formed. We now simplify the unrolled loop, | ||||||||
905 | // doing constant propagation and dead code elimination as we go. | ||||||||
906 | simplifyLoopAfterUnroll(L, !CompletelyUnroll && (ULO.Count > 1 || Peeled), LI, | ||||||||
907 | SE, DT, AC, TTI); | ||||||||
908 | |||||||||
909 | NumCompletelyUnrolled += CompletelyUnroll; | ||||||||
910 | ++NumUnrolled; | ||||||||
911 | |||||||||
912 | Loop *OuterL = L->getParentLoop(); | ||||||||
913 | // Update LoopInfo if the loop is completely removed. | ||||||||
914 | if (CompletelyUnroll) | ||||||||
915 | LI->erase(L); | ||||||||
916 | |||||||||
917 | // After complete unrolling most of the blocks should be contained in OuterL. | ||||||||
918 | // However, some of them might happen to be out of OuterL (e.g. if they | ||||||||
919 | // precede a loop exit). In this case we might need to insert PHI nodes in | ||||||||
920 | // order to preserve LCSSA form. | ||||||||
921 | // We don't need to check this if we already know that we need to fix LCSSA | ||||||||
922 | // form. | ||||||||
923 | // TODO: For now we just recompute LCSSA for the outer loop in this case, but | ||||||||
924 | // it should be possible to fix it in-place. | ||||||||
925 | if (PreserveLCSSA && OuterL && CompletelyUnroll && !NeedToFixLCSSA) | ||||||||
926 | NeedToFixLCSSA |= ::needToInsertPhisForLCSSA(OuterL, UnrolledLoopBlocks, LI); | ||||||||
927 | |||||||||
928 | // If we have a pass and a DominatorTree we should re-simplify impacted loops | ||||||||
929 | // to ensure subsequent analyses can rely on this form. We want to simplify | ||||||||
930 | // at least one layer outside of the loop that was unrolled so that any | ||||||||
931 | // changes to the parent loop exposed by the unrolling are considered. | ||||||||
932 | if (DT) { | ||||||||
933 | if (OuterL) { | ||||||||
934 | // OuterL includes all loops for which we can break loop-simplify, so | ||||||||
935 | // it's sufficient to simplify only it (it'll recursively simplify inner | ||||||||
936 | // loops too). | ||||||||
937 | if (NeedToFixLCSSA) { | ||||||||
938 | // LCSSA must be performed on the outermost affected loop. The unrolled | ||||||||
939 | // loop's last loop latch is guaranteed to be in the outermost loop | ||||||||
940 | // after LoopInfo's been updated by LoopInfo::erase. | ||||||||
941 | Loop *LatchLoop = LI->getLoopFor(Latches.back()); | ||||||||
942 | Loop *FixLCSSALoop = OuterL; | ||||||||
943 | if (!FixLCSSALoop->contains(LatchLoop)) | ||||||||
944 | while (FixLCSSALoop->getParentLoop() != LatchLoop) | ||||||||
945 | FixLCSSALoop = FixLCSSALoop->getParentLoop(); | ||||||||
946 | |||||||||
947 | formLCSSARecursively(*FixLCSSALoop, *DT, LI, SE); | ||||||||
948 | } else if (PreserveLCSSA) { | ||||||||
949 | assert(OuterL->isLCSSAForm(*DT) &&((OuterL->isLCSSAForm(*DT) && "Loops should be in LCSSA form after loop-unroll." ) ? static_cast<void> (0) : __assert_fail ("OuterL->isLCSSAForm(*DT) && \"Loops should be in LCSSA form after loop-unroll.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 950, __PRETTY_FUNCTION__)) | ||||||||
950 | "Loops should be in LCSSA form after loop-unroll.")((OuterL->isLCSSAForm(*DT) && "Loops should be in LCSSA form after loop-unroll." ) ? static_cast<void> (0) : __assert_fail ("OuterL->isLCSSAForm(*DT) && \"Loops should be in LCSSA form after loop-unroll.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 950, __PRETTY_FUNCTION__)); | ||||||||
951 | } | ||||||||
952 | |||||||||
953 | // TODO: That potentially might be compile-time expensive. We should try | ||||||||
954 | // to fix the loop-simplified form incrementally. | ||||||||
955 | simplifyLoop(OuterL, DT, LI, SE, AC, nullptr, PreserveLCSSA); | ||||||||
956 | } else { | ||||||||
957 | // Simplify loops for which we might've broken loop-simplify form. | ||||||||
958 | for (Loop *SubLoop : LoopsToSimplify) | ||||||||
959 | simplifyLoop(SubLoop, DT, LI, SE, AC, nullptr, PreserveLCSSA); | ||||||||
960 | } | ||||||||
961 | } | ||||||||
962 | |||||||||
963 | return CompletelyUnroll ? LoopUnrollResult::FullyUnrolled | ||||||||
964 | : LoopUnrollResult::PartiallyUnrolled; | ||||||||
965 | } | ||||||||
966 | |||||||||
967 | /// Given an llvm.loop loop id metadata node, returns the loop hint metadata | ||||||||
968 | /// node with the given name (for example, "llvm.loop.unroll.count"). If no | ||||||||
969 | /// such metadata node exists, then nullptr is returned. | ||||||||
970 | MDNode *llvm::GetUnrollMetadata(MDNode *LoopID, StringRef Name) { | ||||||||
971 | // First operand should refer to the loop id itself. | ||||||||
972 | assert(LoopID->getNumOperands() > 0 && "requires at least one operand")((LoopID->getNumOperands() > 0 && "requires at least one operand" ) ? static_cast<void> (0) : __assert_fail ("LoopID->getNumOperands() > 0 && \"requires at least one operand\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 972, __PRETTY_FUNCTION__)); | ||||||||
973 | assert(LoopID->getOperand(0) == LoopID && "invalid loop id")((LoopID->getOperand(0) == LoopID && "invalid loop id" ) ? static_cast<void> (0) : __assert_fail ("LoopID->getOperand(0) == LoopID && \"invalid loop id\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Utils/LoopUnroll.cpp" , 973, __PRETTY_FUNCTION__)); | ||||||||
974 | |||||||||
975 | for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) { | ||||||||
976 | MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); | ||||||||
977 | if (!MD) | ||||||||
978 | continue; | ||||||||
979 | |||||||||
980 | MDString *S = dyn_cast<MDString>(MD->getOperand(0)); | ||||||||
981 | if (!S) | ||||||||
982 | continue; | ||||||||
983 | |||||||||
984 | if (Name.equals(S->getString())) | ||||||||
985 | return MD; | ||||||||
986 | } | ||||||||
987 | return nullptr; | ||||||||
988 | } |
1 | //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file exposes the class definitions of all of the subclasses of the |
10 | // Instruction class. This is meant to be an easy way to get access to all |
11 | // instruction subclasses. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_IR_INSTRUCTIONS_H |
16 | #define LLVM_IR_INSTRUCTIONS_H |
17 | |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/None.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/SmallVector.h" |
22 | #include "llvm/ADT/StringRef.h" |
23 | #include "llvm/ADT/Twine.h" |
24 | #include "llvm/ADT/iterator.h" |
25 | #include "llvm/ADT/iterator_range.h" |
26 | #include "llvm/IR/Attributes.h" |
27 | #include "llvm/IR/BasicBlock.h" |
28 | #include "llvm/IR/CallingConv.h" |
29 | #include "llvm/IR/Constant.h" |
30 | #include "llvm/IR/DerivedTypes.h" |
31 | #include "llvm/IR/Function.h" |
32 | #include "llvm/IR/InstrTypes.h" |
33 | #include "llvm/IR/Instruction.h" |
34 | #include "llvm/IR/OperandTraits.h" |
35 | #include "llvm/IR/Type.h" |
36 | #include "llvm/IR/Use.h" |
37 | #include "llvm/IR/User.h" |
38 | #include "llvm/IR/Value.h" |
39 | #include "llvm/Support/AtomicOrdering.h" |
40 | #include "llvm/Support/Casting.h" |
41 | #include "llvm/Support/ErrorHandling.h" |
42 | #include <cassert> |
43 | #include <cstddef> |
44 | #include <cstdint> |
45 | #include <iterator> |
46 | |
47 | namespace llvm { |
48 | |
49 | class APInt; |
50 | class ConstantInt; |
51 | class DataLayout; |
52 | class LLVMContext; |
53 | |
54 | //===----------------------------------------------------------------------===// |
55 | // AllocaInst Class |
56 | //===----------------------------------------------------------------------===// |
57 | |
/// An instruction to allocate memory on the stack.
///
/// The single operand is the array size (a constant 1 for a scalar alloca;
/// see getArraySize). Per-instruction flags are packed into the subclass
/// data word (see the accessors below):
///   bits 0-4 : encoded alignment, decoded via decodeMaybeAlign
///   value 32 : used-with-inalloca flag
///   value 64 : swifterror flag
class AllocaInst : public UnaryInstruction {
  // Type of each allocated element; the instruction's result type is a
  // pointer to this type.
  Type *AllocatedType;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace,
                      Value *ArraySize = nullptr,
                      const Twine &Name = "",
                      Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, MaybeAlign Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, MaybeAlign Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Get allocation size in bits. Returns None if size can't be determined,
  /// e.g. in case of a VLA.
  Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  MaybeAlign getAlign() const {
    // Alignment is kept encoded in the low 5 bits of the subclass data.
    return decodeMaybeAlign(getSubclassDataFromInstruction() & 31);
  }
  // FIXME: Remove this one transition to Align is over.
  unsigned getAlignment() const {
    if (const auto MA = getAlign())
      return MA->value();
    return 0; // No known alignment.
  }
  void setAlignment(MaybeAlign Align);

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassDataFromInstruction() & 32;
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    // Clear then set the inalloca bit, preserving all other flag bits.
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
                               (V ? 32 : 0));
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const {
    return getSubclassDataFromInstruction() & 64;
  }

  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) {
    // Clear then set the swifterror bit, preserving all other flag bits.
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
                               (V ? 64 : 0));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }
};
166 | |
167 | //===----------------------------------------------------------------------===// |
168 | // LoadInst Class |
169 | //===----------------------------------------------------------------------===// |
170 | |
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
///
/// Subclass-data bit layout (see the accessors below):
///   bit 0    : volatile flag
///   bits 1-5 : encoded alignment, decoded via decodeMaybeAlign
///   bits 7-9 : atomic ordering
/// The synchronization scope ID does not fit and lives in its own field.
class LoadInst : public UnaryInstruction {
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr = "",
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           MaybeAlign Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           MaybeAlign Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           MaybeAlign Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  // Deprecated [opaque pointer types]
  // These forward to the explicit-type constructors above by deriving the
  // loaded type from the pointer operand's pointee type.
  explicit LoadInst(Value *Ptr, const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 InsertAtEnd) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore = nullptr)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, InsertAtEnd) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
           Instruction *InsertBefore = nullptr)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, Align, InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
           BasicBlock *InsertAtEnd)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, Align, InsertAtEnd) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
           AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, Align, Order, SSID, InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
           AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, Align, Order, SSID, InsertAtEnd) {}

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) {
    // Clear then set bit 0, preserving all other packed fields.
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (V ? 1 : 0));
  }

  /// Return the alignment of the access that is being performed.
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  unsigned getAlignment() const {
    if (const auto MA = getAlign())
      return MA->value();
    return 0; // No known alignment.
  }

  /// Return the alignment of the access that is being performed.
  MaybeAlign getAlign() const {
    // Alignment occupies bits 1-5 of the subclass data.
    return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
  }

  void setAlignment(MaybeAlign Alignment);

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    // Ordering occupies bits 7-9 of the subclass data.
    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
  }

  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
                               ((unsigned)Ordering << 7));
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// A load is "simple" when it is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// A load is "unordered" when it is non-volatile and at most Unordered.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  // The pointer being loaded from is the sole operand.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
329 | |
330 | //===----------------------------------------------------------------------===// |
331 | // StoreInst Class |
332 | //===----------------------------------------------------------------------===// |
333 | |
/// An instruction for storing to memory.
///
/// Operand 0 is the value being stored; operand 1 is the pointer stored to
/// (see getValueOperand / getPointerOperand). Subclass-data bit layout
/// mirrors LoadInst:
///   bit 0    : volatile flag
///   bits 1-5 : encoded alignment, decoded via decodeMaybeAlign
///   bits 7-9 : atomic ordering
/// The synchronization scope ID lives in its own field.
class StoreInst : public Instruction {
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
            BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
            AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) {
    // Clear then set bit 0, preserving all other packed fields.
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (V ? 1 : 0));
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the alignment of the access that is being performed
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  unsigned getAlignment() const {
    if (const auto MA = getAlign())
      return MA->value();
    return 0; // No known alignment.
  }

  /// Return the alignment of the access that is being performed.
  MaybeAlign getAlign() const {
    // Alignment occupies bits 1-5 of the subclass data.
    return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
  }

  void setAlignment(MaybeAlign Alignment);

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    // Ordering occupies bits 7-9 of the subclass data.
    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
                               ((unsigned)Ordering << 7));
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// A store is "simple" when it is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// A store is "unordered" when it is non-volatile and at most Unordered.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  // Operand 0: the value being stored.
  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  // Operand 1: the pointer stored to.
  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this store instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
463 | |
// StoreInst has a fixed operand count of 2 (stored value + pointer); this
// trait drives the transparent operand accessors generated below.
template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

// Generates the out-of-line definitions for the accessors declared via
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside StoreInst.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
469 | |
470 | //===----------------------------------------------------------------------===// |
471 | // FenceInst Class |
472 | //===----------------------------------------------------------------------===// |
473 | |
474 | /// An instruction for ordering other memory operations. |
475 | class FenceInst : public Instruction { |
476 | void Init(AtomicOrdering Ordering, SyncScope::ID SSID); |
477 | |
478 | protected: |
479 | // Note: Instruction needs to be a friend here to call cloneImpl. |
480 | friend class Instruction; |
481 | |
482 | FenceInst *cloneImpl() const; |
483 | |
484 | public: |
485 | // Ordering may only be Acquire, Release, AcquireRelease, or |
486 | // SequentiallyConsistent. |
487 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, |
488 | SyncScope::ID SSID = SyncScope::System, |
489 | Instruction *InsertBefore = nullptr); |
490 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, |
491 | BasicBlock *InsertAtEnd); |
492 | |
493 | // allocate space for exactly zero operands |
494 | void *operator new(size_t s) { |
495 | return User::operator new(s, 0); |
496 | } |
497 | |
498 | /// Returns the ordering constraint of this fence instruction. |
499 | AtomicOrdering getOrdering() const { |
500 | return AtomicOrdering(getSubclassDataFromInstruction() >> 1); |
501 | } |
502 | |
503 | /// Sets the ordering constraint of this fence instruction. May only be |
504 | /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. |
505 | void setOrdering(AtomicOrdering Ordering) { |
506 | setInstructionSubclassData((getSubclassDataFromInstruction() & 1) | |
507 | ((unsigned)Ordering << 1)); |
508 | } |
509 | |
510 | /// Returns the synchronization scope ID of this fence instruction. |
511 | SyncScope::ID getSyncScopeID() const { |
512 | return SSID; |
513 | } |
514 | |
515 | /// Sets the synchronization scope ID of this fence instruction. |
516 | void setSyncScopeID(SyncScope::ID SSID) { |
517 | this->SSID = SSID; |
518 | } |
519 | |
520 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
521 | static bool classof(const Instruction *I) { |
522 | return I->getOpcode() == Instruction::Fence; |
523 | } |
524 | static bool classof(const Value *V) { |
525 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
526 | } |
527 | |
528 | private: |
529 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
530 | // method so that subclasses cannot accidentally use it. |
531 | void setInstructionSubclassData(unsigned short D) { |
532 | Instruction::setInstructionSubclassData(D); |
533 | } |
534 | |
535 | /// The synchronization scope ID of this fence instruction. Not quite enough |
536 | /// room in SubClassData for everything, so synchronization scope ID gets its |
537 | /// own field. |
538 | SyncScope::ID SSID; |
539 | }; |
540 | |
541 | //===----------------------------------------------------------------------===// |
542 | // AtomicCmpXchgInst Class |
543 | //===----------------------------------------------------------------------===// |
544 | |
545 | /// An instruction that atomically checks whether a |
546 | /// specified value is in a memory location, and, if it is, stores a new value |
547 | /// there. The value returned by this instruction is a pair containing the |
548 | /// original value as first element, and an i1 indicating success (true) or |
549 | /// failure (false) as second element. |
550 | /// |
551 | class AtomicCmpXchgInst : public Instruction { |
552 | void Init(Value *Ptr, Value *Cmp, Value *NewVal, |
553 | AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, |
554 | SyncScope::ID SSID); |
555 | |
556 | protected: |
557 | // Note: Instruction needs to be a friend here to call cloneImpl. |
558 | friend class Instruction; |
559 | |
560 | AtomicCmpXchgInst *cloneImpl() const; |
561 | |
562 | public: |
563 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, |
564 | AtomicOrdering SuccessOrdering, |
565 | AtomicOrdering FailureOrdering, |
566 | SyncScope::ID SSID, Instruction *InsertBefore = nullptr); |
567 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, |
568 | AtomicOrdering SuccessOrdering, |
569 | AtomicOrdering FailureOrdering, |
570 | SyncScope::ID SSID, BasicBlock *InsertAtEnd); |
571 | |
572 | // allocate space for exactly three operands |
573 | void *operator new(size_t s) { |
574 | return User::operator new(s, 3); |
575 | } |
576 | |
577 | /// Return true if this is a cmpxchg from a volatile memory |
578 | /// location. |
579 | /// |
580 | bool isVolatile() const { |
581 | return getSubclassDataFromInstruction() & 1; |
582 | } |
583 | |
584 | /// Specify whether this is a volatile cmpxchg. |
585 | /// |
586 | void setVolatile(bool V) { |
587 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | |
588 | (unsigned)V); |
589 | } |
590 | |
591 | /// Return true if this cmpxchg may spuriously fail. |
592 | bool isWeak() const { |
593 | return getSubclassDataFromInstruction() & 0x100; |
594 | } |
595 | |
596 | void setWeak(bool IsWeak) { |
597 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) | |
598 | (IsWeak << 8)); |
599 | } |
600 | |
601 | /// Transparently provide more efficient getOperand methods. |
602 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
603 | |
604 | /// Returns the success ordering constraint of this cmpxchg instruction. |
605 | AtomicOrdering getSuccessOrdering() const { |
606 | return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); |
607 | } |
608 | |
609 | /// Sets the success ordering constraint of this cmpxchg instruction. |
610 | void setSuccessOrdering(AtomicOrdering Ordering) { |
611 | assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic." ) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 612, __PRETTY_FUNCTION__)) |
612 | "CmpXchg instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic." ) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 612, __PRETTY_FUNCTION__)); |
613 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) | |
614 | ((unsigned)Ordering << 2)); |
615 | } |
616 | |
617 | /// Returns the failure ordering constraint of this cmpxchg instruction. |
618 | AtomicOrdering getFailureOrdering() const { |
619 | return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7); |
620 | } |
621 | |
622 | /// Sets the failure ordering constraint of this cmpxchg instruction. |
623 | void setFailureOrdering(AtomicOrdering Ordering) { |
624 | assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic." ) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 625, __PRETTY_FUNCTION__)) |
625 | "CmpXchg instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic." ) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 625, __PRETTY_FUNCTION__)); |
626 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) | |
627 | ((unsigned)Ordering << 5)); |
628 | } |
629 | |
630 | /// Returns the synchronization scope ID of this cmpxchg instruction. |
631 | SyncScope::ID getSyncScopeID() const { |
632 | return SSID; |
633 | } |
634 | |
635 | /// Sets the synchronization scope ID of this cmpxchg instruction. |
636 | void setSyncScopeID(SyncScope::ID SSID) { |
637 | this->SSID = SSID; |
638 | } |
639 | |
640 | Value *getPointerOperand() { return getOperand(0); } |
641 | const Value *getPointerOperand() const { return getOperand(0); } |
642 | static unsigned getPointerOperandIndex() { return 0U; } |
643 | |
644 | Value *getCompareOperand() { return getOperand(1); } |
645 | const Value *getCompareOperand() const { return getOperand(1); } |
646 | |
647 | Value *getNewValOperand() { return getOperand(2); } |
648 | const Value *getNewValOperand() const { return getOperand(2); } |
649 | |
650 | /// Returns the address space of the pointer operand. |
651 | unsigned getPointerAddressSpace() const { |
652 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
653 | } |
654 | |
655 | /// Returns the strongest permitted ordering on failure, given the |
656 | /// desired ordering on success. |
657 | /// |
658 | /// If the comparison in a cmpxchg operation fails, there is no atomic store |
659 | /// so release semantics cannot be provided. So this function drops explicit |
660 | /// Release requests from the AtomicOrdering. A SequentiallyConsistent |
661 | /// operation would remain SequentiallyConsistent. |
662 | static AtomicOrdering |
663 | getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { |
664 | switch (SuccessOrdering) { |
665 | default: |
666 | llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 666); |
667 | case AtomicOrdering::Release: |
668 | case AtomicOrdering::Monotonic: |
669 | return AtomicOrdering::Monotonic; |
670 | case AtomicOrdering::AcquireRelease: |
671 | case AtomicOrdering::Acquire: |
672 | return AtomicOrdering::Acquire; |
673 | case AtomicOrdering::SequentiallyConsistent: |
674 | return AtomicOrdering::SequentiallyConsistent; |
675 | } |
676 | } |
677 | |
678 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
679 | static bool classof(const Instruction *I) { |
680 | return I->getOpcode() == Instruction::AtomicCmpXchg; |
681 | } |
682 | static bool classof(const Value *V) { |
683 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
684 | } |
685 | |
686 | private: |
687 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
688 | // method so that subclasses cannot accidentally use it. |
689 | void setInstructionSubclassData(unsigned short D) { |
690 | Instruction::setInstructionSubclassData(D); |
691 | } |
692 | |
693 | /// The synchronization scope ID of this cmpxchg instruction. Not quite |
694 | /// enough room in SubClassData for everything, so synchronization scope ID |
695 | /// gets its own field. |
696 | SyncScope::ID SSID; |
697 | }; |
698 | |
699 | template <> |
700 | struct OperandTraits<AtomicCmpXchgInst> : |
701 | public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { |
702 | }; |
703 | |
704 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() { return OperandTraits<AtomicCmpXchgInst>::op_begin(this ); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst:: op_begin() const { return OperandTraits<AtomicCmpXchgInst> ::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst ::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits <AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst:: const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits <AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst *>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<AtomicCmpXchgInst >::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 704, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<AtomicCmpXchgInst>::op_begin(const_cast <AtomicCmpXchgInst*>(this))[i_nocapture].get()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture, Value *Val_nocapture ) { ((i_nocapture < OperandTraits<AtomicCmpXchgInst> ::operands(this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 704, __PRETTY_FUNCTION__)); OperandTraits<AtomicCmpXchgInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned AtomicCmpXchgInst::getNumOperands() const { return OperandTraits <AtomicCmpXchgInst>::operands(this); } template <int Idx_nocapture> Use &AtomicCmpXchgInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &AtomicCmpXchgInst::Op() const { return this ->OpFrom<Idx_nocapture>(this); } |
705 | |
706 | //===----------------------------------------------------------------------===// |
707 | // AtomicRMWInst Class |
708 | //===----------------------------------------------------------------------===// |
709 | |
710 | /// an instruction that atomically reads a memory location, |
711 | /// combines it with another value, and then stores the result back. Returns |
712 | /// the old value. |
713 | /// |
714 | class AtomicRMWInst : public Instruction { |
715 | protected: |
716 | // Note: Instruction needs to be a friend here to call cloneImpl. |
717 | friend class Instruction; |
718 | |
719 | AtomicRMWInst *cloneImpl() const; |
720 | |
721 | public: |
722 | /// This enumeration lists the possible modifications atomicrmw can make. In |
723 | /// the descriptions, 'p' is the pointer to the instruction's memory location, |
724 | /// 'old' is the initial value of *p, and 'v' is the other value passed to the |
725 | /// instruction. These instructions always return 'old'. |
726 | enum BinOp { |
727 | /// *p = v |
728 | Xchg, |
729 | /// *p = old + v |
730 | Add, |
731 | /// *p = old - v |
732 | Sub, |
733 | /// *p = old & v |
734 | And, |
735 | /// *p = ~(old & v) |
736 | Nand, |
737 | /// *p = old | v |
738 | Or, |
739 | /// *p = old ^ v |
740 | Xor, |
741 | /// *p = old >signed v ? old : v |
742 | Max, |
743 | /// *p = old <signed v ? old : v |
744 | Min, |
745 | /// *p = old >unsigned v ? old : v |
746 | UMax, |
747 | /// *p = old <unsigned v ? old : v |
748 | UMin, |
749 | |
750 | /// *p = old + v |
751 | FAdd, |
752 | |
753 | /// *p = old - v |
754 | FSub, |
755 | |
756 | FIRST_BINOP = Xchg, |
757 | LAST_BINOP = FSub, |
758 | BAD_BINOP |
759 | }; |
760 | |
761 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, |
762 | AtomicOrdering Ordering, SyncScope::ID SSID, |
763 | Instruction *InsertBefore = nullptr); |
764 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, |
765 | AtomicOrdering Ordering, SyncScope::ID SSID, |
766 | BasicBlock *InsertAtEnd); |
767 | |
768 | // allocate space for exactly two operands |
769 | void *operator new(size_t s) { |
770 | return User::operator new(s, 2); |
771 | } |
772 | |
773 | BinOp getOperation() const { |
774 | return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5); |
775 | } |
776 | |
777 | static StringRef getOperationName(BinOp Op); |
778 | |
779 | static bool isFPOperation(BinOp Op) { |
780 | switch (Op) { |
781 | case AtomicRMWInst::FAdd: |
782 | case AtomicRMWInst::FSub: |
783 | return true; |
784 | default: |
785 | return false; |
786 | } |
787 | } |
788 | |
789 | void setOperation(BinOp Operation) { |
790 | unsigned short SubclassData = getSubclassDataFromInstruction(); |
791 | setInstructionSubclassData((SubclassData & 31) | |
792 | (Operation << 5)); |
793 | } |
794 | |
795 | /// Return true if this is a RMW on a volatile memory location. |
796 | /// |
797 | bool isVolatile() const { |
798 | return getSubclassDataFromInstruction() & 1; |
799 | } |
800 | |
801 | /// Specify whether this is a volatile RMW or not. |
802 | /// |
803 | void setVolatile(bool V) { |
804 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | |
805 | (unsigned)V); |
806 | } |
807 | |
808 | /// Transparently provide more efficient getOperand methods. |
809 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
810 | |
811 | /// Returns the ordering constraint of this rmw instruction. |
812 | AtomicOrdering getOrdering() const { |
813 | return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); |
814 | } |
815 | |
816 | /// Sets the ordering constraint of this rmw instruction. |
817 | void setOrdering(AtomicOrdering Ordering) { |
818 | assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic." ) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 819, __PRETTY_FUNCTION__)) |
819 | "atomicrmw instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic." ) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 819, __PRETTY_FUNCTION__)); |
820 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) | |
821 | ((unsigned)Ordering << 2)); |
822 | } |
823 | |
824 | /// Returns the synchronization scope ID of this rmw instruction. |
825 | SyncScope::ID getSyncScopeID() const { |
826 | return SSID; |
827 | } |
828 | |
829 | /// Sets the synchronization scope ID of this rmw instruction. |
830 | void setSyncScopeID(SyncScope::ID SSID) { |
831 | this->SSID = SSID; |
832 | } |
833 | |
834 | Value *getPointerOperand() { return getOperand(0); } |
835 | const Value *getPointerOperand() const { return getOperand(0); } |
836 | static unsigned getPointerOperandIndex() { return 0U; } |
837 | |
838 | Value *getValOperand() { return getOperand(1); } |
839 | const Value *getValOperand() const { return getOperand(1); } |
840 | |
841 | /// Returns the address space of the pointer operand. |
842 | unsigned getPointerAddressSpace() const { |
843 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
844 | } |
845 | |
846 | bool isFloatingPointOperation() const { |
847 | return isFPOperation(getOperation()); |
848 | } |
849 | |
850 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
851 | static bool classof(const Instruction *I) { |
852 | return I->getOpcode() == Instruction::AtomicRMW; |
853 | } |
854 | static bool classof(const Value *V) { |
855 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
856 | } |
857 | |
858 | private: |
859 | void Init(BinOp Operation, Value *Ptr, Value *Val, |
860 | AtomicOrdering Ordering, SyncScope::ID SSID); |
861 | |
862 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
863 | // method so that subclasses cannot accidentally use it. |
864 | void setInstructionSubclassData(unsigned short D) { |
865 | Instruction::setInstructionSubclassData(D); |
866 | } |
867 | |
868 | /// The synchronization scope ID of this rmw instruction. Not quite enough |
869 | /// room in SubClassData for everything, so synchronization scope ID gets its |
870 | /// own field. |
871 | SyncScope::ID SSID; |
872 | }; |
873 | |
874 | template <> |
875 | struct OperandTraits<AtomicRMWInst> |
876 | : public FixedNumOperandTraits<AtomicRMWInst,2> { |
877 | }; |
878 | |
879 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst ::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits <AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*> (this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end() { return OperandTraits<AtomicRMWInst>::op_end(this); } AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const { return OperandTraits<AtomicRMWInst>::op_end(const_cast <AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand (unsigned i_nocapture) const { ((i_nocapture < OperandTraits <AtomicRMWInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 879, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<AtomicRMWInst>::op_begin(const_cast< AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<AtomicRMWInst>::operands (this) && "setOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 879, __PRETTY_FUNCTION__)); OperandTraits<AtomicRMWInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned AtomicRMWInst ::getNumOperands() const { return OperandTraits<AtomicRMWInst >::operands(this); } template <int Idx_nocapture> Use &AtomicRMWInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & AtomicRMWInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
880 | |
881 | //===----------------------------------------------------------------------===// |
882 | // GetElementPtrInst Class |
883 | //===----------------------------------------------------------------------===// |
884 | |
885 | // checkGEPType - Simple wrapper function to give a better assertion failure |
886 | // message on bad indexes for a gep instruction. |
887 | // |
888 | inline Type *checkGEPType(Type *Ty) { |
889 | assert(Ty && "Invalid GetElementPtrInst indices for type!")((Ty && "Invalid GetElementPtrInst indices for type!" ) ? static_cast<void> (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 889, __PRETTY_FUNCTION__)); |
890 | return Ty; |
891 | } |
892 | |
893 | /// an instruction for type-safe pointer arithmetic to |
894 | /// access elements of arrays and structs |
895 | /// |
896 | class GetElementPtrInst : public Instruction { |
897 | Type *SourceElementType; |
898 | Type *ResultElementType; |
899 | |
900 | GetElementPtrInst(const GetElementPtrInst &GEPI); |
901 | |
902 | /// Constructors - Create a getelementptr instruction with a base pointer an |
903 | /// list of indices. The first ctor can optionally insert before an existing |
904 | /// instruction, the second appends the new instruction to the specified |
905 | /// BasicBlock. |
906 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
907 | ArrayRef<Value *> IdxList, unsigned Values, |
908 | const Twine &NameStr, Instruction *InsertBefore); |
909 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
910 | ArrayRef<Value *> IdxList, unsigned Values, |
911 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
912 | |
913 | void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); |
914 | |
915 | protected: |
916 | // Note: Instruction needs to be a friend here to call cloneImpl. |
917 | friend class Instruction; |
918 | |
919 | GetElementPtrInst *cloneImpl() const; |
920 | |
921 | public: |
922 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
923 | ArrayRef<Value *> IdxList, |
924 | const Twine &NameStr = "", |
925 | Instruction *InsertBefore = nullptr) { |
926 | unsigned Values = 1 + unsigned(IdxList.size()); |
927 | if (!PointeeType) |
928 | PointeeType = |
929 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
930 | else |
931 | assert(((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 933, __PRETTY_FUNCTION__)) |
932 | PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 933, __PRETTY_FUNCTION__)) |
933 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 933, __PRETTY_FUNCTION__)); |
934 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
935 | NameStr, InsertBefore); |
936 | } |
937 | |
938 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
939 | ArrayRef<Value *> IdxList, |
940 | const Twine &NameStr, |
941 | BasicBlock *InsertAtEnd) { |
942 | unsigned Values = 1 + unsigned(IdxList.size()); |
943 | if (!PointeeType) |
944 | PointeeType = |
945 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
946 | else |
947 | assert(((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 949, __PRETTY_FUNCTION__)) |
948 | PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 949, __PRETTY_FUNCTION__)) |
949 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 949, __PRETTY_FUNCTION__)); |
950 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
951 | NameStr, InsertAtEnd); |
952 | } |
953 | |
954 | /// Create an "inbounds" getelementptr. See the documentation for the |
955 | /// "inbounds" flag in LangRef.html for details. |
956 | static GetElementPtrInst *CreateInBounds(Value *Ptr, |
957 | ArrayRef<Value *> IdxList, |
958 | const Twine &NameStr = "", |
959 | Instruction *InsertBefore = nullptr){ |
960 | return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore); |
961 | } |
962 | |
963 | static GetElementPtrInst * |
964 | CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, |
965 | const Twine &NameStr = "", |
966 | Instruction *InsertBefore = nullptr) { |
967 | GetElementPtrInst *GEP = |
968 | Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); |
969 | GEP->setIsInBounds(true); |
970 | return GEP; |
971 | } |
972 | |
973 | static GetElementPtrInst *CreateInBounds(Value *Ptr, |
974 | ArrayRef<Value *> IdxList, |
975 | const Twine &NameStr, |
976 | BasicBlock *InsertAtEnd) { |
977 | return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd); |
978 | } |
979 | |
980 | static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, |
981 | ArrayRef<Value *> IdxList, |
982 | const Twine &NameStr, |
983 | BasicBlock *InsertAtEnd) { |
984 | GetElementPtrInst *GEP = |
985 | Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); |
986 | GEP->setIsInBounds(true); |
987 | return GEP; |
988 | } |
989 | |
990 | /// Transparently provide more efficient getOperand methods. |
991 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
992 | |
993 | Type *getSourceElementType() const { return SourceElementType; } |
994 | |
995 | void setSourceElementType(Type *Ty) { SourceElementType = Ty; } |
996 | void setResultElementType(Type *Ty) { ResultElementType = Ty; } |
997 | |
998 | Type *getResultElementType() const { |
999 | assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1000, __PRETTY_FUNCTION__)) |
1000 | cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1000, __PRETTY_FUNCTION__)); |
1001 | return ResultElementType; |
1002 | } |
1003 | |
1004 | /// Returns the address space of this instruction's pointer type. |
1005 | unsigned getAddressSpace() const { |
1006 | // Note that this is always the same as the pointer operand's address space |
1007 | // and that is cheaper to compute, so cheat here. |
1008 | return getPointerAddressSpace(); |
1009 | } |
1010 | |
1011 | /// Returns the type of the element that would be loaded with |
1012 | /// a load instruction with the specified parameters. |
1013 | /// |
1014 | /// Null is returned if the indices are invalid for the specified |
1015 | /// pointer type. |
1016 | /// |
1017 | static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); |
1018 | static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); |
1019 | static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); |
1020 | |
1021 | inline op_iterator idx_begin() { return op_begin()+1; } |
1022 | inline const_op_iterator idx_begin() const { return op_begin()+1; } |
1023 | inline op_iterator idx_end() { return op_end(); } |
1024 | inline const_op_iterator idx_end() const { return op_end(); } |
1025 | |
1026 | inline iterator_range<op_iterator> indices() { |
1027 | return make_range(idx_begin(), idx_end()); |
1028 | } |
1029 | |
1030 | inline iterator_range<const_op_iterator> indices() const { |
1031 | return make_range(idx_begin(), idx_end()); |
1032 | } |
1033 | |
1034 | Value *getPointerOperand() { |
1035 | return getOperand(0); |
1036 | } |
1037 | const Value *getPointerOperand() const { |
1038 | return getOperand(0); |
1039 | } |
1040 | static unsigned getPointerOperandIndex() { |
1041 | return 0U; // get index for modifying correct operand. |
1042 | } |
1043 | |
1044 | /// Method to return the pointer operand as a |
1045 | /// PointerType. |
1046 | Type *getPointerOperandType() const { |
1047 | return getPointerOperand()->getType(); |
1048 | } |
1049 | |
1050 | /// Returns the address space of the pointer operand. |
1051 | unsigned getPointerAddressSpace() const { |
1052 | return getPointerOperandType()->getPointerAddressSpace(); |
1053 | } |
1054 | |
1055 | /// Returns the pointer type returned by the GEP |
1056 | /// instruction, which may be a vector of pointers. |
1057 | static Type *getGEPReturnType(Type *ElTy, Value *Ptr, |
1058 | ArrayRef<Value *> IdxList) { |
1059 | Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)), |
1060 | Ptr->getType()->getPointerAddressSpace()); |
1061 | // Vector GEP |
1062 | if (Ptr->getType()->isVectorTy()) { |
1063 | ElementCount EltCount = Ptr->getType()->getVectorElementCount(); |
1064 | return VectorType::get(PtrTy, EltCount); |
1065 | } |
1066 | for (Value *Index : IdxList) |
1067 | if (Index->getType()->isVectorTy()) { |
1068 | ElementCount EltCount = Index->getType()->getVectorElementCount(); |
1069 | return VectorType::get(PtrTy, EltCount); |
1070 | } |
1071 | // Scalar GEP |
1072 | return PtrTy; |
1073 | } |
1074 | |
1075 | unsigned getNumIndices() const { // Note: always non-negative |
1076 | return getNumOperands() - 1; |
1077 | } |
1078 | |
1079 | bool hasIndices() const { |
1080 | return getNumOperands() > 1; |
1081 | } |
1082 | |
1083 | /// Return true if all of the indices of this GEP are |
1084 | /// zeros. If so, the result pointer and the first operand have the same |
1085 | /// value, just potentially different types. |
1086 | bool hasAllZeroIndices() const; |
1087 | |
1088 | /// Return true if all of the indices of this GEP are |
1089 | /// constant integers. If so, the result pointer and the first operand have |
1090 | /// a constant offset between them. |
1091 | bool hasAllConstantIndices() const; |
1092 | |
1093 | /// Set or clear the inbounds flag on this GEP instruction. |
1094 | /// See LangRef.html for the meaning of inbounds on a getelementptr. |
1095 | void setIsInBounds(bool b = true); |
1096 | |
1097 | /// Determine whether the GEP has the inbounds flag. |
1098 | bool isInBounds() const; |
1099 | |
1100 | /// Accumulate the constant address offset of this GEP if possible. |
1101 | /// |
1102 | /// This routine accepts an APInt into which it will accumulate the constant |
1103 | /// offset of this GEP if the GEP is in fact constant. If the GEP is not |
1104 | /// all-constant, it returns false and the value of the offset APInt is |
1105 | /// undefined (it is *not* preserved!). The APInt passed into this routine |
1106 | /// must be at least as wide as the IntPtr type for the address space of |
1107 | /// the base GEP pointer. |
1108 | bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; |
1109 | |
1110 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1111 | static bool classof(const Instruction *I) { |
1112 | return (I->getOpcode() == Instruction::GetElementPtr); |
1113 | } |
1114 | static bool classof(const Value *V) { |
1115 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1116 | } |
1117 | }; |
1118 | |
1119 | template <> |
1120 | struct OperandTraits<GetElementPtrInst> : |
1121 | public VariadicOperandTraits<GetElementPtrInst, 1> { |
1122 | }; |
1123 | |
1124 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1125 | ArrayRef<Value *> IdxList, unsigned Values, |
1126 | const Twine &NameStr, |
1127 | Instruction *InsertBefore) |
1128 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1129 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1130 | Values, InsertBefore), |
1131 | SourceElementType(PointeeType), |
1132 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1133 | assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1134, __PRETTY_FUNCTION__)) |
1134 | cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1134, __PRETTY_FUNCTION__)); |
1135 | init(Ptr, IdxList, NameStr); |
1136 | } |
1137 | |
1138 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1139 | ArrayRef<Value *> IdxList, unsigned Values, |
1140 | const Twine &NameStr, |
1141 | BasicBlock *InsertAtEnd) |
1142 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1143 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1144 | Values, InsertAtEnd), |
1145 | SourceElementType(PointeeType), |
1146 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1147 | assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1148, __PRETTY_FUNCTION__)) |
1148 | cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1148, __PRETTY_FUNCTION__)); |
1149 | init(Ptr, IdxList, NameStr); |
1150 | } |
1151 | |
1152 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() { return OperandTraits<GetElementPtrInst>::op_begin(this ); } GetElementPtrInst::const_op_iterator GetElementPtrInst:: op_begin() const { return OperandTraits<GetElementPtrInst> ::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst ::op_iterator GetElementPtrInst::op_end() { return OperandTraits <GetElementPtrInst>::op_end(this); } GetElementPtrInst:: const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits <GetElementPtrInst>::op_end(const_cast<GetElementPtrInst *>(this)); } Value *GetElementPtrInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<GetElementPtrInst >::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1152, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<GetElementPtrInst>::op_begin(const_cast <GetElementPtrInst*>(this))[i_nocapture].get()); } void GetElementPtrInst::setOperand(unsigned i_nocapture, Value *Val_nocapture ) { ((i_nocapture < OperandTraits<GetElementPtrInst> ::operands(this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1152, __PRETTY_FUNCTION__)); OperandTraits<GetElementPtrInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned GetElementPtrInst::getNumOperands() const { return OperandTraits <GetElementPtrInst>::operands(this); } template <int Idx_nocapture> Use &GetElementPtrInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &GetElementPtrInst::Op() const { return this ->OpFrom<Idx_nocapture>(this); } |
1153 | |
1154 | //===----------------------------------------------------------------------===// |
1155 | // ICmpInst Class |
1156 | //===----------------------------------------------------------------------===// |
1157 | |
1158 | /// This instruction compares its operands according to the predicate given |
1159 | /// to the constructor. It only operates on integers or pointers. The operands |
1160 | /// must be identical types. |
1161 | /// Represent an integer comparison operator. |
1162 | class ICmpInst: public CmpInst { |
1163 | void AssertOK() { |
1164 | assert(isIntPredicate() &&((isIntPredicate() && "Invalid ICmp predicate value") ? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1165, __PRETTY_FUNCTION__)) |
1165 | "Invalid ICmp predicate value")((isIntPredicate() && "Invalid ICmp predicate value") ? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1165, __PRETTY_FUNCTION__)); |
1166 | assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to ICmp instruction are not of the same type!" ) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1167, __PRETTY_FUNCTION__)) |
1167 | "Both operands to ICmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to ICmp instruction are not of the same type!" ) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1167, __PRETTY_FUNCTION__)); |
1168 | // Check that the operands are the right type |
1169 | assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand (0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction" ) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1171, __PRETTY_FUNCTION__)) |
1170 | getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand (0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction" ) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1171, __PRETTY_FUNCTION__)) |
1171 | "Invalid operand types for ICmp instruction")(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand (0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction" ) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1171, __PRETTY_FUNCTION__)); |
1172 | } |
1173 | |
1174 | protected: |
1175 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1176 | friend class Instruction; |
1177 | |
1178 | /// Clone an identical ICmpInst |
1179 | ICmpInst *cloneImpl() const; |
1180 | |
1181 | public: |
1182 | /// Constructor with insert-before-instruction semantics. |
1183 | ICmpInst( |
1184 | Instruction *InsertBefore, ///< Where to insert |
1185 | Predicate pred, ///< The predicate to use for the comparison |
1186 | Value *LHS, ///< The left-hand-side of the expression |
1187 | Value *RHS, ///< The right-hand-side of the expression |
1188 | const Twine &NameStr = "" ///< Name of the instruction |
1189 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1190 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1191 | InsertBefore) { |
1192 | #ifndef NDEBUG |
1193 | AssertOK(); |
1194 | #endif |
1195 | } |
1196 | |
1197 | /// Constructor with insert-at-end semantics. |
1198 | ICmpInst( |
1199 | BasicBlock &InsertAtEnd, ///< Block to insert into. |
1200 | Predicate pred, ///< The predicate to use for the comparison |
1201 | Value *LHS, ///< The left-hand-side of the expression |
1202 | Value *RHS, ///< The right-hand-side of the expression |
1203 | const Twine &NameStr = "" ///< Name of the instruction |
1204 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1205 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1206 | &InsertAtEnd) { |
1207 | #ifndef NDEBUG |
1208 | AssertOK(); |
1209 | #endif |
1210 | } |
1211 | |
1212 | /// Constructor with no-insertion semantics |
1213 | ICmpInst( |
1214 | Predicate pred, ///< The predicate to use for the comparison |
1215 | Value *LHS, ///< The left-hand-side of the expression |
1216 | Value *RHS, ///< The right-hand-side of the expression |
1217 | const Twine &NameStr = "" ///< Name of the instruction |
1218 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1219 | Instruction::ICmp, pred, LHS, RHS, NameStr) { |
1220 | #ifndef NDEBUG |
1221 | AssertOK(); |
1222 | #endif |
1223 | } |
1224 | |
1225 | /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. |
1226 | /// @returns the predicate that would be the result if the operand were |
1227 | /// regarded as signed. |
1228 | /// Return the signed version of the predicate |
1229 | Predicate getSignedPredicate() const { |
1230 | return getSignedPredicate(getPredicate()); |
1231 | } |
1232 | |
1233 | /// This is a static version that you can use without an instruction. |
1234 | /// Return the signed version of the predicate. |
1235 | static Predicate getSignedPredicate(Predicate pred); |
1236 | |
1237 | /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. |
1238 | /// @returns the predicate that would be the result if the operand were |
1239 | /// regarded as unsigned. |
1240 | /// Return the unsigned version of the predicate |
1241 | Predicate getUnsignedPredicate() const { |
1242 | return getUnsignedPredicate(getPredicate()); |
1243 | } |
1244 | |
1245 | /// This is a static version that you can use without an instruction. |
1246 | /// Return the unsigned version of the predicate. |
1247 | static Predicate getUnsignedPredicate(Predicate pred); |
1248 | |
1249 | /// Return true if this predicate is either EQ or NE. This also |
1250 | /// tests for commutativity. |
1251 | static bool isEquality(Predicate P) { |
1252 | return P == ICMP_EQ || P == ICMP_NE; |
1253 | } |
1254 | |
1255 | /// Return true if this predicate is either EQ or NE. This also |
1256 | /// tests for commutativity. |
1257 | bool isEquality() const { |
1258 | return isEquality(getPredicate()); |
1259 | } |
1260 | |
1261 | /// @returns true if the predicate of this ICmpInst is commutative |
1262 | /// Determine if this relation is commutative. |
1263 | bool isCommutative() const { return isEquality(); } |
1264 | |
1265 | /// Return true if the predicate is relational (not EQ or NE). |
1266 | /// |
1267 | bool isRelational() const { |
1268 | return !isEquality(); |
1269 | } |
1270 | |
1271 | /// Return true if the predicate is relational (not EQ or NE). |
1272 | /// |
1273 | static bool isRelational(Predicate P) { |
1274 | return !isEquality(P); |
1275 | } |
1276 | |
1277 | /// Exchange the two operands to this instruction in such a way that it does |
1278 | /// not modify the semantics of the instruction. The predicate value may be |
1279 | /// changed to retain the same result if the predicate is order dependent |
1280 | /// (e.g. ult). |
1281 | /// Swap operands and adjust predicate. |
1282 | void swapOperands() { |
1283 | setPredicate(getSwappedPredicate()); |
1284 | Op<0>().swap(Op<1>()); |
1285 | } |
1286 | |
1287 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1288 | static bool classof(const Instruction *I) { |
1289 | return I->getOpcode() == Instruction::ICmp; |
1290 | } |
1291 | static bool classof(const Value *V) { |
1292 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1293 | } |
1294 | }; |
1295 | |
1296 | //===----------------------------------------------------------------------===// |
1297 | // FCmpInst Class |
1298 | //===----------------------------------------------------------------------===// |
1299 | |
1300 | /// This instruction compares its operands according to the predicate given |
1301 | /// to the constructor. It only operates on floating point values or packed |
1302 | /// vectors of floating point values. The operands must be identical types. |
1303 | /// Represents a floating point comparison operator. |
1304 | class FCmpInst: public CmpInst { |
1305 | void AssertOK() { |
1306 | assert(isFPPredicate() && "Invalid FCmp predicate value")((isFPPredicate() && "Invalid FCmp predicate value") ? static_cast<void> (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1306, __PRETTY_FUNCTION__)); |
1307 | assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to FCmp instruction are not of the same type!" ) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1308, __PRETTY_FUNCTION__)) |
1308 | "Both operands to FCmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to FCmp instruction are not of the same type!" ) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1308, __PRETTY_FUNCTION__)); |
1309 | // Check that the operands are the right type |
1310 | assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&((getOperand(0)->getType()->isFPOrFPVectorTy() && "Invalid operand types for FCmp instruction") ? static_cast< void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1311, __PRETTY_FUNCTION__)) |
1311 | "Invalid operand types for FCmp instruction")((getOperand(0)->getType()->isFPOrFPVectorTy() && "Invalid operand types for FCmp instruction") ? static_cast< void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1311, __PRETTY_FUNCTION__)); |
1312 | } |
1313 | |
1314 | protected: |
1315 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1316 | friend class Instruction; |
1317 | |
1318 | /// Clone an identical FCmpInst |
1319 | FCmpInst *cloneImpl() const; |
1320 | |
1321 | public: |
1322 | /// Constructor with insert-before-instruction semantics. |
1323 | FCmpInst( |
1324 | Instruction *InsertBefore, ///< Where to insert |
1325 | Predicate pred, ///< The predicate to use for the comparison |
1326 | Value *LHS, ///< The left-hand-side of the expression |
1327 | Value *RHS, ///< The right-hand-side of the expression |
1328 | const Twine &NameStr = "" ///< Name of the instruction |
1329 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1330 | Instruction::FCmp, pred, LHS, RHS, NameStr, |
1331 | InsertBefore) { |
1332 | AssertOK(); |
1333 | } |
1334 | |
1335 | /// Constructor with insert-at-end semantics. |
1336 | FCmpInst( |
1337 | BasicBlock &InsertAtEnd, ///< Block to insert into. |
1338 | Predicate pred, ///< The predicate to use for the comparison |
1339 | Value *LHS, ///< The left-hand-side of the expression |
1340 | Value *RHS, ///< The right-hand-side of the expression |
1341 | const Twine &NameStr = "" ///< Name of the instruction |
1342 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1343 | Instruction::FCmp, pred, LHS, RHS, NameStr, |
1344 | &InsertAtEnd) { |
1345 | AssertOK(); |
1346 | } |
1347 | |
1348 | /// Constructor with no-insertion semantics |
1349 | FCmpInst( |
1350 | Predicate Pred, ///< The predicate to use for the comparison |
1351 | Value *LHS, ///< The left-hand-side of the expression |
1352 | Value *RHS, ///< The right-hand-side of the expression |
1353 | const Twine &NameStr = "", ///< Name of the instruction |
1354 | Instruction *FlagsSource = nullptr |
1355 | ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, |
1356 | RHS, NameStr, nullptr, FlagsSource) { |
1357 | AssertOK(); |
1358 | } |
1359 | |
1360 | /// @returns true if the predicate of this instruction is EQ or NE. |
1361 | /// Determine if this is an equality predicate. |
1362 | static bool isEquality(Predicate Pred) { |
1363 | return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || |
1364 | Pred == FCMP_UNE; |
1365 | } |
1366 | |
1367 | /// @returns true if the predicate of this instruction is EQ or NE. |
1368 | /// Determine if this is an equality predicate. |
1369 | bool isEquality() const { return isEquality(getPredicate()); } |
1370 | |
1371 | /// @returns true if the predicate of this instruction is commutative. |
1372 | /// Determine if this is a commutative predicate. |
1373 | bool isCommutative() const { |
1374 | return isEquality() || |
1375 | getPredicate() == FCMP_FALSE || |
1376 | getPredicate() == FCMP_TRUE || |
1377 | getPredicate() == FCMP_ORD || |
1378 | getPredicate() == FCMP_UNO; |
1379 | } |
1380 | |
1381 | /// @returns true if the predicate is relational (not EQ or NE). |
1382 | /// Determine if this a relational predicate. |
1383 | bool isRelational() const { return !isEquality(); } |
1384 | |
1385 | /// Exchange the two operands to this instruction in such a way that it does |
1386 | /// not modify the semantics of the instruction. The predicate value may be |
1387 | /// changed to retain the same result if the predicate is order dependent |
1388 | /// (e.g. ult). |
1389 | /// Swap operands and adjust predicate. |
1390 | void swapOperands() { |
1391 | setPredicate(getSwappedPredicate()); |
1392 | Op<0>().swap(Op<1>()); |
1393 | } |
1394 | |
1395 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
1396 | static bool classof(const Instruction *I) { |
1397 | return I->getOpcode() == Instruction::FCmp; |
1398 | } |
1399 | static bool classof(const Value *V) { |
1400 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1401 | } |
1402 | }; |
1403 | |
1404 | //===----------------------------------------------------------------------===// |
1405 | /// This class represents a function call, abstracting a target |
1406 | /// machine's calling convention. This class uses low bit of the SubClassData |
1407 | /// field to indicate whether or not this is a tail call. The rest of the bits |
1408 | /// hold the calling convention of the call. |
1409 | /// |
1410 | class CallInst : public CallBase { |
1411 | CallInst(const CallInst &CI); |
1412 | |
1413 | /// Construct a CallInst given a range of arguments. |
1414 | /// Construct a CallInst from a range of arguments |
1415 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1416 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1417 | Instruction *InsertBefore); |
1418 | |
1419 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1420 | const Twine &NameStr, Instruction *InsertBefore) |
1421 | : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {} |
1422 | |
1423 | /// Construct a CallInst given a range of arguments. |
1424 | /// Construct a CallInst from a range of arguments |
1425 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1426 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1427 | BasicBlock *InsertAtEnd); |
1428 | |
1429 | explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, |
1430 | Instruction *InsertBefore); |
1431 | |
1432 | CallInst(FunctionType *ty, Value *F, const Twine &NameStr, |
1433 | BasicBlock *InsertAtEnd); |
1434 | |
1435 | void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, |
1436 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
1437 | void init(FunctionType *FTy, Value *Func, const Twine &NameStr); |
1438 | |
1439 | /// Compute the number of operands to allocate. |
1440 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
1441 | // We need one operand for the called function, plus the input operand |
1442 | // counts provided. |
1443 | return 1 + NumArgs + NumBundleInputs; |
1444 | } |
1445 | |
1446 | protected: |
1447 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1448 | friend class Instruction; |
1449 | |
1450 | CallInst *cloneImpl() const; |
1451 | |
1452 | public: |
1453 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", |
1454 | Instruction *InsertBefore = nullptr) { |
1455 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); |
1456 | } |
1457 | |
1458 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1459 | const Twine &NameStr, |
1460 | Instruction *InsertBefore = nullptr) { |
1461 | return new (ComputeNumOperands(Args.size())) |
1462 | CallInst(Ty, Func, Args, None, NameStr, InsertBefore); |
1463 | } |
1464 | |
1465 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1466 | ArrayRef<OperandBundleDef> Bundles = None, |
1467 | const Twine &NameStr = "", |
1468 | Instruction *InsertBefore = nullptr) { |
1469 | const int NumOperands = |
1470 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
1471 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
1472 | |
1473 | return new (NumOperands, DescriptorBytes) |
1474 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); |
1475 | } |
1476 | |
1477 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, |
1478 | BasicBlock *InsertAtEnd) { |
1479 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); |
1480 | } |
1481 | |
1482 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1483 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1484 | return new (ComputeNumOperands(Args.size())) |
1485 | CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd); |
1486 | } |
1487 | |
1488 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1489 | ArrayRef<OperandBundleDef> Bundles, |
1490 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1491 | const int NumOperands = |
1492 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
1493 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
1494 | |
1495 | return new (NumOperands, DescriptorBytes) |
1496 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); |
1497 | } |
1498 | |
1499 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", |
1500 | Instruction *InsertBefore = nullptr) { |
1501 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, |
1502 | InsertBefore); |
1503 | } |
1504 | |
1505 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1506 | ArrayRef<OperandBundleDef> Bundles = None, |
1507 | const Twine &NameStr = "", |
1508 | Instruction *InsertBefore = nullptr) { |
1509 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, |
1510 | NameStr, InsertBefore); |
1511 | } |
1512 | |
1513 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1514 | const Twine &NameStr, |
1515 | Instruction *InsertBefore = nullptr) { |
1516 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, |
1517 | InsertBefore); |
1518 | } |
1519 | |
1520 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr, |
1521 | BasicBlock *InsertAtEnd) { |
1522 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, |
1523 | InsertAtEnd); |
1524 | } |
1525 | |
1526 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1527 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1528 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, |
1529 | InsertAtEnd); |
1530 | } |
1531 | |
1532 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1533 | ArrayRef<OperandBundleDef> Bundles, |
1534 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1535 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, |
1536 | NameStr, InsertAtEnd); |
1537 | } |
1538 | |
1539 | // Deprecated [opaque pointer types] |
1540 | static CallInst *Create(Value *Func, const Twine &NameStr = "", |
1541 | Instruction *InsertBefore = nullptr) { |
1542 | return Create(cast<FunctionType>( |
1543 | cast<PointerType>(Func->getType())->getElementType()), |
1544 | Func, NameStr, InsertBefore); |
1545 | } |
1546 | |
1547 | // Deprecated [opaque pointer types] |
1548 | static CallInst *Create(Value *Func, ArrayRef<Value *> Args, |
1549 | const Twine &NameStr, |
1550 | Instruction *InsertBefore = nullptr) { |
1551 | return Create(cast<FunctionType>( |
1552 | cast<PointerType>(Func->getType())->getElementType()), |
1553 | Func, Args, NameStr, InsertBefore); |
1554 | } |
1555 | |
1556 | // Deprecated [opaque pointer types] |
1557 | static CallInst *Create(Value *Func, ArrayRef<Value *> Args, |
1558 | ArrayRef<OperandBundleDef> Bundles = None, |
1559 | const Twine &NameStr = "", |
1560 | Instruction *InsertBefore = nullptr) { |
1561 | return Create(cast<FunctionType>( |
1562 | cast<PointerType>(Func->getType())->getElementType()), |
1563 | Func, Args, Bundles, NameStr, InsertBefore); |
1564 | } |
1565 | |
1566 | // Deprecated [opaque pointer types] |
1567 | static CallInst *Create(Value *Func, const Twine &NameStr, |
1568 | BasicBlock *InsertAtEnd) { |
1569 | return Create(cast<FunctionType>( |
1570 | cast<PointerType>(Func->getType())->getElementType()), |
1571 | Func, NameStr, InsertAtEnd); |
1572 | } |
1573 | |
1574 | // Deprecated [opaque pointer types] |
1575 | static CallInst *Create(Value *Func, ArrayRef<Value *> Args, |
1576 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1577 | return Create(cast<FunctionType>( |
1578 | cast<PointerType>(Func->getType())->getElementType()), |
1579 | Func, Args, NameStr, InsertAtEnd); |
1580 | } |
1581 | |
1582 | // Deprecated [opaque pointer types] |
1583 | static CallInst *Create(Value *Func, ArrayRef<Value *> Args, |
1584 | ArrayRef<OperandBundleDef> Bundles, |
1585 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1586 | return Create(cast<FunctionType>( |
1587 | cast<PointerType>(Func->getType())->getElementType()), |
1588 | Func, Args, Bundles, NameStr, InsertAtEnd); |
1589 | } |
1590 | |
1591 | /// Create a clone of \p CI with a different set of operand bundles and |
1592 | /// insert it before \p InsertPt. |
1593 | /// |
1594 | /// The returned call instruction is identical \p CI in every way except that |
1595 | /// the operand bundles for the new instruction are set to the operand bundles |
1596 | /// in \p Bundles. |
// Declaration only; the definition lives in the .cpp file.
1597 | static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, |
1598 | Instruction *InsertPt = nullptr); |
1599 | |
1600 | /// Generate the IR for a call to malloc: |
1601 | /// 1. Compute the malloc call's argument as the specified type's size, |
1602 | /// possibly multiplied by the array size if the array size is not |
1603 | /// constant 1. |
1604 | /// 2. Call malloc with that argument. |
1605 | /// 3. Bitcast the result of the malloc call to the specified type. |
// Overload set: the first parameter selects the insertion point — an
// Instruction* inserts before it, a BasicBlock* appends at the block's end.
// The variants below additionally accept operand bundles.
1606 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
1607 | Type *AllocTy, Value *AllocSize, |
1608 | Value *ArraySize = nullptr, |
1609 | Function *MallocF = nullptr, |
1610 | const Twine &Name = ""); |
1611 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
1612 | Type *AllocTy, Value *AllocSize, |
1613 | Value *ArraySize = nullptr, |
1614 | Function *MallocF = nullptr, |
1615 | const Twine &Name = ""); |
1616 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
1617 | Type *AllocTy, Value *AllocSize, |
1618 | Value *ArraySize = nullptr, |
1619 | ArrayRef<OperandBundleDef> Bundles = None, |
1620 | Function *MallocF = nullptr, |
1621 | const Twine &Name = ""); |
1622 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
1623 | Type *AllocTy, Value *AllocSize, |
1624 | Value *ArraySize = nullptr, |
1625 | ArrayRef<OperandBundleDef> Bundles = None, |
1626 | Function *MallocF = nullptr, |
1627 | const Twine &Name = ""); |
1628 | /// Generate the IR for a call to the builtin free function. |
// Same insertion-point convention as CreateMalloc above.
1629 | static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); |
1630 | static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); |
1631 | static Instruction *CreateFree(Value *Source, |
1632 | ArrayRef<OperandBundleDef> Bundles, |
1633 | Instruction *InsertBefore); |
1634 | static Instruction *CreateFree(Value *Source, |
1635 | ArrayRef<OperandBundleDef> Bundles, |
1636 | BasicBlock *InsertAtEnd); |
1637 | |
1638 | // Note that 'musttail' implies 'tail'. |
// The kind is stored in the low two bits of the instruction subclass data;
// see getTailCallKind()/setTailCallKind() below, which mask with 3.
1639 | enum TailCallKind { |
// No tail marker on the call.
1640 | TCK_None = 0, |
// IR "tail" marker.
1641 | TCK_Tail = 1, |
// IR "musttail" marker (also reported as a tail call by isTailCall()).
1642 | TCK_MustTail = 2, |
// IR "notail" marker.
1643 | TCK_NoTail = 3 |
1644 | }; |
1645 | TailCallKind getTailCallKind() const { |
1646 | return TailCallKind(getSubclassDataFromInstruction() & 3); |
1647 | } |
1648 | |
1649 | bool isTailCall() const { |
1650 | unsigned Kind = getSubclassDataFromInstruction() & 3; |
1651 | return Kind == TCK_Tail || Kind == TCK_MustTail; |
1652 | } |
1653 | |
1654 | bool isMustTailCall() const { |
1655 | return (getSubclassDataFromInstruction() & 3) == TCK_MustTail; |
1656 | } |
1657 | |
1658 | bool isNoTailCall() const { |
1659 | return (getSubclassDataFromInstruction() & 3) == TCK_NoTail; |
1660 | } |
1661 | |
1662 | void setTailCall(bool isTC = true) { |
1663 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) | |
1664 | unsigned(isTC ? TCK_Tail : TCK_None)); |
1665 | } |
1666 | |
1667 | void setTailCallKind(TailCallKind TCK) { |
1668 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) | |
1669 | unsigned(TCK)); |
1670 | } |
1671 | |
1672 | /// Return true if the call can return twice |
// Both accessors are backed by the ReturnsTwice function attribute;
// note there is a getter and a one-way setter, but no "clear" here.
1673 | bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } |
1674 | void setCanReturnTwice() { |
1675 | addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice); |
1676 | } |
1677 | |
1678 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
// An Instruction is a CallInst exactly when its opcode is Instruction::Call.
1679 | static bool classof(const Instruction *I) { |
1680 | return I->getOpcode() == Instruction::Call; |
1681 | } |
// Value-level variant: first checks the value is an Instruction at all.
1682 | static bool classof(const Value *V) { |
1683 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1684 | } |
1685 | |
1686 | /// Updates profile metadata by scaling it by \p S / \p T. |
// Declaration only; defined out of line.
1687 | void updateProfWeight(uint64_t S, uint64_t T); |
1688 | |
1689 | private: |
1690 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
1691 | // method so that subclasses cannot accidentally use it. |
1692 | void setInstructionSubclassData(unsigned short D) { |
1693 | Instruction::setInstructionSubclassData(D); |
1694 | } |
1695 | }; |
1696 | |
// Out-of-line constructor: builds the call appended to \p InsertAtEnd.
// The operand count is Args + bundle inputs + 1 (the callee); CallBase is
// handed a Use* that is NumOperands slots before op_end(this), so the
// co-allocated operand array ends exactly at the instruction object.
1697 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1698 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1699 | BasicBlock *InsertAtEnd) |
1700 | : CallBase(Ty->getReturnType(), Instruction::Call, |
1701 | OperandTraits<CallBase>::op_end(this) - |
1702 | (Args.size() + CountBundleInputs(Bundles) + 1), |
1703 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
1704 | InsertAtEnd) { |
// init() wires up the callee, arguments, bundles, and the name.
1705 | init(Ty, Func, Args, Bundles, NameStr); |
1706 | } |
1707 | |
// Out-of-line constructor: identical to the block-append variant above,
// except the new call is inserted before \p InsertBefore. Operand count is
// Args + bundle inputs + 1 (the callee), co-allocated ending at op_end(this).
1708 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1709 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1710 | Instruction *InsertBefore) |
1711 | : CallBase(Ty->getReturnType(), Instruction::Call, |
1712 | OperandTraits<CallBase>::op_end(this) - |
1713 | (Args.size() + CountBundleInputs(Bundles) + 1), |
1714 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
1715 | InsertBefore) { |
1716 | init(Ty, Func, Args, Bundles, NameStr); |
1717 | } |
1718 | |
1719 | //===----------------------------------------------------------------------===// |
1720 | // SelectInst Class |
1721 | //===----------------------------------------------------------------------===// |
1722 | |
1723 | /// This class represents the LLVM 'select' instruction. |
1724 | /// |
1725 | class SelectInst : public Instruction { |
1726 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
1727 | Instruction *InsertBefore) |
1728 | : Instruction(S1->getType(), Instruction::Select, |
1729 | &Op<0>(), 3, InsertBefore) { |
1730 | init(C, S1, S2); |
1731 | setName(NameStr); |
1732 | } |
1733 | |
1734 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
1735 | BasicBlock *InsertAtEnd) |
1736 | : Instruction(S1->getType(), Instruction::Select, |
1737 | &Op<0>(), 3, InsertAtEnd) { |
1738 | init(C, S1, S2); |
1739 | setName(NameStr); |
1740 | } |
1741 | |
1742 | void init(Value *C, Value *S1, Value *S2) { |
1743 | assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")((!areInvalidOperands(C, S1, S2) && "Invalid operands for select" ) ? static_cast<void> (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1743, __PRETTY_FUNCTION__)); |
1744 | Op<0>() = C; |
1745 | Op<1>() = S1; |
1746 | Op<2>() = S2; |
1747 | } |
1748 | |
1749 | protected: |
1750 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1751 | friend class Instruction; |
1752 | |
1753 | SelectInst *cloneImpl() const; |
1754 | |
1755 | public: |
1756 | static SelectInst *Create(Value *C, Value *S1, Value *S2, |
1757 | const Twine &NameStr = "", |
1758 | Instruction *InsertBefore = nullptr, |
1759 | Instruction *MDFrom = nullptr) { |
1760 | SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); |
1761 | if (MDFrom) |
1762 | Sel->copyMetadata(*MDFrom); |
1763 | return Sel; |
1764 | } |
1765 | |
1766 | static SelectInst *Create(Value *C, Value *S1, Value *S2, |
1767 | const Twine &NameStr, |
1768 | BasicBlock *InsertAtEnd) { |
1769 | return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); |
1770 | } |
1771 | |
1772 | const Value *getCondition() const { return Op<0>(); } |
1773 | const Value *getTrueValue() const { return Op<1>(); } |
1774 | const Value *getFalseValue() const { return Op<2>(); } |
1775 | Value *getCondition() { return Op<0>(); } |
1776 | Value *getTrueValue() { return Op<1>(); } |
1777 | Value *getFalseValue() { return Op<2>(); } |
1778 | |
1779 | void setCondition(Value *V) { Op<0>() = V; } |
1780 | void setTrueValue(Value *V) { Op<1>() = V; } |
1781 | void setFalseValue(Value *V) { Op<2>() = V; } |
1782 | |
1783 | /// Swap the true and false values of the select instruction. |
1784 | /// This doesn't swap prof metadata. |
1785 | void swapValues() { Op<1>().swap(Op<2>()); } |
1786 | |
1787 | /// Return a string if the specified operands are invalid |
1788 | /// for a select operation, otherwise return null. |
1789 | static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); |
1790 | |
1791 | /// Transparently provide more efficient getOperand methods. |
1792 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
1793 | |
1794 | OtherOps getOpcode() const { |
1795 | return static_cast<OtherOps>(Instruction::getOpcode()); |
1796 | } |
1797 | |
1798 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1799 | static bool classof(const Instruction *I) { |
1800 | return I->getOpcode() == Instruction::Select; |
1801 | } |
1802 | static bool classof(const Value *V) { |
1803 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1804 | } |
1805 | }; |
1806 | |
// SelectInst always has exactly 3 operands: condition, true value, false
// value (see the Op<0..2> accessors in the class).
1807 | template <> |
1808 | struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { |
1809 | }; |
1810 | |
1811 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits <SelectInst>::op_begin(this); } SelectInst::const_op_iterator SelectInst::op_begin() const { return OperandTraits<SelectInst >::op_begin(const_cast<SelectInst*>(this)); } SelectInst ::op_iterator SelectInst::op_end() { return OperandTraits< SelectInst>::op_end(this); } SelectInst::const_op_iterator SelectInst::op_end() const { return OperandTraits<SelectInst >::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<SelectInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1811, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<SelectInst>::op_begin(const_cast<SelectInst *>(this))[i_nocapture].get()); } void SelectInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<SelectInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1811, __PRETTY_FUNCTION__)); OperandTraits<SelectInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned SelectInst ::getNumOperands() const { return OperandTraits<SelectInst >::operands(this); } template <int Idx_nocapture> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & SelectInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
1812 | |
1813 | //===----------------------------------------------------------------------===// |
1814 | // VAArgInst Class |
1815 | //===----------------------------------------------------------------------===// |
1816 | |
1817 | /// This class represents the va_arg llvm instruction, which returns |
1818 | /// an argument of the specified type given a va_list and increments that list |
1819 | /// |
1820 | class VAArgInst : public UnaryInstruction { |
1821 | protected: |
1822 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1823 | friend class Instruction; |
1824 | |
1825 | VAArgInst *cloneImpl() const; |
1826 | |
1827 | public: |
1828 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", |
1829 | Instruction *InsertBefore = nullptr) |
1830 | : UnaryInstruction(Ty, VAArg, List, InsertBefore) { |
1831 | setName(NameStr); |
1832 | } |
1833 | |
1834 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr, |
1835 | BasicBlock *InsertAtEnd) |
1836 | : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { |
1837 | setName(NameStr); |
1838 | } |
1839 | |
1840 | Value *getPointerOperand() { return getOperand(0); } |
1841 | const Value *getPointerOperand() const { return getOperand(0); } |
1842 | static unsigned getPointerOperandIndex() { return 0U; } |
1843 | |
1844 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1845 | static bool classof(const Instruction *I) { |
1846 | return I->getOpcode() == VAArg; |
1847 | } |
1848 | static bool classof(const Value *V) { |
1849 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1850 | } |
1851 | }; |
1852 | |
1853 | //===----------------------------------------------------------------------===// |
1854 | // ExtractElementInst Class |
1855 | //===----------------------------------------------------------------------===// |
1856 | |
1857 | /// This instruction extracts a single (scalar) |
1858 | /// element from a VectorType value |
1859 | /// |
1860 | class ExtractElementInst : public Instruction { |
1861 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", |
1862 | Instruction *InsertBefore = nullptr); |
1863 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, |
1864 | BasicBlock *InsertAtEnd); |
1865 | |
1866 | protected: |
1867 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1868 | friend class Instruction; |
1869 | |
1870 | ExtractElementInst *cloneImpl() const; |
1871 | |
1872 | public: |
1873 | static ExtractElementInst *Create(Value *Vec, Value *Idx, |
1874 | const Twine &NameStr = "", |
1875 | Instruction *InsertBefore = nullptr) { |
1876 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); |
1877 | } |
1878 | |
1879 | static ExtractElementInst *Create(Value *Vec, Value *Idx, |
1880 | const Twine &NameStr, |
1881 | BasicBlock *InsertAtEnd) { |
1882 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); |
1883 | } |
1884 | |
1885 | /// Return true if an extractelement instruction can be |
1886 | /// formed with the specified operands. |
1887 | static bool isValidOperands(const Value *Vec, const Value *Idx); |
1888 | |
1889 | Value *getVectorOperand() { return Op<0>(); } |
1890 | Value *getIndexOperand() { return Op<1>(); } |
1891 | const Value *getVectorOperand() const { return Op<0>(); } |
1892 | const Value *getIndexOperand() const { return Op<1>(); } |
1893 | |
1894 | VectorType *getVectorOperandType() const { |
1895 | return cast<VectorType>(getVectorOperand()->getType()); |
1896 | } |
1897 | |
1898 | /// Transparently provide more efficient getOperand methods. |
1899 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
1900 | |
1901 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1902 | static bool classof(const Instruction *I) { |
1903 | return I->getOpcode() == Instruction::ExtractElement; |
1904 | } |
1905 | static bool classof(const Value *V) { |
1906 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1907 | } |
1908 | }; |
1909 | |
// ExtractElementInst always has exactly 2 operands: the source vector and
// the element index.
1910 | template <> |
1911 | struct OperandTraits<ExtractElementInst> : |
1912 | public FixedNumOperandTraits<ExtractElementInst, 2> { |
1913 | }; |
1914 | |
1915 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin( ) { return OperandTraits<ExtractElementInst>::op_begin( this); } ExtractElementInst::const_op_iterator ExtractElementInst ::op_begin() const { return OperandTraits<ExtractElementInst >::op_begin(const_cast<ExtractElementInst*>(this)); } ExtractElementInst::op_iterator ExtractElementInst::op_end() { return OperandTraits<ExtractElementInst>::op_end(this ); } ExtractElementInst::const_op_iterator ExtractElementInst ::op_end() const { return OperandTraits<ExtractElementInst >::op_end(const_cast<ExtractElementInst*>(this)); } Value *ExtractElementInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<ExtractElementInst>:: operands(this) && "getOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1915, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<ExtractElementInst>::op_begin(const_cast <ExtractElementInst*>(this))[i_nocapture].get()); } void ExtractElementInst::setOperand(unsigned i_nocapture, Value * Val_nocapture) { ((i_nocapture < OperandTraits<ExtractElementInst >::operands(this) && "setOperand() out of range!") ? 
static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1915, __PRETTY_FUNCTION__)); OperandTraits<ExtractElementInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ExtractElementInst::getNumOperands() const { return OperandTraits <ExtractElementInst>::operands(this); } template <int Idx_nocapture> Use &ExtractElementInst::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &ExtractElementInst::Op() const { return this->OpFrom<Idx_nocapture>(this); } |
1916 | |
1917 | //===----------------------------------------------------------------------===// |
1918 | // InsertElementInst Class |
1919 | //===----------------------------------------------------------------------===// |
1920 | |
1921 | /// This instruction inserts a single (scalar) |
1922 | /// element into a VectorType value |
1923 | /// |
1924 | class InsertElementInst : public Instruction { |
1925 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, |
1926 | const Twine &NameStr = "", |
1927 | Instruction *InsertBefore = nullptr); |
1928 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, |
1929 | BasicBlock *InsertAtEnd); |
1930 | |
1931 | protected: |
1932 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1933 | friend class Instruction; |
1934 | |
1935 | InsertElementInst *cloneImpl() const; |
1936 | |
1937 | public: |
1938 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1939 | const Twine &NameStr = "", |
1940 | Instruction *InsertBefore = nullptr) { |
1941 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); |
1942 | } |
1943 | |
1944 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1945 | const Twine &NameStr, |
1946 | BasicBlock *InsertAtEnd) { |
1947 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); |
1948 | } |
1949 | |
1950 | /// Return true if an insertelement instruction can be |
1951 | /// formed with the specified operands. |
1952 | static bool isValidOperands(const Value *Vec, const Value *NewElt, |
1953 | const Value *Idx); |
1954 | |
1955 | /// Overload to return most specific vector type. |
1956 | /// |
1957 | VectorType *getType() const { |
1958 | return cast<VectorType>(Instruction::getType()); |
1959 | } |
1960 | |
1961 | /// Transparently provide more efficient getOperand methods. |
1962 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
1963 | |
1964 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1965 | static bool classof(const Instruction *I) { |
1966 | return I->getOpcode() == Instruction::InsertElement; |
1967 | } |
1968 | static bool classof(const Value *V) { |
1969 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1970 | } |
1971 | }; |
1972 | |
// InsertElementInst always has exactly 3 operands: the destination vector,
// the new element, and the element index.
1973 | template <> |
1974 | struct OperandTraits<InsertElementInst> : |
1975 | public FixedNumOperandTraits<InsertElementInst, 3> { |
1976 | }; |
1977 | |
1978 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() { return OperandTraits<InsertElementInst>::op_begin(this ); } InsertElementInst::const_op_iterator InsertElementInst:: op_begin() const { return OperandTraits<InsertElementInst> ::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst ::op_iterator InsertElementInst::op_end() { return OperandTraits <InsertElementInst>::op_end(this); } InsertElementInst:: const_op_iterator InsertElementInst::op_end() const { return OperandTraits <InsertElementInst>::op_end(const_cast<InsertElementInst *>(this)); } Value *InsertElementInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<InsertElementInst >::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1978, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<InsertElementInst>::op_begin(const_cast <InsertElementInst*>(this))[i_nocapture].get()); } void InsertElementInst::setOperand(unsigned i_nocapture, Value *Val_nocapture ) { ((i_nocapture < OperandTraits<InsertElementInst> ::operands(this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 1978, __PRETTY_FUNCTION__)); OperandTraits<InsertElementInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned InsertElementInst::getNumOperands() const { return OperandTraits <InsertElementInst>::operands(this); } template <int Idx_nocapture> Use &InsertElementInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &InsertElementInst::Op() const { return this ->OpFrom<Idx_nocapture>(this); } |
1979 | |
1980 | //===----------------------------------------------------------------------===// |
1981 | // ShuffleVectorInst Class |
1982 | //===----------------------------------------------------------------------===// |
1983 | |
1984 | /// This instruction constructs a fixed permutation of two |
1985 | /// input vectors. |
1986 | /// |
1987 | class ShuffleVectorInst : public Instruction { |
1988 | protected: |
1989 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1990 | friend class Instruction; |
1991 | |
1992 | ShuffleVectorInst *cloneImpl() const; |
1993 | |
1994 | public: |
// Constructors are declared here and defined out of line.
// NOTE(review): the parameter name "InsertBefor" below is missing its final
// 'e' — a cosmetic typo in the declaration only (the name is not part of the
// ABI), but worth fixing upstream for consistency.
1995 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
1996 | const Twine &NameStr = "", |
1997 | Instruction *InsertBefor = nullptr); |
1998 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
1999 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2000 | |
2001 | // allocate space for exactly three operands |
2002 | void *operator new(size_t s) { |
2003 | return User::operator new(s, 3); |
2004 | } |
2005 | |
2006 | /// Swap the first 2 operands and adjust the mask to preserve the semantics |
2007 | /// of the instruction. |
2008 | void commute(); |
2009 | |
2010 | /// Return true if a shufflevector instruction can be |
2011 | /// formed with the specified operands. |
2012 | static bool isValidOperands(const Value *V1, const Value *V2, |
2013 | const Value *Mask); |
2014 | |
2015 | /// Overload to return most specific vector type. |
2016 | /// |
// Shadows Instruction::getType(); a shufflevector always produces a vector.
2017 | VectorType *getType() const { |
2018 | return cast<VectorType>(Instruction::getType()); |
2019 | } |
2020 | |
2020 | |
2021 | /// Transparently provide more efficient getOperand methods. |
2022 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
2023 | |
2024 | Constant *getMask() const { |
2025 | return cast<Constant>(getOperand(2)); |
2026 | } |
2027 | |
2028 | /// Return the shuffle mask value for the specified element of the mask. |
2029 | /// Return -1 if the element is undef. |
2030 | static int getMaskValue(const Constant *Mask, unsigned Elt); |
2031 | |
2032 | /// Return the shuffle mask value of this instruction for the given element |
2033 | /// index. Return -1 if the element is undef. |
2034 | int getMaskValue(unsigned Elt) const { |
2035 | return getMaskValue(getMask(), Elt); |
2036 | } |
2037 | |
2038 | /// Convert the input shuffle mask operand to a vector of integers. Undefined |
2039 | /// elements of the mask are returned as -1. |
2040 | static void getShuffleMask(const Constant *Mask, |
2041 | SmallVectorImpl<int> &Result); |
2042 | |
2043 | /// Return the mask for this instruction as a vector of integers. Undefined |
2044 | /// elements of the mask are returned as -1. |
2045 | void getShuffleMask(SmallVectorImpl<int> &Result) const { |
2046 | return getShuffleMask(getMask(), Result); |
2047 | } |
2048 | |
2049 | SmallVector<int, 16> getShuffleMask() const { |
2050 | SmallVector<int, 16> Mask; |
2051 | getShuffleMask(Mask); |
2052 | return Mask; |
2053 | } |
2054 | |
2055 | /// Return true if this shuffle returns a vector with a different number of |
2056 | /// elements than its source vectors. |
2057 | /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> |
2058 | /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> |
2059 | bool changesLength() const { |
2060 | unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements(); |
2061 | unsigned NumMaskElts = getMask()->getType()->getVectorNumElements(); |
2062 | return NumSourceElts != NumMaskElts; |
2063 | } |
2064 | |
2065 | /// Return true if this shuffle returns a vector with a greater number of |
2066 | /// elements than its source vectors. |
2067 | /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> |
2068 | bool increasesLength() const { |
2069 | unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements(); |
2070 | unsigned NumMaskElts = getMask()->getType()->getVectorNumElements(); |
2071 | return NumSourceElts < NumMaskElts; |
2072 | } |
2073 | |
2074 | /// Return true if this shuffle mask chooses elements from exactly one source |
2075 | /// vector. |
2076 | /// Example: <7,5,undef,7> |
2077 | /// This assumes that vector operands are the same length as the mask. |
2078 | static bool isSingleSourceMask(ArrayRef<int> Mask); |
2079 | static bool isSingleSourceMask(const Constant *Mask) { |
2080 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2080, __PRETTY_FUNCTION__)); |
2081 | SmallVector<int, 16> MaskAsInts; |
2082 | getShuffleMask(Mask, MaskAsInts); |
2083 | return isSingleSourceMask(MaskAsInts); |
2084 | } |
2085 | |
2086 | /// Return true if this shuffle chooses elements from exactly one source |
2087 | /// vector without changing the length of that vector. |
2088 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> |
2089 | /// TODO: Optionally allow length-changing shuffles. |
2090 | bool isSingleSource() const { |
2091 | return !changesLength() && isSingleSourceMask(getMask()); |
2092 | } |
2093 | |
2094 | /// Return true if this shuffle mask chooses elements from exactly one source |
2095 | /// vector without lane crossings. A shuffle using this mask is not |
2096 | /// necessarily a no-op because it may change the number of elements from its |
2097 | /// input vectors or it may provide demanded bits knowledge via undef lanes. |
2098 | /// Example: <undef,undef,2,3> |
2099 | static bool isIdentityMask(ArrayRef<int> Mask); |
2100 | static bool isIdentityMask(const Constant *Mask) { |
2101 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2101, __PRETTY_FUNCTION__)); |
2102 | SmallVector<int, 16> MaskAsInts; |
2103 | getShuffleMask(Mask, MaskAsInts); |
2104 | return isIdentityMask(MaskAsInts); |
2105 | } |
2106 | |
2107 | /// Return true if this shuffle chooses elements from exactly one source |
2108 | /// vector without lane crossings and does not change the number of elements |
2109 | /// from its input vectors. |
2110 | /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> |
2111 | bool isIdentity() const { |
2112 | return !changesLength() && isIdentityMask(getShuffleMask()); |
2113 | } |
2114 | |
2115 | /// Return true if this shuffle lengthens exactly one source vector with |
2116 | /// undefs in the high elements. |
2117 | bool isIdentityWithPadding() const; |
2118 | |
2119 | /// Return true if this shuffle extracts the first N elements of exactly one |
2120 | /// source vector. |
2121 | bool isIdentityWithExtract() const; |
2122 | |
2123 | /// Return true if this shuffle concatenates its 2 source vectors. This |
2124 | /// returns false if either input is undefined. In that case, the shuffle |
2125 | /// is better classified as an identity with padding operation. |
2126 | bool isConcat() const; |
2127 | |
2127 | |
2128 | /// Return true if this shuffle mask chooses elements from its source vectors |
2129 | /// without lane crossings. A shuffle using this mask would be |
2130 | /// equivalent to a vector select with a constant condition operand. |
2131 | /// Example: <4,1,6,undef> |
2132 | /// This returns false if the mask does not choose from both input vectors. |
2133 | /// In that case, the shuffle is better classified as an identity shuffle. |
2134 | /// This assumes that vector operands are the same length as the mask |
2135 | /// (a length-changing shuffle can never be equivalent to a vector select). |
2136 | static bool isSelectMask(ArrayRef<int> Mask); |
2137 | static bool isSelectMask(const Constant *Mask) { |
2138 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2138, __PRETTY_FUNCTION__)); |
2139 | SmallVector<int, 16> MaskAsInts; |
2140 | getShuffleMask(Mask, MaskAsInts); |
2141 | return isSelectMask(MaskAsInts); |
2142 | } |
2143 | |
2144 | /// Return true if this shuffle chooses elements from its source vectors |
2145 | /// without lane crossings and all operands have the same number of elements. |
2146 | /// In other words, this shuffle is equivalent to a vector select with a |
2147 | /// constant condition operand. |
2148 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> |
2149 | /// This returns false if the mask does not choose from both input vectors. |
2150 | /// In that case, the shuffle is better classified as an identity shuffle. |
2151 | /// TODO: Optionally allow length-changing shuffles. |
2152 | bool isSelect() const { |
2153 | return !changesLength() && isSelectMask(getMask()); |
2154 | } |
2155 | |
2156 | /// Return true if this shuffle mask swaps the order of elements from exactly |
2157 | /// one source vector. |
2158 | /// Example: <7,6,undef,4> |
2159 | /// This assumes that vector operands are the same length as the mask. |
2160 | static bool isReverseMask(ArrayRef<int> Mask); |
2161 | static bool isReverseMask(const Constant *Mask) { |
2162 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2162, __PRETTY_FUNCTION__)); |
2163 | SmallVector<int, 16> MaskAsInts; |
2164 | getShuffleMask(Mask, MaskAsInts); |
2165 | return isReverseMask(MaskAsInts); |
2166 | } |
2167 | |
2168 | /// Return true if this shuffle swaps the order of elements from exactly |
2169 | /// one source vector. |
2170 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> |
2171 | /// TODO: Optionally allow length-changing shuffles. |
2172 | bool isReverse() const { |
2173 | return !changesLength() && isReverseMask(getMask()); |
2174 | } |
2175 | |
2176 | /// Return true if this shuffle mask chooses all elements with the same value |
2177 | /// as the first element of exactly one source vector. |
2178 | /// Example: <4,undef,undef,4> |
2179 | /// This assumes that vector operands are the same length as the mask. |
2180 | static bool isZeroEltSplatMask(ArrayRef<int> Mask); |
2181 | static bool isZeroEltSplatMask(const Constant *Mask) { |
2182 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2182, __PRETTY_FUNCTION__)); |
2183 | SmallVector<int, 16> MaskAsInts; |
2184 | getShuffleMask(Mask, MaskAsInts); |
2185 | return isZeroEltSplatMask(MaskAsInts); |
2186 | } |
2187 | |
2188 | /// Return true if all elements of this shuffle are the same value as the |
2189 | /// first element of exactly one source vector without changing the length |
2190 | /// of that vector. |
2191 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> |
2192 | /// TODO: Optionally allow length-changing shuffles. |
2193 | /// TODO: Optionally allow splats from other elements. |
2194 | bool isZeroEltSplat() const { |
2195 | return !changesLength() && isZeroEltSplatMask(getMask()); |
2196 | } |
2197 | |
2198 | /// Return true if this shuffle mask is a transpose mask. |
2199 | /// Transpose vector masks transpose a 2xn matrix. They read corresponding |
2200 | /// even- or odd-numbered vector elements from two n-dimensional source |
2201 | /// vectors and write each result into consecutive elements of an |
2202 | /// n-dimensional destination vector. Two shuffles are necessary to complete |
2203 | /// the transpose, one for the even elements and another for the odd elements. |
2204 | /// This description closely follows how the TRN1 and TRN2 AArch64 |
2205 | /// instructions operate. |
2206 | /// |
2207 | /// For example, a simple 2x2 matrix can be transposed with: |
2208 | /// |
2209 | /// ; Original matrix |
2210 | /// m0 = < a, b > |
2211 | /// m1 = < c, d > |
2212 | /// |
2213 | /// ; Transposed matrix |
2214 | /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > |
2215 | /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > |
2216 | /// |
2217 | /// For matrices having greater than n columns, the resulting nx2 transposed |
2218 | /// matrix is stored in two result vectors such that one vector contains |
2219 | /// interleaved elements from all the even-numbered rows and the other vector |
2220 | /// contains interleaved elements from all the odd-numbered rows. For example, |
2221 | /// a 2x4 matrix can be transposed with: |
2222 | /// |
2223 | /// ; Original matrix |
2224 | /// m0 = < a, b, c, d > |
2225 | /// m1 = < e, f, g, h > |
2226 | /// |
2227 | /// ; Transposed matrix |
2228 | /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > |
2229 | /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > |
2230 | static bool isTransposeMask(ArrayRef<int> Mask); |
2231 | static bool isTransposeMask(const Constant *Mask) { |
2232 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2232, __PRETTY_FUNCTION__)); |
2233 | SmallVector<int, 16> MaskAsInts; |
2234 | getShuffleMask(Mask, MaskAsInts); |
2235 | return isTransposeMask(MaskAsInts); |
2236 | } |
2237 | |
2238 | /// Return true if this shuffle transposes the elements of its inputs without |
2239 | /// changing the length of the vectors. This operation may also be known as a |
2240 | /// merge or interleave. See the description for isTransposeMask() for the |
2241 | /// exact specification. |
2242 | /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> |
2243 | bool isTranspose() const { |
2244 | return !changesLength() && isTransposeMask(getMask()); |
2245 | } |
2246 | |
2247 | /// Return true if this shuffle mask is an extract subvector mask. |
2248 | /// A valid extract subvector mask returns a smaller vector from a single |
2249 | /// source operand. The base extraction index is returned as well. |
2250 | static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, |
2251 | int &Index); |
2252 | static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, |
2253 | int &Index) { |
2254 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2254, __PRETTY_FUNCTION__)); |
2255 | SmallVector<int, 16> MaskAsInts; |
2256 | getShuffleMask(Mask, MaskAsInts); |
2257 | return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); |
2258 | } |
2259 | |
2260 | /// Return true if this shuffle mask is an extract subvector mask. |
2261 | bool isExtractSubvectorMask(int &Index) const { |
2262 | int NumSrcElts = Op<0>()->getType()->getVectorNumElements(); |
2263 | return isExtractSubvectorMask(getMask(), NumSrcElts, Index); |
2264 | } |
2265 | |
2266 | /// Change values in a shuffle permute mask assuming the two vector operands |
2267 | /// of length InVecNumElts have swapped position. |
2268 | static void commuteShuffleMask(MutableArrayRef<int> Mask, |
2269 | unsigned InVecNumElts) { |
2270 | for (int &Idx : Mask) { |
2271 | if (Idx == -1) |
2272 | continue; |
2273 | Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; |
2274 | assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&((Idx >= 0 && Idx < (int)InVecNumElts * 2 && "shufflevector mask index out of range") ? static_cast<void > (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2275, __PRETTY_FUNCTION__)) |
2275 | "shufflevector mask index out of range")((Idx >= 0 && Idx < (int)InVecNumElts * 2 && "shufflevector mask index out of range") ? static_cast<void > (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2275, __PRETTY_FUNCTION__)); |
2276 | } |
2277 | } |
2278 | |
2279 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2280 | static bool classof(const Instruction *I) { |
2281 | return I->getOpcode() == Instruction::ShuffleVector; |
2282 | } |
2283 | static bool classof(const Value *V) { |
2284 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2285 | } |
2286 | }; |
2287 | |
2288 | template <> |
2289 | struct OperandTraits<ShuffleVectorInst> : |
2290 | public FixedNumOperandTraits<ShuffleVectorInst, 3> { |
2291 | }; |
2292 | |
2293 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() { return OperandTraits<ShuffleVectorInst>::op_begin(this ); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst:: op_begin() const { return OperandTraits<ShuffleVectorInst> ::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst ::op_iterator ShuffleVectorInst::op_end() { return OperandTraits <ShuffleVectorInst>::op_end(this); } ShuffleVectorInst:: const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits <ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst *>(this)); } Value *ShuffleVectorInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<ShuffleVectorInst >::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2293, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<ShuffleVectorInst>::op_begin(const_cast <ShuffleVectorInst*>(this))[i_nocapture].get()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture, Value *Val_nocapture ) { ((i_nocapture < OperandTraits<ShuffleVectorInst> ::operands(this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2293, __PRETTY_FUNCTION__)); OperandTraits<ShuffleVectorInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ShuffleVectorInst::getNumOperands() const { return OperandTraits <ShuffleVectorInst>::operands(this); } template <int Idx_nocapture> Use &ShuffleVectorInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &ShuffleVectorInst::Op() const { return this ->OpFrom<Idx_nocapture>(this); } |
2294 | |
2295 | //===----------------------------------------------------------------------===// |
2296 | // ExtractValueInst Class |
2297 | //===----------------------------------------------------------------------===// |
2298 | |
2299 | /// This instruction extracts a struct member or array |
2300 | /// element value from an aggregate value. |
2301 | /// |
2302 | class ExtractValueInst : public UnaryInstruction { |
2303 | SmallVector<unsigned, 4> Indices; |
2304 | |
2305 | ExtractValueInst(const ExtractValueInst &EVI); |
2306 | |
2307 | /// Constructors - Create a extractvalue instruction with a base aggregate |
2308 | /// value and a list of indices. The first ctor can optionally insert before |
2309 | /// an existing instruction, the second appends the new instruction to the |
2310 | /// specified BasicBlock. |
2311 | inline ExtractValueInst(Value *Agg, |
2312 | ArrayRef<unsigned> Idxs, |
2313 | const Twine &NameStr, |
2314 | Instruction *InsertBefore); |
2315 | inline ExtractValueInst(Value *Agg, |
2316 | ArrayRef<unsigned> Idxs, |
2317 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2318 | |
2319 | void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); |
2320 | |
2321 | protected: |
2322 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2323 | friend class Instruction; |
2324 | |
2325 | ExtractValueInst *cloneImpl() const; |
2326 | |
2327 | public: |
2328 | static ExtractValueInst *Create(Value *Agg, |
2329 | ArrayRef<unsigned> Idxs, |
2330 | const Twine &NameStr = "", |
2331 | Instruction *InsertBefore = nullptr) { |
2332 | return new |
2333 | ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); |
2334 | } |
2335 | |
2336 | static ExtractValueInst *Create(Value *Agg, |
2337 | ArrayRef<unsigned> Idxs, |
2338 | const Twine &NameStr, |
2339 | BasicBlock *InsertAtEnd) { |
2340 | return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); |
2341 | } |
2342 | |
2343 | /// Returns the type of the element that would be extracted |
2344 | /// with an extractvalue instruction with the specified parameters. |
2345 | /// |
2346 | /// Null is returned if the indices are invalid for the specified type. |
2347 | static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); |
2348 | |
2349 | using idx_iterator = const unsigned*; |
2350 | |
2351 | inline idx_iterator idx_begin() const { return Indices.begin(); } |
2352 | inline idx_iterator idx_end() const { return Indices.end(); } |
2353 | inline iterator_range<idx_iterator> indices() const { |
2354 | return make_range(idx_begin(), idx_end()); |
2355 | } |
2356 | |
2357 | Value *getAggregateOperand() { |
2358 | return getOperand(0); |
2359 | } |
2360 | const Value *getAggregateOperand() const { |
2361 | return getOperand(0); |
2362 | } |
2363 | static unsigned getAggregateOperandIndex() { |
2364 | return 0U; // get index for modifying correct operand |
2365 | } |
2366 | |
2367 | ArrayRef<unsigned> getIndices() const { |
2368 | return Indices; |
2369 | } |
2370 | |
2371 | unsigned getNumIndices() const { |
2372 | return (unsigned)Indices.size(); |
2373 | } |
2374 | |
2375 | bool hasIndices() const { |
2376 | return true; |
2377 | } |
2378 | |
2379 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2380 | static bool classof(const Instruction *I) { |
2381 | return I->getOpcode() == Instruction::ExtractValue; |
2382 | } |
2383 | static bool classof(const Value *V) { |
2384 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2385 | } |
2386 | }; |
2387 | |
2388 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2389 | ArrayRef<unsigned> Idxs, |
2390 | const Twine &NameStr, |
2391 | Instruction *InsertBefore) |
2392 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2393 | ExtractValue, Agg, InsertBefore) { |
2394 | init(Idxs, NameStr); |
2395 | } |
2396 | |
2397 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2398 | ArrayRef<unsigned> Idxs, |
2399 | const Twine &NameStr, |
2400 | BasicBlock *InsertAtEnd) |
2401 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2402 | ExtractValue, Agg, InsertAtEnd) { |
2403 | init(Idxs, NameStr); |
2404 | } |
2405 | |
2406 | //===----------------------------------------------------------------------===// |
2407 | // InsertValueInst Class |
2408 | //===----------------------------------------------------------------------===// |
2409 | |
2410 | /// This instruction inserts a struct field of array element |
2411 | /// value into an aggregate value. |
2412 | /// |
2413 | class InsertValueInst : public Instruction { |
2414 | SmallVector<unsigned, 4> Indices; |
2415 | |
2416 | InsertValueInst(const InsertValueInst &IVI); |
2417 | |
2418 | /// Constructors - Create a insertvalue instruction with a base aggregate |
2419 | /// value, a value to insert, and a list of indices. The first ctor can |
2420 | /// optionally insert before an existing instruction, the second appends |
2421 | /// the new instruction to the specified BasicBlock. |
2422 | inline InsertValueInst(Value *Agg, Value *Val, |
2423 | ArrayRef<unsigned> Idxs, |
2424 | const Twine &NameStr, |
2425 | Instruction *InsertBefore); |
2426 | inline InsertValueInst(Value *Agg, Value *Val, |
2427 | ArrayRef<unsigned> Idxs, |
2428 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2429 | |
2430 | /// Constructors - These two constructors are convenience methods because one |
2431 | /// and two index insertvalue instructions are so common. |
2432 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, |
2433 | const Twine &NameStr = "", |
2434 | Instruction *InsertBefore = nullptr); |
2435 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, |
2436 | BasicBlock *InsertAtEnd); |
2437 | |
2438 | void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, |
2439 | const Twine &NameStr); |
2440 | |
2441 | protected: |
2442 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2443 | friend class Instruction; |
2444 | |
2445 | InsertValueInst *cloneImpl() const; |
2446 | |
2447 | public: |
2448 | // allocate space for exactly two operands |
2449 | void *operator new(size_t s) { |
2450 | return User::operator new(s, 2); |
2451 | } |
2452 | |
2453 | static InsertValueInst *Create(Value *Agg, Value *Val, |
2454 | ArrayRef<unsigned> Idxs, |
2455 | const Twine &NameStr = "", |
2456 | Instruction *InsertBefore = nullptr) { |
2457 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); |
2458 | } |
2459 | |
2460 | static InsertValueInst *Create(Value *Agg, Value *Val, |
2461 | ArrayRef<unsigned> Idxs, |
2462 | const Twine &NameStr, |
2463 | BasicBlock *InsertAtEnd) { |
2464 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); |
2465 | } |
2466 | |
2467 | /// Transparently provide more efficient getOperand methods. |
2468 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
2469 | |
2470 | using idx_iterator = const unsigned*; |
2471 | |
2472 | inline idx_iterator idx_begin() const { return Indices.begin(); } |
2473 | inline idx_iterator idx_end() const { return Indices.end(); } |
2474 | inline iterator_range<idx_iterator> indices() const { |
2475 | return make_range(idx_begin(), idx_end()); |
2476 | } |
2477 | |
2478 | Value *getAggregateOperand() { |
2479 | return getOperand(0); |
2480 | } |
2481 | const Value *getAggregateOperand() const { |
2482 | return getOperand(0); |
2483 | } |
2484 | static unsigned getAggregateOperandIndex() { |
2485 | return 0U; // get index for modifying correct operand |
2486 | } |
2487 | |
2488 | Value *getInsertedValueOperand() { |
2489 | return getOperand(1); |
2490 | } |
2491 | const Value *getInsertedValueOperand() const { |
2492 | return getOperand(1); |
2493 | } |
2494 | static unsigned getInsertedValueOperandIndex() { |
2495 | return 1U; // get index for modifying correct operand |
2496 | } |
2497 | |
2498 | ArrayRef<unsigned> getIndices() const { |
2499 | return Indices; |
2500 | } |
2501 | |
2502 | unsigned getNumIndices() const { |
2503 | return (unsigned)Indices.size(); |
2504 | } |
2505 | |
2506 | bool hasIndices() const { |
2507 | return true; |
2508 | } |
2509 | |
2510 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2511 | static bool classof(const Instruction *I) { |
2512 | return I->getOpcode() == Instruction::InsertValue; |
2513 | } |
2514 | static bool classof(const Value *V) { |
2515 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2516 | } |
2517 | }; |
2518 | |
2519 | template <> |
2520 | struct OperandTraits<InsertValueInst> : |
2521 | public FixedNumOperandTraits<InsertValueInst, 2> { |
2522 | }; |
2523 | |
2524 | InsertValueInst::InsertValueInst(Value *Agg, |
2525 | Value *Val, |
2526 | ArrayRef<unsigned> Idxs, |
2527 | const Twine &NameStr, |
2528 | Instruction *InsertBefore) |
2529 | : Instruction(Agg->getType(), InsertValue, |
2530 | OperandTraits<InsertValueInst>::op_begin(this), |
2531 | 2, InsertBefore) { |
2532 | init(Agg, Val, Idxs, NameStr); |
2533 | } |
2534 | |
2535 | InsertValueInst::InsertValueInst(Value *Agg, |
2536 | Value *Val, |
2537 | ArrayRef<unsigned> Idxs, |
2538 | const Twine &NameStr, |
2539 | BasicBlock *InsertAtEnd) |
2540 | : Instruction(Agg->getType(), InsertValue, |
2541 | OperandTraits<InsertValueInst>::op_begin(this), |
2542 | 2, InsertAtEnd) { |
2543 | init(Agg, Val, Idxs, NameStr); |
2544 | } |
2545 | |
2546 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst ::const_op_iterator InsertValueInst::op_begin() const { return OperandTraits<InsertValueInst>::op_begin(const_cast< InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst ::op_end() { return OperandTraits<InsertValueInst>::op_end (this); } InsertValueInst::const_op_iterator InsertValueInst:: op_end() const { return OperandTraits<InsertValueInst>:: op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<InsertValueInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2546, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<InsertValueInst>::op_begin(const_cast< InsertValueInst*>(this))[i_nocapture].get()); } void InsertValueInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<InsertValueInst>::operands (this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2546, __PRETTY_FUNCTION__)); OperandTraits<InsertValueInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned InsertValueInst::getNumOperands() const { return OperandTraits <InsertValueInst>::operands(this); } template <int Idx_nocapture > Use &InsertValueInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &InsertValueInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); } |
2547 | |
2548 | //===----------------------------------------------------------------------===// |
2549 | // PHINode Class |
2550 | //===----------------------------------------------------------------------===// |
2551 | |
2552 | // PHINode - The PHINode class is used to represent the magical mystical PHI |
2553 | // node, that can not exist in nature, but can be synthesized in a computer |
2554 | // scientist's overactive imagination. |
2555 | // |
2556 | class PHINode : public Instruction { |
2557 | /// The number of operands actually allocated. NumOperands is |
2558 | /// the number actually in use. |
2559 | unsigned ReservedSpace; |
2560 | |
2561 | PHINode(const PHINode &PN); |
2562 | |
2563 | explicit PHINode(Type *Ty, unsigned NumReservedValues, |
2564 | const Twine &NameStr = "", |
2565 | Instruction *InsertBefore = nullptr) |
2566 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), |
2567 | ReservedSpace(NumReservedValues) { |
2568 | setName(NameStr); |
2569 | allocHungoffUses(ReservedSpace); |
2570 | } |
2571 | |
2572 | PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, |
2573 | BasicBlock *InsertAtEnd) |
2574 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), |
2575 | ReservedSpace(NumReservedValues) { |
2576 | setName(NameStr); |
2577 | allocHungoffUses(ReservedSpace); |
2578 | } |
2579 | |
2580 | protected: |
2581 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2582 | friend class Instruction; |
2583 | |
2584 | PHINode *cloneImpl() const; |
2585 | |
2586 | // allocHungoffUses - this is more complicated than the generic |
2587 | // User::allocHungoffUses, because we have to allocate Uses for the incoming |
2588 | // values and pointers to the incoming blocks, all in one allocation. |
2589 | void allocHungoffUses(unsigned N) { |
2590 | User::allocHungoffUses(N, /* IsPhi */ true); |
2591 | } |
2592 | |
2593 | public: |
2594 | /// Constructors - NumReservedValues is a hint for the number of incoming |
2595 | /// edges that this phi node will have (use 0 if you really have no idea). |
2596 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, |
2597 | const Twine &NameStr = "", |
2598 | Instruction *InsertBefore = nullptr) { |
2599 | return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); |
2600 | } |
2601 | |
2602 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, |
2603 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
2604 | return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); |
2605 | } |
2606 | |
2607 | /// Provide fast operand accessors |
2608 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
2609 | |
2610 | // Block iterator interface. This provides access to the list of incoming |
2611 | // basic blocks, which parallels the list of incoming values. |
2612 | |
2613 | using block_iterator = BasicBlock **; |
2614 | using const_block_iterator = BasicBlock * const *; |
2615 | |
2616 | block_iterator block_begin() { |
2617 | Use::UserRef *ref = |
2618 | reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace); |
2619 | return reinterpret_cast<block_iterator>(ref + 1); |
2620 | } |
2621 | |
2622 | const_block_iterator block_begin() const { |
2623 | const Use::UserRef *ref = |
2624 | reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace); |
2625 | return reinterpret_cast<const_block_iterator>(ref + 1); |
2626 | } |
2627 | |
2628 | block_iterator block_end() { |
2629 | return block_begin() + getNumOperands(); |
2630 | } |
2631 | |
2632 | const_block_iterator block_end() const { |
2633 | return block_begin() + getNumOperands(); |
2634 | } |
2635 | |
2636 | iterator_range<block_iterator> blocks() { |
2637 | return make_range(block_begin(), block_end()); |
2638 | } |
2639 | |
2640 | iterator_range<const_block_iterator> blocks() const { |
2641 | return make_range(block_begin(), block_end()); |
2642 | } |
2643 | |
2644 | op_range incoming_values() { return operands(); } |
2645 | |
2646 | const_op_range incoming_values() const { return operands(); } |
2647 | |
2648 | /// Return the number of incoming edges |
2649 | /// |
2650 | unsigned getNumIncomingValues() const { return getNumOperands(); } |
2651 | |
2652 | /// Return incoming value number x |
2653 | /// |
2654 | Value *getIncomingValue(unsigned i) const { |
2655 | return getOperand(i); |
2656 | } |
2657 | void setIncomingValue(unsigned i, Value *V) { |
2658 | assert(V && "PHI node got a null value!")((V && "PHI node got a null value!") ? static_cast< void> (0) : __assert_fail ("V && \"PHI node got a null value!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2658, __PRETTY_FUNCTION__)); |
2659 | assert(getType() == V->getType() &&((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!" ) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2660, __PRETTY_FUNCTION__)) |
2660 | "All operands to PHI node must be the same type as the PHI node!")((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!" ) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2660, __PRETTY_FUNCTION__)); |
2661 | setOperand(i, V); |
2662 | } |
2663 | |
2664 | static unsigned getOperandNumForIncomingValue(unsigned i) { |
2665 | return i; |
2666 | } |
2667 | |
2668 | static unsigned getIncomingValueNumForOperand(unsigned i) { |
2669 | return i; |
2670 | } |
2671 | |
2672 | /// Return incoming basic block number @p i. |
2673 | /// |
2674 | BasicBlock *getIncomingBlock(unsigned i) const { |
2675 | return block_begin()[i]; |
2676 | } |
2677 | |
2678 | /// Return incoming basic block corresponding |
2679 | /// to an operand of the PHI. |
2680 | /// |
2681 | BasicBlock *getIncomingBlock(const Use &U) const { |
2682 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")((this == U.getUser() && "Iterator doesn't point to PHI's Uses?" ) ? static_cast<void> (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2682, __PRETTY_FUNCTION__)); |
2683 | return getIncomingBlock(unsigned(&U - op_begin())); |
2684 | } |
2685 | |
2686 | /// Return incoming basic block corresponding |
2687 | /// to value use iterator. |
2688 | /// |
2689 | BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { |
2690 | return getIncomingBlock(I.getUse()); |
2691 | } |
2692 | |
2693 | void setIncomingBlock(unsigned i, BasicBlock *BB) { |
2694 | assert(BB && "PHI node got a null basic block!")((BB && "PHI node got a null basic block!") ? static_cast <void> (0) : __assert_fail ("BB && \"PHI node got a null basic block!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2694, __PRETTY_FUNCTION__)); |
2695 | block_begin()[i] = BB; |
2696 | } |
2697 | |
2698 | /// Replace every incoming basic block \p Old to basic block \p New. |
2699 | void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { |
2700 | assert(New && Old && "PHI node got a null basic block!")((New && Old && "PHI node got a null basic block!" ) ? static_cast<void> (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2700, __PRETTY_FUNCTION__)); |
2701 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) |
2702 | if (getIncomingBlock(Op) == Old) |
2703 | setIncomingBlock(Op, New); |
2704 | } |
2705 | |
2706 | /// Add an incoming value to the end of the PHI list |
2707 | /// |
2708 | void addIncoming(Value *V, BasicBlock *BB) { |
2709 | if (getNumOperands() == ReservedSpace) |
2710 | growOperands(); // Get more space! |
2711 | // Initialize some new operands. |
2712 | setNumHungOffUseOperands(getNumOperands() + 1); |
2713 | setIncomingValue(getNumOperands() - 1, V); |
2714 | setIncomingBlock(getNumOperands() - 1, BB); |
2715 | } |
2716 | |
2717 | /// Remove an incoming value. This is useful if a |
2718 | /// predecessor basic block is deleted. The value removed is returned. |
2719 | /// |
2720 | /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty |
2721 | /// is true), the PHI node is destroyed and any uses of it are replaced with |
2722 | /// dummy values. The only time there should be zero incoming values to a PHI |
2723 | /// node is when the block is dead, so this strategy is sound. |
2724 | /// |
2725 | Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); |
2726 | |
2727 | Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { |
2728 | int Idx = getBasicBlockIndex(BB); |
2729 | assert(Idx >= 0 && "Invalid basic block argument to remove!")((Idx >= 0 && "Invalid basic block argument to remove!" ) ? static_cast<void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2729, __PRETTY_FUNCTION__)); |
2730 | return removeIncomingValue(Idx, DeletePHIIfEmpty); |
2731 | } |
2732 | |
2733 | /// Return the first index of the specified basic |
2734 | /// block in the value list for this PHI. Returns -1 if no instance. |
2735 | /// |
2736 | int getBasicBlockIndex(const BasicBlock *BB) const { |
2737 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) |
2738 | if (block_begin()[i] == BB) |
2739 | return i; |
2740 | return -1; |
2741 | } |
2742 | |
2743 | Value *getIncomingValueForBlock(const BasicBlock *BB) const { |
2744 | int Idx = getBasicBlockIndex(BB); |
2745 | assert(Idx >= 0 && "Invalid basic block argument!")((Idx >= 0 && "Invalid basic block argument!") ? static_cast <void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2745, __PRETTY_FUNCTION__)); |
2746 | return getIncomingValue(Idx); |
2747 | } |
2748 | |
2749 | /// Set every incoming value(s) for block \p BB to \p V. |
2750 | void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { |
2751 | assert(BB && "PHI node got a null basic block!")((BB && "PHI node got a null basic block!") ? static_cast <void> (0) : __assert_fail ("BB && \"PHI node got a null basic block!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2751, __PRETTY_FUNCTION__)); |
2752 | bool Found = false; |
2753 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) |
2754 | if (getIncomingBlock(Op) == BB) { |
2755 | Found = true; |
2756 | setIncomingValue(Op, V); |
2757 | } |
2758 | (void)Found; |
2759 | assert(Found && "Invalid basic block argument to set!")((Found && "Invalid basic block argument to set!") ? static_cast <void> (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2759, __PRETTY_FUNCTION__)); |
2760 | } |
2761 | |
2762 | /// If the specified PHI node always merges together the |
2763 | /// same value, return the value, otherwise return null. |
2764 | Value *hasConstantValue() const; |
2765 | |
2766 | /// Whether the specified PHI node always merges |
2767 | /// together the same value, assuming undefs are equal to a unique |
2768 | /// non-undef value. |
2769 | bool hasConstantOrUndefValue() const; |
2770 | |
2771 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
2772 | static bool classof(const Instruction *I) { |
2773 | return I->getOpcode() == Instruction::PHI; |
2774 | } |
2775 | static bool classof(const Value *V) { |
2776 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2777 | } |
2778 | |
2779 | private: |
2780 | void growOperands(); |
2781 | }; |
2782 | |
2783 | template <> |
2784 | struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { |
2785 | }; |
2786 | |
2787 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits <PHINode>::op_begin(this); } PHINode::const_op_iterator PHINode::op_begin() const { return OperandTraits<PHINode> ::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator PHINode::op_end() { return OperandTraits<PHINode>::op_end (this); } PHINode::const_op_iterator PHINode::op_end() const { return OperandTraits<PHINode>::op_end(const_cast<PHINode *>(this)); } Value *PHINode::getOperand(unsigned i_nocapture ) const { ((i_nocapture < OperandTraits<PHINode>::operands (this) && "getOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2787, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<PHINode>::op_begin(const_cast<PHINode *>(this))[i_nocapture].get()); } void PHINode::setOperand( unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<PHINode>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2787, __PRETTY_FUNCTION__)); OperandTraits<PHINode>:: op_begin(this)[i_nocapture] = Val_nocapture; } unsigned PHINode ::getNumOperands() const { return OperandTraits<PHINode> ::operands(this); } template <int Idx_nocapture> Use & PHINode::Op() { return this->OpFrom<Idx_nocapture>(this ); } template <int Idx_nocapture> const Use &PHINode ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } |
2788 | |
2789 | //===----------------------------------------------------------------------===// |
2790 | // LandingPadInst Class |
2791 | //===----------------------------------------------------------------------===// |
2792 | |
2793 | //===--------------------------------------------------------------------------- |
2794 | /// The landingpad instruction holds all of the information |
2795 | /// necessary to generate correct exception handling. The landingpad instruction |
2796 | /// cannot be moved from the top of a landing pad block, which itself is |
2797 | /// accessible only from the 'unwind' edge of an invoke. This uses the |
2798 | /// SubclassData field in Value to store whether or not the landingpad is a |
2799 | /// cleanup. |
2800 | /// |
2801 | class LandingPadInst : public Instruction { |
2802 | /// The number of operands actually allocated. NumOperands is |
2803 | /// the number actually in use. |
2804 | unsigned ReservedSpace; |
2805 | |
2806 | LandingPadInst(const LandingPadInst &LP); |
2807 | |
2808 | public: |
2809 | enum ClauseType { Catch, Filter }; |
2810 | |
2811 | private: |
2812 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, |
2813 | const Twine &NameStr, Instruction *InsertBefore); |
2814 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, |
2815 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2816 | |
2817 | // Allocate space for exactly zero operands. |
2818 | void *operator new(size_t s) { |
2819 | return User::operator new(s); |
2820 | } |
2821 | |
2822 | void growOperands(unsigned Size); |
2823 | void init(unsigned NumReservedValues, const Twine &NameStr); |
2824 | |
2825 | protected: |
2826 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2827 | friend class Instruction; |
2828 | |
2829 | LandingPadInst *cloneImpl() const; |
2830 | |
2831 | public: |
2832 | /// Constructors - NumReservedClauses is a hint for the number of incoming |
2833 | /// clauses that this landingpad will have (use 0 if you really have no idea). |
2834 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, |
2835 | const Twine &NameStr = "", |
2836 | Instruction *InsertBefore = nullptr); |
2837 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, |
2838 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2839 | |
2840 | /// Provide fast operand accessors |
2841 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
2842 | |
2843 | /// Return 'true' if this landingpad instruction is a |
2844 | /// cleanup. I.e., it should be run when unwinding even if its landing pad |
2845 | /// doesn't catch the exception. |
2846 | bool isCleanup() const { return getSubclassDataFromInstruction() & 1; } |
2847 | |
2848 | /// Indicate that this landingpad instruction is a cleanup. |
2849 | void setCleanup(bool V) { |
2850 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | |
2851 | (V ? 1 : 0)); |
2852 | } |
2853 | |
2854 | /// Add a catch or filter clause to the landing pad. |
2855 | void addClause(Constant *ClauseVal); |
2856 | |
2857 | /// Get the value of the clause at index Idx. Use isCatch/isFilter to |
2858 | /// determine what type of clause this is. |
2859 | Constant *getClause(unsigned Idx) const { |
2860 | return cast<Constant>(getOperandList()[Idx]); |
2861 | } |
2862 | |
2863 | /// Return 'true' if the clause and index Idx is a catch clause. |
2864 | bool isCatch(unsigned Idx) const { |
2865 | return !isa<ArrayType>(getOperandList()[Idx]->getType()); |
2866 | } |
2867 | |
2868 | /// Return 'true' if the clause and index Idx is a filter clause. |
2869 | bool isFilter(unsigned Idx) const { |
2870 | return isa<ArrayType>(getOperandList()[Idx]->getType()); |
2871 | } |
2872 | |
2873 | /// Get the number of clauses for this landing pad. |
2874 | unsigned getNumClauses() const { return getNumOperands(); } |
2875 | |
2876 | /// Grow the size of the operand list to accommodate the new |
2877 | /// number of clauses. |
2878 | void reserveClauses(unsigned Size) { growOperands(Size); } |
2879 | |
2880 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2881 | static bool classof(const Instruction *I) { |
2882 | return I->getOpcode() == Instruction::LandingPad; |
2883 | } |
2884 | static bool classof(const Value *V) { |
2885 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2886 | } |
2887 | }; |
2888 | |
2889 | template <> |
2890 | struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { |
2891 | }; |
2892 | |
2893 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst ::const_op_iterator LandingPadInst::op_begin() const { return OperandTraits<LandingPadInst>::op_begin(const_cast< LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst ::op_end() { return OperandTraits<LandingPadInst>::op_end (this); } LandingPadInst::const_op_iterator LandingPadInst::op_end () const { return OperandTraits<LandingPadInst>::op_end (const_cast<LandingPadInst*>(this)); } Value *LandingPadInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<LandingPadInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2893, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<LandingPadInst>::op_begin(const_cast< LandingPadInst*>(this))[i_nocapture].get()); } void LandingPadInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<LandingPadInst>::operands (this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2893, __PRETTY_FUNCTION__)); OperandTraits<LandingPadInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned LandingPadInst::getNumOperands() const { return OperandTraits <LandingPadInst>::operands(this); } template <int Idx_nocapture > Use &LandingPadInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &LandingPadInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); } |
2894 | |
2895 | //===----------------------------------------------------------------------===// |
2896 | // ReturnInst Class |
2897 | //===----------------------------------------------------------------------===// |
2898 | |
2899 | //===--------------------------------------------------------------------------- |
2900 | /// Return a value (possibly void), from a function. Execution |
2901 | /// does not continue in this function any longer. |
2902 | /// |
2903 | class ReturnInst : public Instruction { |
2904 | ReturnInst(const ReturnInst &RI); |
2905 | |
2906 | private: |
2907 | // ReturnInst constructors: |
2908 | // ReturnInst() - 'ret void' instruction |
2909 | // ReturnInst( null) - 'ret void' instruction |
2910 | // ReturnInst(Value* X) - 'ret X' instruction |
2911 | // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I |
2912 | // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I |
2913 | // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B |
2914 | // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B |
2915 | // |
2916 | // NOTE: If the Value* passed is of type void then the constructor behaves as |
2917 | // if it was passed NULL. |
2918 | explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, |
2919 | Instruction *InsertBefore = nullptr); |
2920 | ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); |
2921 | explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); |
2922 | |
2923 | protected: |
2924 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2925 | friend class Instruction; |
2926 | |
2927 | ReturnInst *cloneImpl() const; |
2928 | |
2929 | public: |
2930 | static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, |
2931 | Instruction *InsertBefore = nullptr) { |
2932 | return new(!!retVal) ReturnInst(C, retVal, InsertBefore); |
2933 | } |
2934 | |
2935 | static ReturnInst* Create(LLVMContext &C, Value *retVal, |
2936 | BasicBlock *InsertAtEnd) { |
2937 | return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); |
2938 | } |
2939 | |
2940 | static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { |
2941 | return new(0) ReturnInst(C, InsertAtEnd); |
2942 | } |
2943 | |
2944 | /// Provide fast operand accessors |
2945 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
2946 | |
2947 | /// Convenience accessor. Returns null if there is no return value. |
2948 | Value *getReturnValue() const { |
2949 | return getNumOperands() != 0 ? getOperand(0) : nullptr; |
2950 | } |
2951 | |
2952 | unsigned getNumSuccessors() const { return 0; } |
2953 | |
2954 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2955 | static bool classof(const Instruction *I) { |
2956 | return (I->getOpcode() == Instruction::Ret); |
2957 | } |
2958 | static bool classof(const Value *V) { |
2959 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2960 | } |
2961 | |
2962 | private: |
2963 | BasicBlock *getSuccessor(unsigned idx) const { |
2964 | llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2964); |
2965 | } |
2966 | |
2967 | void setSuccessor(unsigned idx, BasicBlock *B) { |
2968 | llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2968); |
2969 | } |
2970 | }; |
2971 | |
2972 | template <> |
2973 | struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> { |
2974 | }; |
2975 | |
2976 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits <ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator ReturnInst::op_begin() const { return OperandTraits<ReturnInst >::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst ::op_iterator ReturnInst::op_end() { return OperandTraits< ReturnInst>::op_end(this); } ReturnInst::const_op_iterator ReturnInst::op_end() const { return OperandTraits<ReturnInst >::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<ReturnInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2976, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<ReturnInst>::op_begin(const_cast<ReturnInst *>(this))[i_nocapture].get()); } void ReturnInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<ReturnInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 2976, __PRETTY_FUNCTION__)); OperandTraits<ReturnInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ReturnInst ::getNumOperands() const { return OperandTraits<ReturnInst >::operands(this); } template <int Idx_nocapture> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & ReturnInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
2977 | |
2978 | //===----------------------------------------------------------------------===// |
2979 | // BranchInst Class |
2980 | //===----------------------------------------------------------------------===// |
2981 | |
2982 | //===--------------------------------------------------------------------------- |
2983 | /// Conditional or Unconditional Branch instruction. |
2984 | /// |
2985 | class BranchInst : public Instruction { |
2986 | /// Ops list - Branches are strange. The operands are ordered: |
2987 | /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because |
2988 | /// they don't have to check for cond/uncond branchness. These are mostly |
2989 | /// accessed relative from op_end(). |
2990 | BranchInst(const BranchInst &BI); |
2991 | // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): |
2992 | // BranchInst(BB *B) - 'br B' |
2993 | // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' |
2994 | // BranchInst(BB* B, Inst *I) - 'br B' insert before I |
2995 | // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I |
2996 | // BranchInst(BB* B, BB *I) - 'br B' insert at end |
2997 | // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end |
2998 | explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr); |
2999 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, |
3000 | Instruction *InsertBefore = nullptr); |
3001 | BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); |
3002 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, |
3003 | BasicBlock *InsertAtEnd); |
3004 | |
3005 | void AssertOK(); |
3006 | |
3007 | protected: |
3008 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3009 | friend class Instruction; |
3010 | |
3011 | BranchInst *cloneImpl() const; |
3012 | |
3013 | public: |
3014 | /// Iterator type that casts an operand to a basic block. |
3015 | /// |
3016 | /// This only makes sense because the successors are stored as adjacent |
3017 | /// operands for branch instructions. |
3018 | struct succ_op_iterator |
3019 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, |
3020 | std::random_access_iterator_tag, BasicBlock *, |
3021 | ptrdiff_t, BasicBlock *, BasicBlock *> { |
3022 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} |
3023 | |
3024 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3025 | BasicBlock *operator->() const { return operator*(); } |
3026 | }; |
3027 | |
3028 | /// The const version of `succ_op_iterator`. |
3029 | struct const_succ_op_iterator |
3030 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, |
3031 | std::random_access_iterator_tag, |
3032 | const BasicBlock *, ptrdiff_t, const BasicBlock *, |
3033 | const BasicBlock *> { |
3034 | explicit const_succ_op_iterator(const_value_op_iterator I) |
3035 | : iterator_adaptor_base(I) {} |
3036 | |
3037 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3038 | const BasicBlock *operator->() const { return operator*(); } |
3039 | }; |
3040 | |
3041 | static BranchInst *Create(BasicBlock *IfTrue, |
3042 | Instruction *InsertBefore = nullptr) { |
3043 | return new(1) BranchInst(IfTrue, InsertBefore); |
3044 | } |
3045 | |
3046 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, |
3047 | Value *Cond, Instruction *InsertBefore = nullptr) { |
3048 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); |
3049 | } |
3050 | |
3051 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { |
3052 | return new(1) BranchInst(IfTrue, InsertAtEnd); |
3053 | } |
3054 | |
3055 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, |
3056 | Value *Cond, BasicBlock *InsertAtEnd) { |
3057 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); |
3058 | } |
3059 | |
3060 | /// Transparently provide more efficient getOperand methods. |
3061 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
3062 | |
3063 | bool isUnconditional() const { return getNumOperands() == 1; } |
3064 | bool isConditional() const { return getNumOperands() == 3; } |
3065 | |
3066 | Value *getCondition() const { |
3067 | assert(isConditional() && "Cannot get condition of an uncond branch!")((isConditional() && "Cannot get condition of an uncond branch!" ) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3067, __PRETTY_FUNCTION__)); |
3068 | return Op<-3>(); |
3069 | } |
3070 | |
3071 | void setCondition(Value *V) { |
3072 | assert(isConditional() && "Cannot set condition of unconditional branch!")((isConditional() && "Cannot set condition of unconditional branch!" ) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3072, __PRETTY_FUNCTION__)); |
3073 | Op<-3>() = V; |
3074 | } |
3075 | |
3076 | unsigned getNumSuccessors() const { return 1+isConditional(); } |
3077 | |
3078 | BasicBlock *getSuccessor(unsigned i) const { |
3079 | assert(i < getNumSuccessors() && "Successor # out of range for Branch!")((i < getNumSuccessors() && "Successor # out of range for Branch!" ) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() && \"Successor # out of range for Branch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3079, __PRETTY_FUNCTION__)); |
3080 | return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); |
3081 | } |
3082 | |
3083 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
3084 | assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")((idx < getNumSuccessors() && "Successor # out of range for Branch!" ) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for Branch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3084, __PRETTY_FUNCTION__)); |
3085 | *(&Op<-1>() - idx) = NewSucc; |
3086 | } |
3087 | |
3088 | /// Swap the successors of this branch instruction. |
3089 | /// |
3090 | /// Swaps the successors of the branch instruction. This also swaps any |
3091 | /// branch weight metadata associated with the instruction so that it |
3092 | /// continues to map correctly to each operand. |
3093 | void swapSuccessors(); |
3094 | |
3095 | iterator_range<succ_op_iterator> successors() { |
3096 | return make_range( |
3097 | succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)), |
3098 | succ_op_iterator(value_op_end())); |
3099 | } |
3100 | |
3101 | iterator_range<const_succ_op_iterator> successors() const { |
3102 | return make_range(const_succ_op_iterator( |
3103 | std::next(value_op_begin(), isConditional() ? 1 : 0)), |
3104 | const_succ_op_iterator(value_op_end())); |
3105 | } |
3106 | |
3107 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3108 | static bool classof(const Instruction *I) { |
3109 | return (I->getOpcode() == Instruction::Br); |
3110 | } |
3111 | static bool classof(const Value *V) { |
3112 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3113 | } |
3114 | }; |
3115 | |
3116 | template <> |
3117 | struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> { |
3118 | }; |
3119 | |
3120 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits <BranchInst>::op_begin(this); } BranchInst::const_op_iterator BranchInst::op_begin() const { return OperandTraits<BranchInst >::op_begin(const_cast<BranchInst*>(this)); } BranchInst ::op_iterator BranchInst::op_end() { return OperandTraits< BranchInst>::op_end(this); } BranchInst::const_op_iterator BranchInst::op_end() const { return OperandTraits<BranchInst >::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<BranchInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3120, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<BranchInst>::op_begin(const_cast<BranchInst *>(this))[i_nocapture].get()); } void BranchInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<BranchInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3120, __PRETTY_FUNCTION__)); OperandTraits<BranchInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned BranchInst ::getNumOperands() const { return OperandTraits<BranchInst >::operands(this); } template <int Idx_nocapture> Use &BranchInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & BranchInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
3121 | |
3122 | //===----------------------------------------------------------------------===// |
3123 | // SwitchInst Class |
3124 | //===----------------------------------------------------------------------===// |
3125 | |
3126 | //===--------------------------------------------------------------------------- |
3127 | /// Multiway switch |
3128 | /// |
3129 | class SwitchInst : public Instruction { |
3130 | unsigned ReservedSpace; |
3131 | |
3132 | // Operand[0] = Value to switch on |
3133 | // Operand[1] = Default basic block destination |
3134 | // Operand[2n ] = Value to match |
3135 | // Operand[2n+1] = BasicBlock to go to on match |
3136 | SwitchInst(const SwitchInst &SI); |
3137 | |
3138 | /// Create a new switch instruction, specifying a value to switch on and a |
3139 | /// default destination. The number of additional cases can be specified here |
3140 | /// to make memory allocation more efficient. This constructor can also |
3141 | /// auto-insert before another instruction. |
3142 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, |
3143 | Instruction *InsertBefore); |
3144 | |
3145 | /// Create a new switch instruction, specifying a value to switch on and a |
3146 | /// default destination. The number of additional cases can be specified here |
3147 | /// to make memory allocation more efficient. This constructor also |
3148 | /// auto-inserts at the end of the specified BasicBlock. |
3149 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, |
3150 | BasicBlock *InsertAtEnd); |
3151 | |
3152 | // allocate space for exactly zero operands |
3153 | void *operator new(size_t s) { |
3154 | return User::operator new(s); |
3155 | } |
3156 | |
3157 | void init(Value *Value, BasicBlock *Default, unsigned NumReserved); |
3158 | void growOperands(); |
3159 | |
3160 | protected: |
3161 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3162 | friend class Instruction; |
3163 | |
3164 | SwitchInst *cloneImpl() const; |
3165 | |
3166 | public: |
3167 | // -2 |
3168 | static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); |
3169 | |
3170 | template <typename CaseHandleT> class CaseIteratorImpl; |
3171 | |
3172 | /// A handle to a particular switch case. It exposes a convenient interface |
3173 | /// to both the case value and the successor block. |
3174 | /// |
3175 | /// We define this as a template and instantiate it to form both a const and |
3176 | /// non-const handle. |
3177 | template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> |
3178 | class CaseHandleImpl { |
3179 | // Directly befriend both const and non-const iterators. |
3180 | friend class SwitchInst::CaseIteratorImpl< |
3181 | CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; |
3182 | |
3183 | protected: |
3184 | // Expose the switch type we're parameterized with to the iterator. |
3185 | using SwitchInstType = SwitchInstT; |
3186 | |
3187 | SwitchInstT *SI; |
3188 | ptrdiff_t Index; |
3189 | |
3190 | CaseHandleImpl() = default; |
3191 | CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} |
3192 | |
3193 | public: |
3194 | /// Resolves case value for current case. |
3195 | ConstantIntT *getCaseValue() const { |
3196 | assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3197, __PRETTY_FUNCTION__)) |
3197 | "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3197, __PRETTY_FUNCTION__)); |
3198 | return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); |
3199 | } |
3200 | |
3201 | /// Resolves successor for current case. |
3202 | BasicBlockT *getCaseSuccessor() const { |
3203 | assert(((unsigned)Index < SI->getNumCases() ||((((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3205, __PRETTY_FUNCTION__)) |
3204 | (unsigned)Index == DefaultPseudoIndex) &&((((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3205, __PRETTY_FUNCTION__)) |
3205 | "Index out the number of cases.")((((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3205, __PRETTY_FUNCTION__)); |
3206 | return SI->getSuccessor(getSuccessorIndex()); |
3207 | } |
3208 | |
3209 | /// Returns number of current case. |
3210 | unsigned getCaseIndex() const { return Index; } |
3211 | |
3212 | /// Returns successor index for current case successor. |
3213 | unsigned getSuccessorIndex() const { |
3214 | assert(((unsigned)Index == DefaultPseudoIndex ||((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3216, __PRETTY_FUNCTION__)) |
3215 | (unsigned)Index < SI->getNumCases()) &&((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3216, __PRETTY_FUNCTION__)) |
3216 | "Index out the number of cases.")((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3216, __PRETTY_FUNCTION__)); |
3217 | return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; |
3218 | } |
3219 | |
3220 | bool operator==(const CaseHandleImpl &RHS) const { |
3221 | assert(SI == RHS.SI && "Incompatible operators.")((SI == RHS.SI && "Incompatible operators.") ? static_cast <void> (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3221, __PRETTY_FUNCTION__)); |
3222 | return Index == RHS.Index; |
3223 | } |
3224 | }; |
3225 | |
3226 | using ConstCaseHandle = |
3227 | CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; |
3228 | |
3229 | class CaseHandle |
3230 | : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { |
3231 | friend class SwitchInst::CaseIteratorImpl<CaseHandle>; |
3232 | |
3233 | public: |
3234 | CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} |
3235 | |
3236 | /// Sets the new value for current case. |
3237 | void setValue(ConstantInt *V) { |
3238 | assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3239, __PRETTY_FUNCTION__)) |
3239 | "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3239, __PRETTY_FUNCTION__)); |
3240 | SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); |
3241 | } |
3242 | |
3243 | /// Sets the new successor for current case. |
3244 | void setSuccessor(BasicBlock *S) { |
3245 | SI->setSuccessor(getSuccessorIndex(), S); |
3246 | } |
3247 | }; |
3248 | |
3249 | template <typename CaseHandleT> |
3250 | class CaseIteratorImpl |
3251 | : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, |
3252 | std::random_access_iterator_tag, |
3253 | CaseHandleT> { |
3254 | using SwitchInstT = typename CaseHandleT::SwitchInstType; |
3255 | |
3256 | CaseHandleT Case; |
3257 | |
3258 | public: |
3259 | /// Default constructed iterator is in an invalid state until assigned to |
3260 | /// a case for a particular switch. |
3261 | CaseIteratorImpl() = default; |
3262 | |
3263 | /// Initializes case iterator for given SwitchInst and for given |
3264 | /// case number. |
3265 | CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} |
3266 | |
3267 | /// Initializes case iterator for given SwitchInst and for given |
3268 | /// successor index. |
3269 | static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, |
3270 | unsigned SuccessorIndex) { |
3271 | assert(SuccessorIndex < SI->getNumSuccessors() &&((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!" ) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3272, __PRETTY_FUNCTION__)) |
3272 | "Successor index # out of range!")((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!" ) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3272, __PRETTY_FUNCTION__)); |
3273 | return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1) |
3274 | : CaseIteratorImpl(SI, DefaultPseudoIndex); |
3275 | } |
3276 | |
3277 | /// Support converting to the const variant. This will be a no-op for const |
3278 | /// variant. |
3279 | operator CaseIteratorImpl<ConstCaseHandle>() const { |
3280 | return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); |
3281 | } |
3282 | |
3283 | CaseIteratorImpl &operator+=(ptrdiff_t N) { |
3284 | // Check index correctness after addition. |
3285 | // Note: Index == getNumCases() means end(). |
3286 | assert(Case.Index + N >= 0 &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3288, __PRETTY_FUNCTION__)) |
3287 | (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3288, __PRETTY_FUNCTION__)) |
3288 | "Case.Index out the number of cases.")((Case.Index + N >= 0 && (unsigned)(Case.Index + N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3288, __PRETTY_FUNCTION__)); |
3289 | Case.Index += N; |
3290 | return *this; |
3291 | } |
3292 | CaseIteratorImpl &operator-=(ptrdiff_t N) { |
3293 | // Check index correctness after subtraction. |
3294 | // Note: Case.Index == getNumCases() means end(). |
3295 | assert(Case.Index - N >= 0 &&((Case.Index - N >= 0 && (unsigned)(Case.Index - N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3297, __PRETTY_FUNCTION__)) |
3296 | (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&((Case.Index - N >= 0 && (unsigned)(Case.Index - N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3297, __PRETTY_FUNCTION__)) |
3297 | "Case.Index out the number of cases.")((Case.Index - N >= 0 && (unsigned)(Case.Index - N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3297, __PRETTY_FUNCTION__)); |
3298 | Case.Index -= N; |
3299 | return *this; |
3300 | } |
3301 | ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { |
3302 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((Case.SI == RHS.Case.SI && "Incompatible operators." ) ? static_cast<void> (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3302, __PRETTY_FUNCTION__)); |
3303 | return Case.Index - RHS.Case.Index; |
3304 | } |
3305 | bool operator==(const CaseIteratorImpl &RHS) const { |
3306 | return Case == RHS.Case; |
3307 | } |
3308 | bool operator<(const CaseIteratorImpl &RHS) const { |
3309 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((Case.SI == RHS.Case.SI && "Incompatible operators." ) ? static_cast<void> (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3309, __PRETTY_FUNCTION__)); |
3310 | return Case.Index < RHS.Case.Index; |
3311 | } |
3312 | CaseHandleT &operator*() { return Case; } |
3313 | const CaseHandleT &operator*() const { return Case; } |
3314 | }; |
3315 | |
3316 | using CaseIt = CaseIteratorImpl<CaseHandle>; |
3317 | using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; |
3318 | |
3319 | static SwitchInst *Create(Value *Value, BasicBlock *Default, |
3320 | unsigned NumCases, |
3321 | Instruction *InsertBefore = nullptr) { |
3322 | return new SwitchInst(Value, Default, NumCases, InsertBefore); |
3323 | } |
3324 | |
3325 | static SwitchInst *Create(Value *Value, BasicBlock *Default, |
3326 | unsigned NumCases, BasicBlock *InsertAtEnd) { |
3327 | return new SwitchInst(Value, Default, NumCases, InsertAtEnd); |
3328 | } |
3329 | |
3330 | /// Provide fast operand accessors |
3331 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
3332 | |
3333 | // Accessor Methods for Switch stmt |
3334 | Value *getCondition() const { return getOperand(0); } |
3335 | void setCondition(Value *V) { setOperand(0, V); } |
3336 | |
3337 | BasicBlock *getDefaultDest() const { |
3338 | return cast<BasicBlock>(getOperand(1)); |
3339 | } |
3340 | |
3341 | void setDefaultDest(BasicBlock *DefaultCase) { |
3342 | setOperand(1, reinterpret_cast<Value*>(DefaultCase)); |
3343 | } |
3344 | |
3345 | /// Return the number of 'cases' in this switch instruction, excluding the |
3346 | /// default case. |
3347 | unsigned getNumCases() const { |
3348 | return getNumOperands()/2 - 1; |
3349 | } |
3350 | |
3351 | /// Returns a read/write iterator that points to the first case in the |
3352 | /// SwitchInst. |
3353 | CaseIt case_begin() { |
3354 | return CaseIt(this, 0); |
3355 | } |
3356 | |
3357 | /// Returns a read-only iterator that points to the first case in the |
3358 | /// SwitchInst. |
3359 | ConstCaseIt case_begin() const { |
3360 | return ConstCaseIt(this, 0); |
3361 | } |
3362 | |
3363 | /// Returns a read/write iterator that points one past the last in the |
3364 | /// SwitchInst. |
3365 | CaseIt case_end() { |
3366 | return CaseIt(this, getNumCases()); |
3367 | } |
3368 | |
3369 | /// Returns a read-only iterator that points one past the last in the |
3370 | /// SwitchInst. |
3371 | ConstCaseIt case_end() const { |
3372 | return ConstCaseIt(this, getNumCases()); |
3373 | } |
3374 | |
3375 | /// Iteration adapter for range-for loops. |
3376 | iterator_range<CaseIt> cases() { |
3377 | return make_range(case_begin(), case_end()); |
3378 | } |
3379 | |
3380 | /// Constant iteration adapter for range-for loops. |
3381 | iterator_range<ConstCaseIt> cases() const { |
3382 | return make_range(case_begin(), case_end()); |
3383 | } |
3384 | |
3385 | /// Returns an iterator that points to the default case. |
3386 | /// Note: this iterator allows to resolve successor only. Attempt |
3387 | /// to resolve case value causes an assertion. |
3388 | /// Also note, that increment and decrement also causes an assertion and |
3389 | /// makes iterator invalid. |
3390 | CaseIt case_default() { |
3391 | return CaseIt(this, DefaultPseudoIndex); |
3392 | } |
3393 | ConstCaseIt case_default() const { |
3394 | return ConstCaseIt(this, DefaultPseudoIndex); |
3395 | } |
3396 | |
3397 | /// Search all of the case values for the specified constant. If it is |
3398 | /// explicitly handled, return the case iterator of it, otherwise return |
3399 | /// default case iterator to indicate that it is handled by the default |
3400 | /// handler. |
3401 | CaseIt findCaseValue(const ConstantInt *C) { |
3402 | CaseIt I = llvm::find_if( |
3403 | cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; }); |
3404 | if (I != case_end()) |
3405 | return I; |
3406 | |
3407 | return case_default(); |
3408 | } |
3409 | ConstCaseIt findCaseValue(const ConstantInt *C) const { |
3410 | ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) { |
3411 | return Case.getCaseValue() == C; |
3412 | }); |
3413 | if (I != case_end()) |
3414 | return I; |
3415 | |
3416 | return case_default(); |
3417 | } |
3418 | |
3419 | /// Finds the unique case value for a given successor. Returns null if the |
3420 | /// successor is not found, not unique, or is the default case. |
3421 | ConstantInt *findCaseDest(BasicBlock *BB) { |
3422 | if (BB == getDefaultDest()) |
3423 | return nullptr; |
3424 | |
3425 | ConstantInt *CI = nullptr; |
3426 | for (auto Case : cases()) { |
3427 | if (Case.getCaseSuccessor() != BB) |
3428 | continue; |
3429 | |
3430 | if (CI) |
3431 | return nullptr; // Multiple cases lead to BB. |
3432 | |
3433 | CI = Case.getCaseValue(); |
3434 | } |
3435 | |
3436 | return CI; |
3437 | } |
3438 | |
3439 | /// Add an entry to the switch instruction. |
3440 | /// Note: |
3441 | /// This action invalidates case_end(). Old case_end() iterator will |
3442 | /// point to the added case. |
3443 | void addCase(ConstantInt *OnVal, BasicBlock *Dest); |
3444 | |
3445 | /// This method removes the specified case and its successor from the switch |
3446 | /// instruction. Note that this operation may reorder the remaining cases at |
3447 | /// index idx and above. |
3448 | /// Note: |
3449 | /// This action invalidates iterators for all cases following the one removed, |
3450 | /// including the case_end() iterator. It returns an iterator for the next |
3451 | /// case. |
3452 | CaseIt removeCase(CaseIt I); |
3453 | |
3454 | unsigned getNumSuccessors() const { return getNumOperands()/2; } |
3455 | BasicBlock *getSuccessor(unsigned idx) const { |
3456 | assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")((idx < getNumSuccessors() &&"Successor idx out of range for switch!" ) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3456, __PRETTY_FUNCTION__)); |
3457 | return cast<BasicBlock>(getOperand(idx*2+1)); |
3458 | } |
3459 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
3460 | assert(idx < getNumSuccessors() && "Successor # out of range for switch!")((idx < getNumSuccessors() && "Successor # out of range for switch!" ) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for switch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3460, __PRETTY_FUNCTION__)); |
3461 | setOperand(idx * 2 + 1, NewSucc); |
3462 | } |
3463 | |
3464 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3465 | static bool classof(const Instruction *I) { |
3466 | return I->getOpcode() == Instruction::Switch; |
3467 | } |
3468 | static bool classof(const Value *V) { |
3469 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3470 | } |
3471 | }; |
3472 | |
3473 | /// A wrapper class to simplify modification of SwitchInst cases along with |
3474 | /// their prof branch_weights metadata. |
3475 | class SwitchInstProfUpdateWrapper { |
3476 | SwitchInst &SI; |
3477 | Optional<SmallVector<uint32_t, 8> > Weights = None; |
3478 | bool Changed = false; |
3479 | |
3480 | protected: |
3481 | static MDNode *getProfBranchWeightsMD(const SwitchInst &SI); |
3482 | |
3483 | MDNode *buildProfBranchWeightsMD(); |
3484 | |
3485 | void init(); |
3486 | |
3487 | public: |
3488 | using CaseWeightOpt = Optional<uint32_t>; |
3489 | SwitchInst *operator->() { return &SI; } |
3490 | SwitchInst &operator*() { return SI; } |
3491 | operator SwitchInst *() { return &SI; } |
3492 | |
3493 | SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } |
3494 | |
3495 | ~SwitchInstProfUpdateWrapper() { |
3496 | if (Changed) |
3497 | SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); |
3498 | } |
3499 | |
3500 | /// Delegate the call to the underlying SwitchInst::removeCase() and remove |
3501 | /// correspondent branch weight. |
3502 | SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); |
3503 | |
3504 | /// Delegate the call to the underlying SwitchInst::addCase() and set the |
3505 | /// specified branch weight for the added case. |
3506 | void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); |
3507 | |
3508 | /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark |
3509 | /// this object to not touch the underlying SwitchInst in destructor. |
3510 | SymbolTableList<Instruction>::iterator eraseFromParent(); |
3511 | |
3512 | void setSuccessorWeight(unsigned idx, CaseWeightOpt W); |
3513 | CaseWeightOpt getSuccessorWeight(unsigned idx); |
3514 | |
3515 | static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); |
3516 | }; |
3517 | |
3518 | template <> |
3519 | struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { |
3520 | }; |
3521 | |
3522 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits <SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator SwitchInst::op_begin() const { return OperandTraits<SwitchInst >::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst ::op_iterator SwitchInst::op_end() { return OperandTraits< SwitchInst>::op_end(this); } SwitchInst::const_op_iterator SwitchInst::op_end() const { return OperandTraits<SwitchInst >::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<SwitchInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3522, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<SwitchInst>::op_begin(const_cast<SwitchInst *>(this))[i_nocapture].get()); } void SwitchInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<SwitchInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3522, __PRETTY_FUNCTION__)); OperandTraits<SwitchInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned SwitchInst ::getNumOperands() const { return OperandTraits<SwitchInst >::operands(this); } template <int Idx_nocapture> Use &SwitchInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & SwitchInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
3523 | |
3524 | //===----------------------------------------------------------------------===// |
3525 | // IndirectBrInst Class |
3526 | //===----------------------------------------------------------------------===// |
3527 | |
3528 | //===--------------------------------------------------------------------------- |
3529 | /// Indirect Branch Instruction. |
3530 | /// |
3531 | class IndirectBrInst : public Instruction { |
3532 | unsigned ReservedSpace; |
3533 | |
3534 | // Operand[0] = Address to jump to |
3535 | // Operand[n+1] = n-th destination |
3536 | IndirectBrInst(const IndirectBrInst &IBI); |
3537 | |
3538 | /// Create a new indirectbr instruction, specifying an |
3539 | /// Address to jump to. The number of expected destinations can be specified |
3540 | /// here to make memory allocation more efficient. This constructor can also |
3541 | /// autoinsert before another instruction. |
3542 | IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); |
3543 | |
3544 | /// Create a new indirectbr instruction, specifying an |
3545 | /// Address to jump to. The number of expected destinations can be specified |
3546 | /// here to make memory allocation more efficient. This constructor also |
3547 | /// autoinserts at the end of the specified BasicBlock. |
3548 | IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); |
3549 | |
3550 | // allocate space for exactly zero operands |
3551 | void *operator new(size_t s) { |
3552 | return User::operator new(s); |
3553 | } |
3554 | |
3555 | void init(Value *Address, unsigned NumDests); |
3556 | void growOperands(); |
3557 | |
3558 | protected: |
3559 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3560 | friend class Instruction; |
3561 | |
3562 | IndirectBrInst *cloneImpl() const; |
3563 | |
3564 | public: |
3565 | /// Iterator type that casts an operand to a basic block. |
3566 | /// |
3567 | /// This only makes sense because the successors are stored as adjacent |
3568 | /// operands for indirectbr instructions. |
3569 | struct succ_op_iterator |
3570 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, |
3571 | std::random_access_iterator_tag, BasicBlock *, |
3572 | ptrdiff_t, BasicBlock *, BasicBlock *> { |
3573 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} |
3574 | |
3575 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3576 | BasicBlock *operator->() const { return operator*(); } |
3577 | }; |
3578 | |
3579 | /// The const version of `succ_op_iterator`. |
3580 | struct const_succ_op_iterator |
3581 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, |
3582 | std::random_access_iterator_tag, |
3583 | const BasicBlock *, ptrdiff_t, const BasicBlock *, |
3584 | const BasicBlock *> { |
3585 | explicit const_succ_op_iterator(const_value_op_iterator I) |
3586 | : iterator_adaptor_base(I) {} |
3587 | |
3588 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3589 | const BasicBlock *operator->() const { return operator*(); } |
3590 | }; |
3591 | |
3592 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3593 | Instruction *InsertBefore = nullptr) { |
3594 | return new IndirectBrInst(Address, NumDests, InsertBefore); |
3595 | } |
3596 | |
3597 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3598 | BasicBlock *InsertAtEnd) { |
3599 | return new IndirectBrInst(Address, NumDests, InsertAtEnd); |
3600 | } |
3601 | |
3602 | /// Provide fast operand accessors. |
3603 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
3604 | |
3605 | // Accessor Methods for IndirectBrInst instruction. |
3606 | Value *getAddress() { return getOperand(0); } |
3607 | const Value *getAddress() const { return getOperand(0); } |
3608 | void setAddress(Value *V) { setOperand(0, V); } |
3609 | |
3610 | /// return the number of possible destinations in this |
3611 | /// indirectbr instruction. |
3612 | unsigned getNumDestinations() const { return getNumOperands()-1; } |
3613 | |
3614 | /// Return the specified destination. |
3615 | BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } |
3616 | const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } |
3617 | |
3618 | /// Add a destination. |
3619 | /// |
3620 | void addDestination(BasicBlock *Dest); |
3621 | |
3622 | /// This method removes the specified successor from the |
3623 | /// indirectbr instruction. |
3624 | void removeDestination(unsigned i); |
3625 | |
3626 | unsigned getNumSuccessors() const { return getNumOperands()-1; } |
3627 | BasicBlock *getSuccessor(unsigned i) const { |
3628 | return cast<BasicBlock>(getOperand(i+1)); |
3629 | } |
3630 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3631 | setOperand(i + 1, NewSucc); |
3632 | } |
3633 | |
3634 | iterator_range<succ_op_iterator> successors() { |
3635 | return make_range(succ_op_iterator(std::next(value_op_begin())), |
3636 | succ_op_iterator(value_op_end())); |
3637 | } |
3638 | |
3639 | iterator_range<const_succ_op_iterator> successors() const { |
3640 | return make_range(const_succ_op_iterator(std::next(value_op_begin())), |
3641 | const_succ_op_iterator(value_op_end())); |
3642 | } |
3643 | |
3644 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3645 | static bool classof(const Instruction *I) { |
3646 | return I->getOpcode() == Instruction::IndirectBr; |
3647 | } |
3648 | static bool classof(const Value *V) { |
3649 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3650 | } |
3651 | }; |
3652 | |
3653 | template <> |
3654 | struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { |
3655 | }; |
3656 | |
3657 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst ::const_op_iterator IndirectBrInst::op_begin() const { return OperandTraits<IndirectBrInst>::op_begin(const_cast< IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst ::op_end() { return OperandTraits<IndirectBrInst>::op_end (this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end () const { return OperandTraits<IndirectBrInst>::op_end (const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3657, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<IndirectBrInst>::op_begin(const_cast< IndirectBrInst*>(this))[i_nocapture].get()); } void IndirectBrInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<IndirectBrInst>::operands (this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3657, __PRETTY_FUNCTION__)); OperandTraits<IndirectBrInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned IndirectBrInst::getNumOperands() const { return OperandTraits <IndirectBrInst>::operands(this); } template <int Idx_nocapture > Use &IndirectBrInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &IndirectBrInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); } |
3658 | |
3659 | //===----------------------------------------------------------------------===// |
3660 | // InvokeInst Class |
3661 | //===----------------------------------------------------------------------===// |
3662 | |
3663 | /// Invoke instruction. The SubclassData field is used to hold the |
3664 | /// calling convention of the call. |
3665 | /// |
3666 | class InvokeInst : public CallBase { |
3667 | /// The number of operands for this call beyond the called function, |
3668 | /// arguments, and operand bundles. |
3669 | static constexpr int NumExtraOperands = 2; |
3670 | |
3671 | /// The index from the end of the operand array to the normal destination. |
3672 | static constexpr int NormalDestOpEndIdx = -3; |
3673 | |
3674 | /// The index from the end of the operand array to the unwind destination. |
3675 | static constexpr int UnwindDestOpEndIdx = -2; |
3676 | |
3677 | InvokeInst(const InvokeInst &BI); |
3678 | |
3679 | /// Construct an InvokeInst given a range of arguments. |
3680 | /// |
3681 | /// Construct an InvokeInst from a range of arguments |
3682 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3683 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3684 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3685 | const Twine &NameStr, Instruction *InsertBefore); |
3686 | |
3687 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3688 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3689 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3690 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
3691 | |
3692 | void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3693 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3694 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
3695 | |
3696 | /// Compute the number of operands to allocate. |
3697 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
3698 | // We need one operand for the called function, plus our extra operands and |
3699 | // the input operand counts provided. |
3700 | return 1 + NumExtraOperands + NumArgs + NumBundleInputs; |
3701 | } |
3702 | |
3703 | protected: |
3704 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3705 | friend class Instruction; |
3706 | |
3707 | InvokeInst *cloneImpl() const; |
3708 | |
3709 | public: |
3710 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3711 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3712 | const Twine &NameStr, |
3713 | Instruction *InsertBefore = nullptr) { |
3714 | int NumOperands = ComputeNumOperands(Args.size()); |
3715 | return new (NumOperands) |
3716 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3717 | NameStr, InsertBefore); |
3718 | } |
3719 | |
3720 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3721 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3722 | ArrayRef<OperandBundleDef> Bundles = None, |
3723 | const Twine &NameStr = "", |
3724 | Instruction *InsertBefore = nullptr) { |
3725 | int NumOperands = |
3726 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3727 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3728 | |
3729 | return new (NumOperands, DescriptorBytes) |
3730 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3731 | NameStr, InsertBefore); |
3732 | } |
3733 | |
3734 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3735 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3736 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3737 | int NumOperands = ComputeNumOperands(Args.size()); |
3738 | return new (NumOperands) |
3739 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3740 | NameStr, InsertAtEnd); |
3741 | } |
3742 | |
3743 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3744 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3745 | ArrayRef<OperandBundleDef> Bundles, |
3746 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3747 | int NumOperands = |
3748 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3749 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3750 | |
3751 | return new (NumOperands, DescriptorBytes) |
3752 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3753 | NameStr, InsertAtEnd); |
3754 | } |
3755 | |
3756 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3757 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3758 | const Twine &NameStr, |
3759 | Instruction *InsertBefore = nullptr) { |
3760 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3761 | IfException, Args, None, NameStr, InsertBefore); |
3762 | } |
3763 | |
3764 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3765 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3766 | ArrayRef<OperandBundleDef> Bundles = None, |
3767 | const Twine &NameStr = "", |
3768 | Instruction *InsertBefore = nullptr) { |
3769 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3770 | IfException, Args, Bundles, NameStr, InsertBefore); |
3771 | } |
3772 | |
3773 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3774 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3775 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3776 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3777 | IfException, Args, NameStr, InsertAtEnd); |
3778 | } |
3779 | |
3780 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3781 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3782 | ArrayRef<OperandBundleDef> Bundles, |
3783 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3784 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3785 | IfException, Args, Bundles, NameStr, InsertAtEnd); |
3786 | } |
3787 | |
3788 | // Deprecated [opaque pointer types] |
3789 | static InvokeInst *Create(Value *Func, BasicBlock *IfNormal, |
3790 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3791 | const Twine &NameStr, |
3792 | Instruction *InsertBefore = nullptr) { |
3793 | return Create(cast<FunctionType>( |
3794 | cast<PointerType>(Func->getType())->getElementType()), |
3795 | Func, IfNormal, IfException, Args, None, NameStr, |
3796 | InsertBefore); |
3797 | } |
3798 | |
3799 | // Deprecated [opaque pointer types] |
3800 | static InvokeInst *Create(Value *Func, BasicBlock *IfNormal, |
3801 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3802 | ArrayRef<OperandBundleDef> Bundles = None, |
3803 | const Twine &NameStr = "", |
3804 | Instruction *InsertBefore = nullptr) { |
3805 | return Create(cast<FunctionType>( |
3806 | cast<PointerType>(Func->getType())->getElementType()), |
3807 | Func, IfNormal, IfException, Args, Bundles, NameStr, |
3808 | InsertBefore); |
3809 | } |
3810 | |
3811 | // Deprecated [opaque pointer types] |
3812 | static InvokeInst *Create(Value *Func, BasicBlock *IfNormal, |
3813 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3814 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3815 | return Create(cast<FunctionType>( |
3816 | cast<PointerType>(Func->getType())->getElementType()), |
3817 | Func, IfNormal, IfException, Args, NameStr, InsertAtEnd); |
3818 | } |
3819 | |
3820 | // Deprecated [opaque pointer types] |
3821 | static InvokeInst *Create(Value *Func, BasicBlock *IfNormal, |
3822 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3823 | ArrayRef<OperandBundleDef> Bundles, |
3824 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3825 | return Create(cast<FunctionType>( |
3826 | cast<PointerType>(Func->getType())->getElementType()), |
3827 | Func, IfNormal, IfException, Args, Bundles, NameStr, |
3828 | InsertAtEnd); |
3829 | } |
3830 | |
3831 | /// Create a clone of \p II with a different set of operand bundles and |
3832 | /// insert it before \p InsertPt. |
3833 | /// |
3834 | /// The returned invoke instruction is identical to \p II in every way except |
3835 | /// that the operand bundles for the new instruction are set to the operand |
3836 | /// bundles in \p Bundles. |
3837 | static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, |
3838 | Instruction *InsertPt = nullptr); |
3839 | |
3840 | /// Determine if the call should not perform indirect branch tracking. |
3841 | bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); } |
3842 | |
3843 | /// Determine if the call cannot unwind. |
3844 | bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); } |
3845 | void setDoesNotThrow() { |
3846 | addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind); |
3847 | } |
3848 | |
3849 | // get*Dest - Return the destination basic blocks... |
3850 | BasicBlock *getNormalDest() const { |
3851 | return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); |
3852 | } |
3853 | BasicBlock *getUnwindDest() const { |
3854 | return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); |
3855 | } |
3856 | void setNormalDest(BasicBlock *B) { |
3857 | Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3858 | } |
3859 | void setUnwindDest(BasicBlock *B) { |
3860 | Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3861 | } |
3862 | |
3863 | /// Get the landingpad instruction from the landing pad |
3864 | /// block (the unwind destination). |
3865 | LandingPadInst *getLandingPadInst() const; |
3866 | |
3867 | BasicBlock *getSuccessor(unsigned i) const { |
3868 | assert(i < 2 && "Successor # out of range for invoke!")((i < 2 && "Successor # out of range for invoke!") ? static_cast<void> (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3868, __PRETTY_FUNCTION__)); |
3869 | return i == 0 ? getNormalDest() : getUnwindDest(); |
3870 | } |
3871 | |
3872 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3873 | assert(i < 2 && "Successor # out of range for invoke!")((i < 2 && "Successor # out of range for invoke!") ? static_cast<void> (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 3873, __PRETTY_FUNCTION__)); |
3874 | if (i == 0) |
3875 | setNormalDest(NewSucc); |
3876 | else |
3877 | setUnwindDest(NewSucc); |
3878 | } |
3879 | |
3880 | unsigned getNumSuccessors() const { return 2; } |
3881 | |
3882 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3883 | static bool classof(const Instruction *I) { |
3884 | return (I->getOpcode() == Instruction::Invoke); |
3885 | } |
3886 | static bool classof(const Value *V) { |
3887 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3888 | } |
3889 | |
3890 | private: |
3891 | |
3892 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
3893 | // method so that subclasses cannot accidentally use it. |
3894 | void setInstructionSubclassData(unsigned short D) { |
3895 | Instruction::setInstructionSubclassData(D); |
3896 | } |
3897 | }; |
3898 | |
3899 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3900 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3901 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3902 | const Twine &NameStr, Instruction *InsertBefore) |
3903 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3904 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3905 | InsertBefore) { |
3906 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3907 | } |
3908 | |
3909 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3910 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3911 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3912 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
3913 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3914 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3915 | InsertAtEnd) { |
3916 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3917 | } |
3918 | |
3919 | //===----------------------------------------------------------------------===// |
3920 | // CallBrInst Class |
3921 | //===----------------------------------------------------------------------===// |
3922 | |
3923 | /// CallBr instruction, tracking function calls that may not return control but |
3924 | /// instead transfer it to a third location. The SubclassData field is used to |
3925 | /// hold the calling convention of the call. |
3926 | /// |
3927 | class CallBrInst : public CallBase { |
3928 | |
3929 | unsigned NumIndirectDests; |
3930 | |
3931 | CallBrInst(const CallBrInst &BI); |
3932 | |
3933 | /// Construct a CallBrInst given a range of arguments. |
3934 | /// |
3935 | /// Construct a CallBrInst from a range of arguments |
3936 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
3937 | ArrayRef<BasicBlock *> IndirectDests, |
3938 | ArrayRef<Value *> Args, |
3939 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3940 | const Twine &NameStr, Instruction *InsertBefore); |
3941 | |
3942 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
3943 | ArrayRef<BasicBlock *> IndirectDests, |
3944 | ArrayRef<Value *> Args, |
3945 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3946 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
3947 | |
3948 | void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, |
3949 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, |
3950 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
3951 | |
3952 | /// Should the Indirect Destinations change, scan + update the Arg list. |
3953 | void updateArgBlockAddresses(unsigned i, BasicBlock *B); |
3954 | |
3955 | /// Compute the number of operands to allocate. |
3956 | static int ComputeNumOperands(int NumArgs, int NumIndirectDests, |
3957 | int NumBundleInputs = 0) { |
3958 | // We need one operand for the called function, plus our extra operands and |
3959 | // the input operand counts provided. |
3960 | return 2 + NumIndirectDests + NumArgs + NumBundleInputs; |
3961 | } |
3962 | |
3963 | protected: |
3964 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3965 | friend class Instruction; |
3966 | |
3967 | CallBrInst *cloneImpl() const; |
3968 | |
3969 | public: |
3970 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3971 | BasicBlock *DefaultDest, |
3972 | ArrayRef<BasicBlock *> IndirectDests, |
3973 | ArrayRef<Value *> Args, const Twine &NameStr, |
3974 | Instruction *InsertBefore = nullptr) { |
3975 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
3976 | return new (NumOperands) |
3977 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
3978 | NumOperands, NameStr, InsertBefore); |
3979 | } |
3980 | |
3981 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3982 | BasicBlock *DefaultDest, |
3983 | ArrayRef<BasicBlock *> IndirectDests, |
3984 | ArrayRef<Value *> Args, |
3985 | ArrayRef<OperandBundleDef> Bundles = None, |
3986 | const Twine &NameStr = "", |
3987 | Instruction *InsertBefore = nullptr) { |
3988 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
3989 | CountBundleInputs(Bundles)); |
3990 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3991 | |
3992 | return new (NumOperands, DescriptorBytes) |
3993 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
3994 | NumOperands, NameStr, InsertBefore); |
3995 | } |
3996 | |
3997 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3998 | BasicBlock *DefaultDest, |
3999 | ArrayRef<BasicBlock *> IndirectDests, |
4000 | ArrayRef<Value *> Args, const Twine &NameStr, |
4001 | BasicBlock *InsertAtEnd) { |
4002 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
4003 | return new (NumOperands) |
4004 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
4005 | NumOperands, NameStr, InsertAtEnd); |
4006 | } |
4007 | |
4008 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
4009 | BasicBlock *DefaultDest, |
4010 | ArrayRef<BasicBlock *> IndirectDests, |
4011 | ArrayRef<Value *> Args, |
4012 | ArrayRef<OperandBundleDef> Bundles, |
4013 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4014 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
4015 | CountBundleInputs(Bundles)); |
4016 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
4017 | |
4018 | return new (NumOperands, DescriptorBytes) |
4019 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
4020 | NumOperands, NameStr, InsertAtEnd); |
4021 | } |
4022 | |
4023 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4024 | ArrayRef<BasicBlock *> IndirectDests, |
4025 | ArrayRef<Value *> Args, const Twine &NameStr, |
4026 | Instruction *InsertBefore = nullptr) { |
4027 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4028 | IndirectDests, Args, NameStr, InsertBefore); |
4029 | } |
4030 | |
4031 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4032 | ArrayRef<BasicBlock *> IndirectDests, |
4033 | ArrayRef<Value *> Args, |
4034 | ArrayRef<OperandBundleDef> Bundles = None, |
4035 | const Twine &NameStr = "", |
4036 | Instruction *InsertBefore = nullptr) { |
4037 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4038 | IndirectDests, Args, Bundles, NameStr, InsertBefore); |
4039 | } |
4040 | |
4041 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4042 | ArrayRef<BasicBlock *> IndirectDests, |
4043 | ArrayRef<Value *> Args, const Twine &NameStr, |
4044 | BasicBlock *InsertAtEnd) { |
4045 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4046 | IndirectDests, Args, NameStr, InsertAtEnd); |
4047 | } |
4048 | |
4049 | static CallBrInst *Create(FunctionCallee Func, |
4050 | BasicBlock *DefaultDest, |
4051 | ArrayRef<BasicBlock *> IndirectDests, |
4052 | ArrayRef<Value *> Args, |
4053 | ArrayRef<OperandBundleDef> Bundles, |
4054 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4055 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4056 | IndirectDests, Args, Bundles, NameStr, InsertAtEnd); |
4057 | } |
4058 | |
4059 | /// Create a clone of \p CBI with a different set of operand bundles and |
4060 | /// insert it before \p InsertPt. |
4061 | /// |
4062 | /// The returned callbr instruction is identical to \p CBI in every way |
4063 | /// except that the operand bundles for the new instruction are set to the |
4064 | /// operand bundles in \p Bundles. |
4065 | static CallBrInst *Create(CallBrInst *CBI, |
4066 | ArrayRef<OperandBundleDef> Bundles, |
4067 | Instruction *InsertPt = nullptr); |
4068 | |
4069 | /// Return the number of callbr indirect dest labels. |
4070 | /// |
4071 | unsigned getNumIndirectDests() const { return NumIndirectDests; } |
4072 | |
4073 | /// getIndirectDestLabel - Return the i-th indirect dest label. |
4074 | /// |
4075 | Value *getIndirectDestLabel(unsigned i) const { |
4076 | assert(i < getNumIndirectDests() && "Out of bounds!")((i < getNumIndirectDests() && "Out of bounds!") ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4076, __PRETTY_FUNCTION__)); |
4077 | return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() + |
4078 | 1); |
4079 | } |
4080 | |
4081 | Value *getIndirectDestLabelUse(unsigned i) const { |
4082 | assert(i < getNumIndirectDests() && "Out of bounds!")((i < getNumIndirectDests() && "Out of bounds!") ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4082, __PRETTY_FUNCTION__)); |
4083 | return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() + |
4084 | 1); |
4085 | } |
4086 | |
4087 | // Return the destination basic blocks... |
4088 | BasicBlock *getDefaultDest() const { |
4089 | return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); |
4090 | } |
4091 | BasicBlock *getIndirectDest(unsigned i) const { |
4092 | return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); |
4093 | } |
4094 | SmallVector<BasicBlock *, 16> getIndirectDests() const { |
4095 | SmallVector<BasicBlock *, 16> IndirectDests; |
4096 | for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) |
4097 | IndirectDests.push_back(getIndirectDest(i)); |
4098 | return IndirectDests; |
4099 | } |
4100 | void setDefaultDest(BasicBlock *B) { |
4101 | *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); |
4102 | } |
4103 | void setIndirectDest(unsigned i, BasicBlock *B) { |
4104 | updateArgBlockAddresses(i, B); |
4105 | *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); |
4106 | } |
4107 | |
4108 | BasicBlock *getSuccessor(unsigned i) const { |
4109 | assert(i < getNumSuccessors() + 1 &&((i < getNumSuccessors() + 1 && "Successor # out of range for callbr!" ) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4110, __PRETTY_FUNCTION__)) |
4110 | "Successor # out of range for callbr!")((i < getNumSuccessors() + 1 && "Successor # out of range for callbr!" ) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4110, __PRETTY_FUNCTION__)); |
4111 | return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); |
4112 | } |
4113 | |
4114 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
4115 | assert(i < getNumIndirectDests() + 1 &&((i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!" ) ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4116, __PRETTY_FUNCTION__)) |
4116 | "Successor # out of range for callbr!")((i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!" ) ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4116, __PRETTY_FUNCTION__)); |
4117 | return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); |
4118 | } |
4119 | |
4120 | unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } |
4121 | |
4122 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4123 | static bool classof(const Instruction *I) { |
4124 | return (I->getOpcode() == Instruction::CallBr); |
4125 | } |
4126 | static bool classof(const Value *V) { |
4127 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4128 | } |
4129 | |
4130 | private: |
4131 | |
4132 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4133 | // method so that subclasses cannot accidentally use it. |
4134 | void setInstructionSubclassData(unsigned short D) { |
4135 | Instruction::setInstructionSubclassData(D); |
4136 | } |
4137 | }; |
4138 | |
4139 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4140 | ArrayRef<BasicBlock *> IndirectDests, |
4141 | ArrayRef<Value *> Args, |
4142 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4143 | const Twine &NameStr, Instruction *InsertBefore) |
4144 | : CallBase(Ty->getReturnType(), Instruction::CallBr, |
4145 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4146 | InsertBefore) { |
4147 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4148 | } |
4149 | |
4150 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4151 | ArrayRef<BasicBlock *> IndirectDests, |
4152 | ArrayRef<Value *> Args, |
4153 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4154 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
4155 | : CallBase(Ty->getReturnType(), Instruction::CallBr, |
4156 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4157 | InsertAtEnd) { |
4158 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4159 | } |
4160 | |
4161 | //===----------------------------------------------------------------------===// |
4162 | // ResumeInst Class |
4163 | //===----------------------------------------------------------------------===// |
4164 | |
4165 | //===--------------------------------------------------------------------------- |
4166 | /// Resume the propagation of an exception. |
4167 | /// |
4168 | class ResumeInst : public Instruction { |
4169 | ResumeInst(const ResumeInst &RI); |
4170 | |
4171 | explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); |
4172 | ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); |
4173 | |
4174 | protected: |
4175 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4176 | friend class Instruction; |
4177 | |
4178 | ResumeInst *cloneImpl() const; |
4179 | |
4180 | public: |
4181 | static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { |
4182 | return new(1) ResumeInst(Exn, InsertBefore); |
4183 | } |
4184 | |
4185 | static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { |
4186 | return new(1) ResumeInst(Exn, InsertAtEnd); |
4187 | } |
4188 | |
4189 | /// Provide fast operand accessors |
4190 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
4191 | |
4192 | /// Convenience accessor. |
4193 | Value *getValue() const { return Op<0>(); } |
4194 | |
4195 | unsigned getNumSuccessors() const { return 0; } |
4196 | |
4197 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4198 | static bool classof(const Instruction *I) { |
4199 | return I->getOpcode() == Instruction::Resume; |
4200 | } |
4201 | static bool classof(const Value *V) { |
4202 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4203 | } |
4204 | |
4205 | private: |
4206 | BasicBlock *getSuccessor(unsigned idx) const { |
4207 | llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4207); |
4208 | } |
4209 | |
4210 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
4211 | llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4211); |
4212 | } |
4213 | }; |
4214 | |
4215 | template <> |
4216 | struct OperandTraits<ResumeInst> : |
4217 | public FixedNumOperandTraits<ResumeInst, 1> { |
4218 | }; |
4219 | |
4220 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits <ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator ResumeInst::op_begin() const { return OperandTraits<ResumeInst >::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst ::op_iterator ResumeInst::op_end() { return OperandTraits< ResumeInst>::op_end(this); } ResumeInst::const_op_iterator ResumeInst::op_end() const { return OperandTraits<ResumeInst >::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<ResumeInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4220, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<ResumeInst>::op_begin(const_cast<ResumeInst *>(this))[i_nocapture].get()); } void ResumeInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<ResumeInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4220, __PRETTY_FUNCTION__)); OperandTraits<ResumeInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ResumeInst ::getNumOperands() const { return OperandTraits<ResumeInst >::operands(this); } template <int Idx_nocapture> Use &ResumeInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & ResumeInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
4221 | |
4222 | //===----------------------------------------------------------------------===// |
4223 | // CatchSwitchInst Class |
4224 | //===----------------------------------------------------------------------===// |
4225 | class CatchSwitchInst : public Instruction { |
4226 | /// The number of operands actually allocated. NumOperands is |
4227 | /// the number actually in use. |
4228 | unsigned ReservedSpace; |
4229 | |
4230 | // Operand[0] = Outer scope |
4231 | // Operand[1] = Unwind block destination |
4232 | // Operand[n] = BasicBlock to go to on match |
4233 | CatchSwitchInst(const CatchSwitchInst &CSI); |
4234 | |
4235 | /// Create a new switch instruction, specifying a |
4236 | /// default destination. The number of additional handlers can be specified |
4237 | /// here to make memory allocation more efficient. |
4238 | /// This constructor can also autoinsert before another instruction. |
4239 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4240 | unsigned NumHandlers, const Twine &NameStr, |
4241 | Instruction *InsertBefore); |
4242 | |
4243 | /// Create a new switch instruction, specifying a |
4244 | /// default destination. The number of additional handlers can be specified |
4245 | /// here to make memory allocation more efficient. |
4246 | /// This constructor also autoinserts at the end of the specified BasicBlock. |
4247 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4248 | unsigned NumHandlers, const Twine &NameStr, |
4249 | BasicBlock *InsertAtEnd); |
4250 | |
4251 | // allocate space for exactly zero operands |
4252 | void *operator new(size_t s) { return User::operator new(s); } |
4253 | |
4254 | void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); |
4255 | void growOperands(unsigned Size); |
4256 | |
4257 | protected: |
4258 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4259 | friend class Instruction; |
4260 | |
4261 | CatchSwitchInst *cloneImpl() const; |
4262 | |
4263 | public: |
4264 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4265 | unsigned NumHandlers, |
4266 | const Twine &NameStr = "", |
4267 | Instruction *InsertBefore = nullptr) { |
4268 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4269 | InsertBefore); |
4270 | } |
4271 | |
4272 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4273 | unsigned NumHandlers, const Twine &NameStr, |
4274 | BasicBlock *InsertAtEnd) { |
4275 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4276 | InsertAtEnd); |
4277 | } |
4278 | |
4279 | /// Provide fast operand accessors |
4280 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
4281 | |
4282 | // Accessor Methods for CatchSwitch stmt |
4283 | Value *getParentPad() const { return getOperand(0); } |
4284 | void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } |
4285 | |
4286 | // Accessor Methods for CatchSwitch stmt |
4287 | bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; } |
4288 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4289 | BasicBlock *getUnwindDest() const { |
4290 | if (hasUnwindDest()) |
4291 | return cast<BasicBlock>(getOperand(1)); |
4292 | return nullptr; |
4293 | } |
4294 | void setUnwindDest(BasicBlock *UnwindDest) { |
4295 | assert(UnwindDest)((UnwindDest) ? static_cast<void> (0) : __assert_fail ( "UnwindDest", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4295, __PRETTY_FUNCTION__)); |
4296 | assert(hasUnwindDest())((hasUnwindDest()) ? static_cast<void> (0) : __assert_fail ("hasUnwindDest()", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4296, __PRETTY_FUNCTION__)); |
4297 | setOperand(1, UnwindDest); |
4298 | } |
4299 | |
4300 | /// return the number of 'handlers' in this catchswitch |
4301 | /// instruction, except the default handler |
4302 | unsigned getNumHandlers() const { |
4303 | if (hasUnwindDest()) |
4304 | return getNumOperands() - 2; |
4305 | return getNumOperands() - 1; |
4306 | } |
4307 | |
4308 | private: |
4309 | static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } |
4310 | static const BasicBlock *handler_helper(const Value *V) { |
4311 | return cast<BasicBlock>(V); |
4312 | } |
4313 | |
4314 | public: |
4315 | using DerefFnTy = BasicBlock *(*)(Value *); |
4316 | using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; |
4317 | using handler_range = iterator_range<handler_iterator>; |
4318 | using ConstDerefFnTy = const BasicBlock *(*)(const Value *); |
4319 | using const_handler_iterator = |
4320 | mapped_iterator<const_op_iterator, ConstDerefFnTy>; |
4321 | using const_handler_range = iterator_range<const_handler_iterator>; |
4322 | |
4323 | /// Returns an iterator that points to the first handler in CatchSwitchInst. |
4324 | handler_iterator handler_begin() { |
4325 | op_iterator It = op_begin() + 1; |
4326 | if (hasUnwindDest()) |
4327 | ++It; |
4328 | return handler_iterator(It, DerefFnTy(handler_helper)); |
4329 | } |
4330 | |
4331 | /// Returns an iterator that points to the first handler in the |
4332 | /// CatchSwitchInst. |
4333 | const_handler_iterator handler_begin() const { |
4334 | const_op_iterator It = op_begin() + 1; |
4335 | if (hasUnwindDest()) |
4336 | ++It; |
4337 | return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); |
4338 | } |
4339 | |
4340 | /// Returns a read-only iterator that points one past the last |
4341 | /// handler in the CatchSwitchInst. |
4342 | handler_iterator handler_end() { |
4343 | return handler_iterator(op_end(), DerefFnTy(handler_helper)); |
4344 | } |
4345 | |
4346 | /// Returns an iterator that points one past the last handler in the |
4347 | /// CatchSwitchInst. |
4348 | const_handler_iterator handler_end() const { |
4349 | return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); |
4350 | } |
4351 | |
4352 | /// iteration adapter for range-for loops. |
4353 | handler_range handlers() { |
4354 | return make_range(handler_begin(), handler_end()); |
4355 | } |
4356 | |
4357 | /// iteration adapter for range-for loops. |
4358 | const_handler_range handlers() const { |
4359 | return make_range(handler_begin(), handler_end()); |
4360 | } |
4361 | |
4362 | /// Add an entry to the switch instruction... |
4363 | /// Note: |
4364 | /// This action invalidates handler_end(). Old handler_end() iterator will |
4365 | /// point to the added handler. |
4366 | void addHandler(BasicBlock *Dest); |
4367 | |
4368 | void removeHandler(handler_iterator HI); |
4369 | |
4370 | unsigned getNumSuccessors() const { return getNumOperands() - 1; } |
4371 | BasicBlock *getSuccessor(unsigned Idx) const { |
4372 | assert(Idx < getNumSuccessors() &&((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4373, __PRETTY_FUNCTION__)) |
4373 | "Successor # out of range for catchswitch!")((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4373, __PRETTY_FUNCTION__)); |
4374 | return cast<BasicBlock>(getOperand(Idx + 1)); |
4375 | } |
4376 | void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { |
4377 | assert(Idx < getNumSuccessors() &&((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4378, __PRETTY_FUNCTION__)) |
4378 | "Successor # out of range for catchswitch!")((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4378, __PRETTY_FUNCTION__)); |
4379 | setOperand(Idx + 1, NewSucc); |
4380 | } |
4381 | |
4382 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4383 | static bool classof(const Instruction *I) { |
4384 | return I->getOpcode() == Instruction::CatchSwitch; |
4385 | } |
4386 | static bool classof(const Value *V) { |
4387 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4388 | } |
4389 | }; |
4390 | |
4391 | template <> |
4392 | struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; |
4393 | |
4394 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst ::const_op_iterator CatchSwitchInst::op_begin() const { return OperandTraits<CatchSwitchInst>::op_begin(const_cast< CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst ::op_end() { return OperandTraits<CatchSwitchInst>::op_end (this); } CatchSwitchInst::const_op_iterator CatchSwitchInst:: op_end() const { return OperandTraits<CatchSwitchInst>:: op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4394, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<CatchSwitchInst>::op_begin(const_cast< CatchSwitchInst*>(this))[i_nocapture].get()); } void CatchSwitchInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<CatchSwitchInst>::operands (this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4394, __PRETTY_FUNCTION__)); OperandTraits<CatchSwitchInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned CatchSwitchInst::getNumOperands() const { return OperandTraits <CatchSwitchInst>::operands(this); } template <int Idx_nocapture > Use &CatchSwitchInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CatchSwitchInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); } |
4395 | |
4396 | //===----------------------------------------------------------------------===// |
4397 | // CleanupPadInst Class |
4398 | //===----------------------------------------------------------------------===// |
4399 | class CleanupPadInst : public FuncletPadInst { |
4400 | private: |
4401 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4402 | unsigned Values, const Twine &NameStr, |
4403 | Instruction *InsertBefore) |
4404 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4405 | NameStr, InsertBefore) {} |
4406 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4407 | unsigned Values, const Twine &NameStr, |
4408 | BasicBlock *InsertAtEnd) |
4409 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4410 | NameStr, InsertAtEnd) {} |
4411 | |
4412 | public: |
4413 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, |
4414 | const Twine &NameStr = "", |
4415 | Instruction *InsertBefore = nullptr) { |
4416 | unsigned Values = 1 + Args.size(); |
4417 | return new (Values) |
4418 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); |
4419 | } |
4420 | |
4421 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, |
4422 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4423 | unsigned Values = 1 + Args.size(); |
4424 | return new (Values) |
4425 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); |
4426 | } |
4427 | |
4428 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4429 | static bool classof(const Instruction *I) { |
4430 | return I->getOpcode() == Instruction::CleanupPad; |
4431 | } |
4432 | static bool classof(const Value *V) { |
4433 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4434 | } |
4435 | }; |
4436 | |
4437 | //===----------------------------------------------------------------------===// |
4438 | // CatchPadInst Class |
4439 | //===----------------------------------------------------------------------===// |
4440 | class CatchPadInst : public FuncletPadInst { |
4441 | private: |
4442 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4443 | unsigned Values, const Twine &NameStr, |
4444 | Instruction *InsertBefore) |
4445 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4446 | NameStr, InsertBefore) {} |
4447 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4448 | unsigned Values, const Twine &NameStr, |
4449 | BasicBlock *InsertAtEnd) |
4450 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4451 | NameStr, InsertAtEnd) {} |
4452 | |
4453 | public: |
4454 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4455 | const Twine &NameStr = "", |
4456 | Instruction *InsertBefore = nullptr) { |
4457 | unsigned Values = 1 + Args.size(); |
4458 | return new (Values) |
4459 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); |
4460 | } |
4461 | |
4462 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4463 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4464 | unsigned Values = 1 + Args.size(); |
4465 | return new (Values) |
4466 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); |
4467 | } |
4468 | |
4469 | /// Convenience accessors |
4470 | CatchSwitchInst *getCatchSwitch() const { |
4471 | return cast<CatchSwitchInst>(Op<-1>()); |
4472 | } |
4473 | void setCatchSwitch(Value *CatchSwitch) { |
4474 | assert(CatchSwitch)((CatchSwitch) ? static_cast<void> (0) : __assert_fail ( "CatchSwitch", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4474, __PRETTY_FUNCTION__)); |
4475 | Op<-1>() = CatchSwitch; |
4476 | } |
4477 | |
4478 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4479 | static bool classof(const Instruction *I) { |
4480 | return I->getOpcode() == Instruction::CatchPad; |
4481 | } |
4482 | static bool classof(const Value *V) { |
4483 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4484 | } |
4485 | }; |
4486 | |
4487 | //===----------------------------------------------------------------------===// |
4488 | // CatchReturnInst Class |
4489 | //===----------------------------------------------------------------------===// |
4490 | |
4491 | class CatchReturnInst : public Instruction { |
4492 | CatchReturnInst(const CatchReturnInst &RI); |
4493 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); |
4494 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); |
4495 | |
4496 | void init(Value *CatchPad, BasicBlock *BB); |
4497 | |
4498 | protected: |
4499 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4500 | friend class Instruction; |
4501 | |
4502 | CatchReturnInst *cloneImpl() const; |
4503 | |
4504 | public: |
4505 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4506 | Instruction *InsertBefore = nullptr) { |
4507 | assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4507, __PRETTY_FUNCTION__)); |
4508 | assert(BB)((BB) ? static_cast<void> (0) : __assert_fail ("BB", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4508, __PRETTY_FUNCTION__)); |
4509 | return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); |
4510 | } |
4511 | |
4512 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4513 | BasicBlock *InsertAtEnd) { |
4514 | assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4514, __PRETTY_FUNCTION__)); |
4515 | assert(BB)((BB) ? static_cast<void> (0) : __assert_fail ("BB", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4515, __PRETTY_FUNCTION__)); |
4516 | return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); |
4517 | } |
4518 | |
4519 | /// Provide fast operand accessors |
4520 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
4521 | |
4522 | /// Convenience accessors. |
4523 | CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } |
4524 | void setCatchPad(CatchPadInst *CatchPad) { |
4525 | assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4525, __PRETTY_FUNCTION__)); |
4526 | Op<0>() = CatchPad; |
4527 | } |
4528 | |
4529 | BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } |
4530 | void setSuccessor(BasicBlock *NewSucc) { |
4531 | assert(NewSucc)((NewSucc) ? static_cast<void> (0) : __assert_fail ("NewSucc" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4531, __PRETTY_FUNCTION__)); |
4532 | Op<1>() = NewSucc; |
4533 | } |
4534 | unsigned getNumSuccessors() const { return 1; } |
4535 | |
4536 | /// Get the parentPad of this catchret's catchpad's catchswitch. |
4537 | /// The successor block is implicitly a member of this funclet. |
4538 | Value *getCatchSwitchParentPad() const { |
4539 | return getCatchPad()->getCatchSwitch()->getParentPad(); |
4540 | } |
4541 | |
4542 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4543 | static bool classof(const Instruction *I) { |
4544 | return (I->getOpcode() == Instruction::CatchRet); |
4545 | } |
4546 | static bool classof(const Value *V) { |
4547 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4548 | } |
4549 | |
4550 | private: |
4551 | BasicBlock *getSuccessor(unsigned Idx) const { |
4552 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((Idx < getNumSuccessors() && "Successor # out of range for catchret!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4552, __PRETTY_FUNCTION__)); |
4553 | return getSuccessor(); |
4554 | } |
4555 | |
4556 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4557 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((Idx < getNumSuccessors() && "Successor # out of range for catchret!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4557, __PRETTY_FUNCTION__)); |
4558 | setSuccessor(B); |
4559 | } |
4560 | }; |
4561 | |
4562 | template <> |
4563 | struct OperandTraits<CatchReturnInst> |
4564 | : public FixedNumOperandTraits<CatchReturnInst, 2> {}; |
4565 | |
4566 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)CatchReturnInst::op_iterator CatchReturnInst::op_begin() { return OperandTraits<CatchReturnInst>::op_begin(this); } CatchReturnInst ::const_op_iterator CatchReturnInst::op_begin() const { return OperandTraits<CatchReturnInst>::op_begin(const_cast< CatchReturnInst*>(this)); } CatchReturnInst::op_iterator CatchReturnInst ::op_end() { return OperandTraits<CatchReturnInst>::op_end (this); } CatchReturnInst::const_op_iterator CatchReturnInst:: op_end() const { return OperandTraits<CatchReturnInst>:: op_end(const_cast<CatchReturnInst*>(this)); } Value *CatchReturnInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4566, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<CatchReturnInst>::op_begin(const_cast< CatchReturnInst*>(this))[i_nocapture].get()); } void CatchReturnInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<CatchReturnInst>::operands (this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4566, __PRETTY_FUNCTION__)); OperandTraits<CatchReturnInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned CatchReturnInst::getNumOperands() const { return OperandTraits <CatchReturnInst>::operands(this); } template <int Idx_nocapture > Use &CatchReturnInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CatchReturnInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); } |
4567 | |
4568 | //===----------------------------------------------------------------------===// |
4569 | // CleanupReturnInst Class |
4570 | //===----------------------------------------------------------------------===// |
4571 | |
4572 | class CleanupReturnInst : public Instruction { |
4573 | private: |
4574 | CleanupReturnInst(const CleanupReturnInst &RI); |
4575 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4576 | Instruction *InsertBefore = nullptr); |
4577 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4578 | BasicBlock *InsertAtEnd); |
4579 | |
4580 | void init(Value *CleanupPad, BasicBlock *UnwindBB); |
4581 | |
4582 | protected: |
4583 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4584 | friend class Instruction; |
4585 | |
4586 | CleanupReturnInst *cloneImpl() const; |
4587 | |
4588 | public: |
4589 | static CleanupReturnInst *Create(Value *CleanupPad, |
4590 | BasicBlock *UnwindBB = nullptr, |
4591 | Instruction *InsertBefore = nullptr) { |
4592 | assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail ( "CleanupPad", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4592, __PRETTY_FUNCTION__)); |
4593 | unsigned Values = 1; |
4594 | if (UnwindBB) |
4595 | ++Values; |
4596 | return new (Values) |
4597 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); |
4598 | } |
4599 | |
4600 | static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, |
4601 | BasicBlock *InsertAtEnd) { |
4602 | assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail ( "CleanupPad", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4602, __PRETTY_FUNCTION__)); |
4603 | unsigned Values = 1; |
4604 | if (UnwindBB) |
4605 | ++Values; |
4606 | return new (Values) |
4607 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); |
4608 | } |
4609 | |
4610 | /// Provide fast operand accessors |
4611 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
4612 | |
4613 | bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; } |
4614 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4615 | |
4616 | /// Convenience accessor. |
4617 | CleanupPadInst *getCleanupPad() const { |
4618 | return cast<CleanupPadInst>(Op<0>()); |
4619 | } |
4620 | void setCleanupPad(CleanupPadInst *CleanupPad) { |
4621 | assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail ( "CleanupPad", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4621, __PRETTY_FUNCTION__)); |
4622 | Op<0>() = CleanupPad; |
4623 | } |
4624 | |
4625 | unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } |
4626 | |
4627 | BasicBlock *getUnwindDest() const { |
4628 | return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; |
4629 | } |
4630 | void setUnwindDest(BasicBlock *NewDest) { |
4631 | assert(NewDest)((NewDest) ? static_cast<void> (0) : __assert_fail ("NewDest" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4631, __PRETTY_FUNCTION__)); |
4632 | assert(hasUnwindDest())((hasUnwindDest()) ? static_cast<void> (0) : __assert_fail ("hasUnwindDest()", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4632, __PRETTY_FUNCTION__)); |
4633 | Op<1>() = NewDest; |
4634 | } |
4635 | |
4636 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4637 | static bool classof(const Instruction *I) { |
4638 | return (I->getOpcode() == Instruction::CleanupRet); |
4639 | } |
4640 | static bool classof(const Value *V) { |
4641 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4642 | } |
4643 | |
4644 | private: |
4645 | BasicBlock *getSuccessor(unsigned Idx) const { |
4646 | assert(Idx == 0)((Idx == 0) ? static_cast<void> (0) : __assert_fail ("Idx == 0" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4646, __PRETTY_FUNCTION__)); |
4647 | return getUnwindDest(); |
4648 | } |
4649 | |
4650 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4651 | assert(Idx == 0)((Idx == 0) ? static_cast<void> (0) : __assert_fail ("Idx == 0" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4651, __PRETTY_FUNCTION__)); |
4652 | setUnwindDest(B); |
4653 | } |
4654 | |
4655 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4656 | // method so that subclasses cannot accidentally use it. |
4657 | void setInstructionSubclassData(unsigned short D) { |
4658 | Instruction::setInstructionSubclassData(D); |
4659 | } |
4660 | }; |
4661 | |
4662 | template <> |
4663 | struct OperandTraits<CleanupReturnInst> |
4664 | : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; |
4665 | |
4666 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() { return OperandTraits<CleanupReturnInst>::op_begin(this ); } CleanupReturnInst::const_op_iterator CleanupReturnInst:: op_begin() const { return OperandTraits<CleanupReturnInst> ::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst ::op_iterator CleanupReturnInst::op_end() { return OperandTraits <CleanupReturnInst>::op_end(this); } CleanupReturnInst:: const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits <CleanupReturnInst>::op_end(const_cast<CleanupReturnInst *>(this)); } Value *CleanupReturnInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<CleanupReturnInst >::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4666, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<CleanupReturnInst>::op_begin(const_cast <CleanupReturnInst*>(this))[i_nocapture].get()); } void CleanupReturnInst::setOperand(unsigned i_nocapture, Value *Val_nocapture ) { ((i_nocapture < OperandTraits<CleanupReturnInst> ::operands(this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4666, __PRETTY_FUNCTION__)); OperandTraits<CleanupReturnInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned CleanupReturnInst::getNumOperands() const { return OperandTraits <CleanupReturnInst>::operands(this); } template <int Idx_nocapture> Use &CleanupReturnInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &CleanupReturnInst::Op() const { return this ->OpFrom<Idx_nocapture>(this); } |
4667 | |
4668 | //===----------------------------------------------------------------------===// |
4669 | // UnreachableInst Class |
4670 | //===----------------------------------------------------------------------===// |
4671 | |
4672 | //===--------------------------------------------------------------------------- |
4673 | /// This function has undefined behavior. In particular, the |
4674 | /// presence of this instruction indicates some higher level knowledge that the |
4675 | /// end of the block cannot be reached. |
4676 | /// |
4677 | class UnreachableInst : public Instruction { |
4678 | protected: |
4679 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4680 | friend class Instruction; |
4681 | |
4682 | UnreachableInst *cloneImpl() const; |
4683 | |
4684 | public: |
4685 | explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); |
4686 | explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); |
4687 | |
4688 | // allocate space for exactly zero operands |
4689 | void *operator new(size_t s) { |
4690 | return User::operator new(s, 0); |
4691 | } |
4692 | |
4693 | unsigned getNumSuccessors() const { return 0; } |
4694 | |
4695 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4696 | static bool classof(const Instruction *I) { |
4697 | return I->getOpcode() == Instruction::Unreachable; |
4698 | } |
4699 | static bool classof(const Value *V) { |
4700 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4701 | } |
4702 | |
4703 | private: |
4704 | BasicBlock *getSuccessor(unsigned idx) const { |
4705 | llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4705); |
4706 | } |
4707 | |
4708 | void setSuccessor(unsigned idx, BasicBlock *B) { |
4709 | llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 4709); |
4710 | } |
4711 | }; |
4712 | |
//===----------------------------------------------------------------------===//
//                                 TruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical TruncInst
  TruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  TruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The (smaller) type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  TruncInst(
    Value *S,                     ///< The value to be truncated
    Type *Ty,                     ///< The (smaller) type to truncate to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Trunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4751 | |
//===----------------------------------------------------------------------===//
//                                 ZExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ZExtInst
  ZExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  ZExtInst(
    Value *S,                           ///< The value to be zero extended
    Type *Ty,                           ///< The type to zero extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end semantics.
  ZExtInst(
    Value *S,                     ///< The value to be zero extended
    Type *Ty,                     ///< The type to zero extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == ZExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4790 | |
//===----------------------------------------------------------------------===//
//                                 SExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SExtInst
  SExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SExtInst(
    Value *S,                           ///< The value to be sign extended
    Type *Ty,                           ///< The type to sign extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SExtInst(
    Value *S,                     ///< The value to be sign extended
    Type *Ty,                     ///< The type to sign extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4829 | |
//===----------------------------------------------------------------------===//
//                                 FPTruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPTruncInst
  FPTruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPTruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  /// (comment fixed: this overload inserts at the end of a block, not before
  /// an instruction — the InsertAtEnd parameter below makes that clear)
  FPTruncInst(
    Value *S,                     ///< The value to be truncated
    Type *Ty,                     ///< The type to truncate to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPTrunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4868 | |
//===----------------------------------------------------------------------===//
//                                 FPExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst
  FPExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPExtInst(
    Value *S,                           ///< The value to be extended
    Type *Ty,                           ///< The type to extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPExtInst(
    Value *S,                     ///< The value to be extended
    Type *Ty,                     ///< The type to extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4907 | |
//===----------------------------------------------------------------------===//
//                                 UIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  UIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4946 | |
//===----------------------------------------------------------------------===//
//                                 SIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4985 | |
//===----------------------------------------------------------------------===//
//                                 FPToUIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to unsigned integer
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToUIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5024 | |
//===----------------------------------------------------------------------===//
//                                 FPToSIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToSIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5063 | |
//===----------------------------------------------------------------------===//
//                                 IntToPtrInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  // NOTE(review): unlike the sibling cast classes in this header, the friend
  // declaration and cloneImpl here sit in the public section — confirm this
  // is intentional before "fixing" the inconsistency.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  IntToPtrInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd      ///< The block to insert the instruction into
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5106 | |
//===----------------------------------------------------------------------===//
//                                 PtrToIntInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  PtrToIntInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5157 | |
//===----------------------------------------------------------------------===//
//                                 BitCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S,                     ///< The value to be casted
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5196 | |
//===----------------------------------------------------------------------===//
//                               AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S,                     ///< The value to be casted
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
5261 | |
5262 | /// A helper function that returns the pointer operand of a load or store |
5263 | /// instruction. Returns nullptr if not load or store. |
5264 | inline const Value *getLoadStorePointerOperand(const Value *V) { |
5265 | if (auto *Load = dyn_cast<LoadInst>(V)) |
5266 | return Load->getPointerOperand(); |
5267 | if (auto *Store = dyn_cast<StoreInst>(V)) |
5268 | return Store->getPointerOperand(); |
5269 | return nullptr; |
5270 | } |
5271 | inline Value *getLoadStorePointerOperand(Value *V) { |
5272 | return const_cast<Value *>( |
5273 | getLoadStorePointerOperand(static_cast<const Value *>(V))); |
5274 | } |
5275 | |
5276 | /// A helper function that returns the pointer operand of a load, store |
5277 | /// or GEP instruction. Returns nullptr if not load, store, or GEP. |
5278 | inline const Value *getPointerOperand(const Value *V) { |
5279 | if (auto *Ptr = getLoadStorePointerOperand(V)) |
5280 | return Ptr; |
5281 | if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) |
5282 | return Gep->getPointerOperand(); |
5283 | return nullptr; |
5284 | } |
5285 | inline Value *getPointerOperand(Value *V) { |
5286 | return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); |
5287 | } |
5288 | |
5289 | /// A helper function that returns the alignment of load or store instruction. |
5290 | inline MaybeAlign getLoadStoreAlignment(Value *I) { |
5291 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(((isa<LoadInst>(I) || isa<StoreInst>(I)) && "Expected Load or Store instruction") ? static_cast<void> (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 5292, __PRETTY_FUNCTION__)) |
5292 | "Expected Load or Store instruction")(((isa<LoadInst>(I) || isa<StoreInst>(I)) && "Expected Load or Store instruction") ? static_cast<void> (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 5292, __PRETTY_FUNCTION__)); |
5293 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5294 | return MaybeAlign(LI->getAlignment()); |
5295 | return MaybeAlign(cast<StoreInst>(I)->getAlignment()); |
5296 | } |
5297 | |
5298 | /// A helper function that returns the address space of the pointer operand of |
5299 | /// load or store instruction. |
5300 | inline unsigned getLoadStoreAddressSpace(Value *I) { |
5301 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(((isa<LoadInst>(I) || isa<StoreInst>(I)) && "Expected Load or Store instruction") ? static_cast<void> (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 5302, __PRETTY_FUNCTION__)) |
5302 | "Expected Load or Store instruction")(((isa<LoadInst>(I) || isa<StoreInst>(I)) && "Expected Load or Store instruction") ? static_cast<void> (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h" , 5302, __PRETTY_FUNCTION__)); |
5303 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5304 | return LI->getPointerAddressSpace(); |
5305 | return cast<StoreInst>(I)->getPointerAddressSpace(); |
5306 | } |
5307 | |
5308 | //===----------------------------------------------------------------------===// |
5309 | // FreezeInst Class |
5310 | //===----------------------------------------------------------------------===// |
5311 | |
5312 | /// This class represents a freeze function that returns random concrete |
5313 | /// value if an operand is either a poison value or an undef value |
5314 | class FreezeInst : public UnaryInstruction { |
5315 | protected: |
5316 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5317 | friend class Instruction; |
5318 | |
5319 | /// Clone an identical FreezeInst |
5320 | FreezeInst *cloneImpl() const; |
5321 | |
5322 | public: |
5323 | explicit FreezeInst(Value *S, |
5324 | const Twine &NameStr = "", |
5325 | Instruction *InsertBefore = nullptr); |
5326 | FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); |
5327 | |
5328 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5329 | static inline bool classof(const Instruction *I) { |
5330 | return I->getOpcode() == Freeze; |
5331 | } |
5332 | static inline bool classof(const Value *V) { |
5333 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5334 | } |
5335 | }; |
5336 | |
5337 | } // end namespace llvm |
5338 | |
5339 | #endif // LLVM_IR_INSTRUCTIONS_H |