Bug Summary

File: llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
Warning: line 681, column 26
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LoopUnrollRuntime.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/Utils -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include 
-internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/Utils -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp

1//===-- UnrollLoopRuntime.cpp - Runtime Loop unrolling utilities ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements some loop unrolling utilities for loops with run-time
10// trip counts. See LoopUnroll.cpp for unrolling loops with compile-time
11// trip counts.
12//
13// The functions in this file are used to generate extra code when the
14// run-time trip count modulo the unroll factor is not 0. When this is the
15// case, we need to generate code to execute these 'left over' iterations.
16//
17// The current strategy generates an if-then-else sequence prior to the
18// unrolled loop to execute the 'left over' iterations before or after the
19// unrolled loop.
20//
21//===----------------------------------------------------------------------===//
22
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/Statistic.h"
25#include "llvm/Analysis/InstructionSimplify.h"
26#include "llvm/Analysis/LoopIterator.h"
27#include "llvm/Analysis/ScalarEvolution.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/Dominators.h"
30#include "llvm/IR/MDBuilder.h"
31#include "llvm/IR/Metadata.h"
32#include "llvm/IR/Module.h"
33#include "llvm/Support/CommandLine.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/raw_ostream.h"
36#include "llvm/Transforms/Utils.h"
37#include "llvm/Transforms/Utils/BasicBlockUtils.h"
38#include "llvm/Transforms/Utils/Cloning.h"
39#include "llvm/Transforms/Utils/Local.h"
40#include "llvm/Transforms/Utils/LoopUtils.h"
41#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
42#include "llvm/Transforms/Utils/UnrollLoop.h"
43#include <algorithm>
44
45using namespace llvm;
46
47#define DEBUG_TYPE"loop-unroll" "loop-unroll"
48
49STATISTIC(NumRuntimeUnrolled,static llvm::Statistic NumRuntimeUnrolled = {"loop-unroll", "NumRuntimeUnrolled"
, "Number of loops unrolled with run-time trip counts"}
50 "Number of loops unrolled with run-time trip counts")static llvm::Statistic NumRuntimeUnrolled = {"loop-unroll", "NumRuntimeUnrolled"
, "Number of loops unrolled with run-time trip counts"}
;
51static cl::opt<bool> UnrollRuntimeMultiExit(
52 "unroll-runtime-multi-exit", cl::init(false), cl::Hidden,
53 cl::desc("Allow runtime unrolling for loops with multiple exits, when "
54 "epilog is generated"));
55static cl::opt<bool> UnrollRuntimeOtherExitPredictable(
56 "unroll-runtime-other-exit-predictable", cl::init(false), cl::Hidden,
57 cl::desc("Assume the non latch exit block to be predictable"));
58
59/// Connect the unrolling prolog code to the original loop.
60/// The unrolling prolog code contains code to execute the
61/// 'extra' iterations if the run-time trip count modulo the
62/// unroll count is non-zero.
63///
64/// This function performs the following:
65/// - Create PHI nodes at prolog end block to combine values
66/// that exit the prolog code and jump around the prolog.
67/// - Add a PHI operand to a PHI node at the loop exit block
68/// for values that exit the prolog and go around the loop.
69/// - Branch around the original loop if the trip count is less
70/// than the unroll factor.
71///
72static void ConnectProlog(Loop *L, Value *BECount, unsigned Count,
73 BasicBlock *PrologExit,
74 BasicBlock *OriginalLoopLatchExit,
75 BasicBlock *PreHeader, BasicBlock *NewPreHeader,
76 ValueToValueMapTy &VMap, DominatorTree *DT,
77 LoopInfo *LI, bool PreserveLCSSA) {
78 // Loop structure should be the following:
79 // Preheader
80 // PrologHeader
81 // ...
82 // PrologLatch
83 // PrologExit
84 // NewPreheader
85 // Header
86 // ...
87 // Latch
88 // LatchExit
89 BasicBlock *Latch = L->getLoopLatch();
90 assert(Latch && "Loop must have a latch")(static_cast<void> (0));
91 BasicBlock *PrologLatch = cast<BasicBlock>(VMap[Latch]);
92
93 // Create a PHI node for each outgoing value from the original loop
94 // (which means it is an outgoing value from the prolog code too).
95 // The new PHI node is inserted in the prolog end basic block.
96 // The new PHI node value is added as an operand of a PHI node in either
97 // the loop header or the loop exit block.
98 for (BasicBlock *Succ : successors(Latch)) {
99 for (PHINode &PN : Succ->phis()) {
100 // Add a new PHI node to the prolog end block and add the
101 // appropriate incoming values.
102 // TODO: This code assumes that the PrologExit (or the LatchExit block for
103 // prolog loop) contains only one predecessor from the loop, i.e. the
104 // PrologLatch. When supporting multiple-exiting block loops, we can have
105 // two or more blocks that have the LatchExit as the target in the
106 // original loop.
107 PHINode *NewPN = PHINode::Create(PN.getType(), 2, PN.getName() + ".unr",
108 PrologExit->getFirstNonPHI());
109 // Adding a value to the new PHI node from the original loop preheader.
110 // This is the value that skips all the prolog code.
111 if (L->contains(&PN)) {
112 // Succ is loop header.
113 NewPN->addIncoming(PN.getIncomingValueForBlock(NewPreHeader),
114 PreHeader);
115 } else {
116 // Succ is LatchExit.
117 NewPN->addIncoming(UndefValue::get(PN.getType()), PreHeader);
118 }
119
120 Value *V = PN.getIncomingValueForBlock(Latch);
121 if (Instruction *I = dyn_cast<Instruction>(V)) {
122 if (L->contains(I)) {
123 V = VMap.lookup(I);
124 }
125 }
126 // Adding a value to the new PHI node from the last prolog block
127 // that was created.
128 NewPN->addIncoming(V, PrologLatch);
129
130 // Update the existing PHI node operand with the value from the
131 // new PHI node. How this is done depends on if the existing
132 // PHI node is in the original loop block, or the exit block.
133 if (L->contains(&PN))
134 PN.setIncomingValueForBlock(NewPreHeader, NewPN);
135 else
136 PN.addIncoming(NewPN, PrologExit);
137 }
138 }
139
140 // Make sure that created prolog loop is in simplified form
141 SmallVector<BasicBlock *, 4> PrologExitPreds;
142 Loop *PrologLoop = LI->getLoopFor(PrologLatch);
143 if (PrologLoop) {
144 for (BasicBlock *PredBB : predecessors(PrologExit))
145 if (PrologLoop->contains(PredBB))
146 PrologExitPreds.push_back(PredBB);
147
148 SplitBlockPredecessors(PrologExit, PrologExitPreds, ".unr-lcssa", DT, LI,
149 nullptr, PreserveLCSSA);
150 }
151
152 // Create a branch around the original loop, which is taken if there are no
153 // iterations remaining to be executed after running the prologue.
154 Instruction *InsertPt = PrologExit->getTerminator();
155 IRBuilder<> B(InsertPt);
156
157 assert(Count != 0 && "nonsensical Count!")(static_cast<void> (0));
158
159 // If BECount <u (Count - 1) then (BECount + 1) % Count == (BECount + 1)
160 // This means %xtraiter is (BECount + 1) and all of the iterations of this
161 // loop were executed by the prologue. Note that if BECount <u (Count - 1)
162 // then (BECount + 1) cannot unsigned-overflow.
163 Value *BrLoopExit =
164 B.CreateICmpULT(BECount, ConstantInt::get(BECount->getType(), Count - 1));
165 // Split the exit to maintain loop canonicalization guarantees
166 SmallVector<BasicBlock *, 4> Preds(predecessors(OriginalLoopLatchExit));
167 SplitBlockPredecessors(OriginalLoopLatchExit, Preds, ".unr-lcssa", DT, LI,
168 nullptr, PreserveLCSSA);
169 // Add the branch to the exit block (around the unrolled loop)
170 B.CreateCondBr(BrLoopExit, OriginalLoopLatchExit, NewPreHeader);
171 InsertPt->eraseFromParent();
172 if (DT) {
173 auto *NewDom = DT->findNearestCommonDominator(OriginalLoopLatchExit,
174 PrologExit);
175 DT->changeImmediateDominator(OriginalLoopLatchExit, NewDom);
176 }
177}
178
179/// Connect the unrolling epilog code to the original loop.
180/// The unrolling epilog code contains code to execute the
181/// 'extra' iterations if the run-time trip count modulo the
182/// unroll count is non-zero.
183///
184/// This function performs the following:
185/// - Update PHI nodes at the unrolling loop exit and epilog loop exit
186/// - Create PHI nodes at the unrolling loop exit to combine
187/// values that exit the unrolling loop code and jump around it.
188/// - Update PHI operands in the epilog loop by the new PHI nodes
189/// - Branch around the epilog loop if extra iters (ModVal) is zero.
190///
191static void ConnectEpilog(Loop *L, Value *ModVal, BasicBlock *NewExit,
192 BasicBlock *Exit, BasicBlock *PreHeader,
193 BasicBlock *EpilogPreHeader, BasicBlock *NewPreHeader,
194 ValueToValueMapTy &VMap, DominatorTree *DT,
195 LoopInfo *LI, bool PreserveLCSSA) {
196 BasicBlock *Latch = L->getLoopLatch();
197 assert(Latch && "Loop must have a latch")(static_cast<void> (0));
198 BasicBlock *EpilogLatch = cast<BasicBlock>(VMap[Latch]);
199
200 // Loop structure should be the following:
201 //
202 // PreHeader
203 // NewPreHeader
204 // Header
205 // ...
206 // Latch
207 // NewExit (PN)
208 // EpilogPreHeader
209 // EpilogHeader
210 // ...
211 // EpilogLatch
212 // Exit (EpilogPN)
213
214 // Update PHI nodes at NewExit and Exit.
215 for (PHINode &PN : NewExit->phis()) {
216 // PN should be used in another PHI located in Exit block as
217 // Exit was split by SplitBlockPredecessors into Exit and NewExit
218 // Basicaly it should look like:
219 // NewExit:
220 // PN = PHI [I, Latch]
221 // ...
222 // Exit:
223 // EpilogPN = PHI [PN, EpilogPreHeader], [X, Exit2], [Y, Exit2.epil]
224 //
225 // Exits from non-latch blocks point to the original exit block and the
226 // epilogue edges have already been added.
227 //
228 // There is EpilogPreHeader incoming block instead of NewExit as
229 // NewExit was spilt 1 more time to get EpilogPreHeader.
230 assert(PN.hasOneUse() && "The phi should have 1 use")(static_cast<void> (0));
231 PHINode *EpilogPN = cast<PHINode>(PN.use_begin()->getUser());
232 assert(EpilogPN->getParent() == Exit && "EpilogPN should be in Exit block")(static_cast<void> (0));
233
234 // Add incoming PreHeader from branch around the Loop
235 PN.addIncoming(UndefValue::get(PN.getType()), PreHeader);
236
237 Value *V = PN.getIncomingValueForBlock(Latch);
238 Instruction *I = dyn_cast<Instruction>(V);
239 if (I && L->contains(I))
240 // If value comes from an instruction in the loop add VMap value.
241 V = VMap.lookup(I);
242 // For the instruction out of the loop, constant or undefined value
243 // insert value itself.
244 EpilogPN->addIncoming(V, EpilogLatch);
245
246 assert(EpilogPN->getBasicBlockIndex(EpilogPreHeader) >= 0 &&(static_cast<void> (0))
247 "EpilogPN should have EpilogPreHeader incoming block")(static_cast<void> (0));
248 // Change EpilogPreHeader incoming block to NewExit.
249 EpilogPN->setIncomingBlock(EpilogPN->getBasicBlockIndex(EpilogPreHeader),
250 NewExit);
251 // Now PHIs should look like:
252 // NewExit:
253 // PN = PHI [I, Latch], [undef, PreHeader]
254 // ...
255 // Exit:
256 // EpilogPN = PHI [PN, NewExit], [VMap[I], EpilogLatch]
257 }
258
259 // Create PHI nodes at NewExit (from the unrolling loop Latch and PreHeader).
260 // Update corresponding PHI nodes in epilog loop.
261 for (BasicBlock *Succ : successors(Latch)) {
262 // Skip this as we already updated phis in exit blocks.
263 if (!L->contains(Succ))
264 continue;
265 for (PHINode &PN : Succ->phis()) {
266 // Add new PHI nodes to the loop exit block and update epilog
267 // PHIs with the new PHI values.
268 PHINode *NewPN = PHINode::Create(PN.getType(), 2, PN.getName() + ".unr",
269 NewExit->getFirstNonPHI());
270 // Adding a value to the new PHI node from the unrolling loop preheader.
271 NewPN->addIncoming(PN.getIncomingValueForBlock(NewPreHeader), PreHeader);
272 // Adding a value to the new PHI node from the unrolling loop latch.
273 NewPN->addIncoming(PN.getIncomingValueForBlock(Latch), Latch);
274
275 // Update the existing PHI node operand with the value from the new PHI
276 // node. Corresponding instruction in epilog loop should be PHI.
277 PHINode *VPN = cast<PHINode>(VMap[&PN]);
278 VPN->setIncomingValueForBlock(EpilogPreHeader, NewPN);
279 }
280 }
281
282 Instruction *InsertPt = NewExit->getTerminator();
283 IRBuilder<> B(InsertPt);
284 Value *BrLoopExit = B.CreateIsNotNull(ModVal, "lcmp.mod");
285 assert(Exit && "Loop must have a single exit block only")(static_cast<void> (0));
286 // Split the epilogue exit to maintain loop canonicalization guarantees
287 SmallVector<BasicBlock*, 4> Preds(predecessors(Exit));
288 SplitBlockPredecessors(Exit, Preds, ".epilog-lcssa", DT, LI, nullptr,
289 PreserveLCSSA);
290 // Add the branch to the exit block (around the unrolling loop)
291 B.CreateCondBr(BrLoopExit, EpilogPreHeader, Exit);
292 InsertPt->eraseFromParent();
293 if (DT) {
294 auto *NewDom = DT->findNearestCommonDominator(Exit, NewExit);
295 DT->changeImmediateDominator(Exit, NewDom);
296 }
297
298 // Split the main loop exit to maintain canonicalization guarantees.
299 SmallVector<BasicBlock*, 4> NewExitPreds{Latch};
300 SplitBlockPredecessors(NewExit, NewExitPreds, ".loopexit", DT, LI, nullptr,
301 PreserveLCSSA);
302}
303
304/// Create a clone of the blocks in a loop and connect them together. A new
305/// loop will be created including all cloned blocks, and the iterator of the
306/// new loop switched to count NewIter down to 0.
307/// The cloned blocks should be inserted between InsertTop and InsertBot.
308/// InsertTop should be new preheader, InsertBot new loop exit.
309/// Returns the new cloned loop that is created.
310static Loop *
311CloneLoopBlocks(Loop *L, Value *NewIter, const bool UseEpilogRemainder,
312 const bool UnrollRemainder,
313 BasicBlock *InsertTop,
314 BasicBlock *InsertBot, BasicBlock *Preheader,
315 std::vector<BasicBlock *> &NewBlocks, LoopBlocksDFS &LoopBlocks,
316 ValueToValueMapTy &VMap, DominatorTree *DT, LoopInfo *LI) {
317 StringRef suffix = UseEpilogRemainder ? "epil" : "prol";
318 BasicBlock *Header = L->getHeader();
319 BasicBlock *Latch = L->getLoopLatch();
320 Function *F = Header->getParent();
321 LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
322 LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
323 Loop *ParentLoop = L->getParentLoop();
324 NewLoopsMap NewLoops;
325 NewLoops[ParentLoop] = ParentLoop;
326
327 // For each block in the original loop, create a new copy,
328 // and update the value map with the newly created values.
329 for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
330 BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, "." + suffix, F);
331 NewBlocks.push_back(NewBB);
332
333 addClonedBlockToLoopInfo(*BB, NewBB, LI, NewLoops);
334
335 VMap[*BB] = NewBB;
336 if (Header == *BB) {
337 // For the first block, add a CFG connection to this newly
338 // created block.
339 InsertTop->getTerminator()->setSuccessor(0, NewBB);
340 }
341
342 if (DT) {
343 if (Header == *BB) {
344 // The header is dominated by the preheader.
345 DT->addNewBlock(NewBB, InsertTop);
346 } else {
347 // Copy information from original loop to unrolled loop.
348 BasicBlock *IDomBB = DT->getNode(*BB)->getIDom()->getBlock();
349 DT->addNewBlock(NewBB, cast<BasicBlock>(VMap[IDomBB]));
350 }
351 }
352
353 if (Latch == *BB) {
354 // For the last block, create a loop back to cloned head.
355 VMap.erase((*BB)->getTerminator());
356 BasicBlock *FirstLoopBB = cast<BasicBlock>(VMap[Header]);
357 BranchInst *LatchBR = cast<BranchInst>(NewBB->getTerminator());
358 IRBuilder<> Builder(LatchBR);
359 PHINode *NewIdx = PHINode::Create(NewIter->getType(), 2,
360 suffix + ".iter",
361 FirstLoopBB->getFirstNonPHI());
362 Value *IdxSub =
363 Builder.CreateSub(NewIdx, ConstantInt::get(NewIdx->getType(), 1),
364 NewIdx->getName() + ".sub");
365 Value *IdxCmp =
366 Builder.CreateIsNotNull(IdxSub, NewIdx->getName() + ".cmp");
367 Builder.CreateCondBr(IdxCmp, FirstLoopBB, InsertBot);
368 NewIdx->addIncoming(NewIter, InsertTop);
369 NewIdx->addIncoming(IdxSub, NewBB);
370 LatchBR->eraseFromParent();
371 }
372 }
373
374 // Change the incoming values to the ones defined in the preheader or
375 // cloned loop.
376 for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
377 PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
378 unsigned idx = NewPHI->getBasicBlockIndex(Preheader);
379 NewPHI->setIncomingBlock(idx, InsertTop);
380 BasicBlock *NewLatch = cast<BasicBlock>(VMap[Latch]);
381 idx = NewPHI->getBasicBlockIndex(Latch);
382 Value *InVal = NewPHI->getIncomingValue(idx);
383 NewPHI->setIncomingBlock(idx, NewLatch);
384 if (Value *V = VMap.lookup(InVal))
385 NewPHI->setIncomingValue(idx, V);
386 }
387
388 Loop *NewLoop = NewLoops[L];
389 assert(NewLoop && "L should have been cloned")(static_cast<void> (0));
390 MDNode *LoopID = NewLoop->getLoopID();
391
392 // Only add loop metadata if the loop is not going to be completely
393 // unrolled.
394 if (UnrollRemainder)
395 return NewLoop;
396
397 Optional<MDNode *> NewLoopID = makeFollowupLoopID(
398 LoopID, {LLVMLoopUnrollFollowupAll, LLVMLoopUnrollFollowupRemainder});
399 if (NewLoopID.hasValue()) {
400 NewLoop->setLoopID(NewLoopID.getValue());
401
402 // Do not setLoopAlreadyUnrolled if loop attributes have been defined
403 // explicitly.
404 return NewLoop;
405 }
406
407 // Add unroll disable metadata to disable future unrolling for this loop.
408 NewLoop->setLoopAlreadyUnrolled();
409 return NewLoop;
410}
411
412/// Returns true if we can safely unroll a multi-exit/exiting loop. OtherExits
413/// is populated with all the loop exit blocks other than the LatchExit block.
414static bool canSafelyUnrollMultiExitLoop(Loop *L, BasicBlock *LatchExit,
415 bool PreserveLCSSA,
416 bool UseEpilogRemainder) {
417
418 // We currently have some correctness constrains in unrolling a multi-exit
419 // loop. Check for these below.
420
421 // We rely on LCSSA form being preserved when the exit blocks are transformed.
422 // (Note that only an off-by-default mode of the old PM disables PreserveLCCA.)
423 if (!PreserveLCSSA)
424 return false;
425
426 // All constraints have been satisfied.
427 return true;
428}
429
430/// Returns true if we can profitably unroll the multi-exit loop L. Currently,
431/// we return true only if UnrollRuntimeMultiExit is set to true.
432static bool canProfitablyUnrollMultiExitLoop(
433 Loop *L, SmallVectorImpl<BasicBlock *> &OtherExits, BasicBlock *LatchExit,
434 bool PreserveLCSSA, bool UseEpilogRemainder) {
435
436#if !defined(NDEBUG1)
437 assert(canSafelyUnrollMultiExitLoop(L, LatchExit, PreserveLCSSA,(static_cast<void> (0))
438 UseEpilogRemainder) &&(static_cast<void> (0))
439 "Should be safe to unroll before checking profitability!")(static_cast<void> (0));
440#endif
441
442 // Priority goes to UnrollRuntimeMultiExit if it's supplied.
443 if (UnrollRuntimeMultiExit.getNumOccurrences())
444 return UnrollRuntimeMultiExit;
445
446 // TODO: We used to bail out for correctness (now fixed). Under what
447 // circumstances is this case profitable to allow?
448 if (!LatchExit->getSinglePredecessor())
449 return false;
450
451 // TODO: We used to bail out for correctness (now fixed). Under what
452 // circumstances is this case profitable to allow?
453 if (UseEpilogRemainder && L->getParentLoop())
454 return false;
455
456 // The main pain point with multi-exit loop unrolling is that once unrolled,
457 // we will not be able to merge all blocks into a straight line code.
458 // There are branches within the unrolled loop that go to the OtherExits.
459 // The second point is the increase in code size, but this is true
460 // irrespective of multiple exits.
461
462 // Note: Both the heuristics below are coarse grained. We are essentially
463 // enabling unrolling of loops that have a single side exit other than the
464 // normal LatchExit (i.e. exiting into a deoptimize block).
465 // The heuristics considered are:
466 // 1. low number of branches in the unrolled version.
467 // 2. high predictability of these extra branches.
468 // We avoid unrolling loops that have more than two exiting blocks. This
469 // limits the total number of branches in the unrolled loop to be atmost
470 // the unroll factor (since one of the exiting blocks is the latch block).
471 SmallVector<BasicBlock*, 4> ExitingBlocks;
472 L->getExitingBlocks(ExitingBlocks);
473 if (ExitingBlocks.size() > 2)
474 return false;
475
476 // Allow unrolling of loops with no non latch exit blocks.
477 if (OtherExits.size() == 0)
478 return true;
479
480 // The second heuristic is that L has one exit other than the latchexit and
481 // that exit is a deoptimize block. We know that deoptimize blocks are rarely
482 // taken, which also implies the branch leading to the deoptimize block is
483 // highly predictable. When UnrollRuntimeOtherExitPredictable is specified, we
484 // assume the other exit branch is predictable even if it has no deoptimize
485 // call.
486 return (OtherExits.size() == 1 &&
487 (UnrollRuntimeOtherExitPredictable ||
488 OtherExits[0]->getTerminatingDeoptimizeCall()));
489 // TODO: These can be fine-tuned further to consider code size or deopt states
490 // that are captured by the deoptimize exit block.
491 // Also, we can extend this to support more cases, if we actually
492 // know of kinds of multiexit loops that would benefit from unrolling.
493}
494
495// Assign the maximum possible trip count as the back edge weight for the
496// remainder loop if the original loop comes with a branch weight.
497static void updateLatchBranchWeightsForRemainderLoop(Loop *OrigLoop,
498 Loop *RemainderLoop,
499 uint64_t UnrollFactor) {
500 uint64_t TrueWeight, FalseWeight;
501 BranchInst *LatchBR =
502 cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());
503 if (!LatchBR->extractProfMetadata(TrueWeight, FalseWeight))
504 return;
505 uint64_t ExitWeight = LatchBR->getSuccessor(0) == OrigLoop->getHeader()
506 ? FalseWeight
507 : TrueWeight;
508 assert(UnrollFactor > 1)(static_cast<void> (0));
509 uint64_t BackEdgeWeight = (UnrollFactor - 1) * ExitWeight;
510 BasicBlock *Header = RemainderLoop->getHeader();
511 BasicBlock *Latch = RemainderLoop->getLoopLatch();
512 auto *RemainderLatchBR = cast<BranchInst>(Latch->getTerminator());
513 unsigned HeaderIdx = (RemainderLatchBR->getSuccessor(0) == Header ? 0 : 1);
514 MDBuilder MDB(RemainderLatchBR->getContext());
515 MDNode *WeightNode =
516 HeaderIdx ? MDB.createBranchWeights(ExitWeight, BackEdgeWeight)
517 : MDB.createBranchWeights(BackEdgeWeight, ExitWeight);
518 RemainderLatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
519}
520
521/// Calculate ModVal = (BECount + 1) % Count on the abstract integer domain
522/// accounting for the possibility of unsigned overflow in the 2s complement
523/// domain. Preconditions:
524/// 1) TripCount = BECount + 1 (allowing overflow)
525/// 2) Log2(Count) <= BitWidth(BECount)
526static Value *CreateTripRemainder(IRBuilder<> &B, Value *BECount,
527 Value *TripCount, unsigned Count) {
528 // Note that TripCount is BECount + 1.
529 if (isPowerOf2_32(Count))
530 // If the expression is zero, then either:
531 // 1. There are no iterations to be run in the prolog/epilog loop.
532 // OR
533 // 2. The addition computing TripCount overflowed.
534 //
535 // If (2) is true, we know that TripCount really is (1 << BEWidth) and so
536 // the number of iterations that remain to be run in the original loop is a
537 // multiple Count == (1 << Log2(Count)) because Log2(Count) <= BEWidth (a
538 // precondition of this method).
539 return B.CreateAnd(TripCount, Count - 1, "xtraiter");
540
541 // As (BECount + 1) can potentially unsigned overflow we count
542 // (BECount % Count) + 1 which is overflow safe as BECount % Count < Count.
543 Constant *CountC = ConstantInt::get(BECount->getType(), Count);
544 Value *ModValTmp = B.CreateURem(BECount, CountC);
545 Value *ModValAdd = B.CreateAdd(ModValTmp,
546 ConstantInt::get(ModValTmp->getType(), 1));
547 // At that point (BECount % Count) + 1 could be equal to Count.
548 // To handle this case we need to take mod by Count one more time.
549 return B.CreateURem(ModValAdd, CountC, "xtraiter");
550}
551
552
553/// Insert code in the prolog/epilog code when unrolling a loop with a
554/// run-time trip-count.
555///
556/// This method assumes that the loop unroll factor is total number
557/// of loop bodies in the loop after unrolling. (Some folks refer
558/// to the unroll factor as the number of *extra* copies added).
559/// We assume also that the loop unroll factor is a power-of-two. So, after
560/// unrolling the loop, the number of loop bodies executed is 2,
561/// 4, 8, etc. Note - LLVM converts the if-then-sequence to a switch
562/// instruction in SimplifyCFG.cpp. Then, the backend decides how code for
563/// the switch instruction is generated.
564///
565/// ***Prolog case***
566/// extraiters = tripcount % loopfactor
567/// if (extraiters == 0) jump Loop:
568/// else jump Prol:
569/// Prol: LoopBody;
570/// extraiters -= 1 // Omitted if unroll factor is 2.
571/// if (extraiters != 0) jump Prol: // Omitted if unroll factor is 2.
572/// if (tripcount < loopfactor) jump End:
573/// Loop:
574/// ...
575/// End:
576///
577/// ***Epilog case***
578/// extraiters = tripcount % loopfactor
579/// if (tripcount < loopfactor) jump LoopExit:
580/// unroll_iters = tripcount - extraiters
581/// Loop: LoopBody; (executes unroll_iter times);
582/// unroll_iter -= 1
583/// if (unroll_iter != 0) jump Loop:
584/// LoopExit:
585/// if (extraiters == 0) jump EpilExit:
586/// Epil: LoopBody; (executes extraiters times)
587/// extraiters -= 1 // Omitted if unroll factor is 2.
588/// if (extraiters != 0) jump Epil: // Omitted if unroll factor is 2.
589/// EpilExit:
590
591bool llvm::UnrollRuntimeLoopRemainder(
592 Loop *L, unsigned Count, bool AllowExpensiveTripCount,
593 bool UseEpilogRemainder, bool UnrollRemainder, bool ForgetAllSCEV,
594 LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
595 const TargetTransformInfo *TTI, bool PreserveLCSSA, Loop **ResultLoop) {
596 LLVM_DEBUG(dbgs() << "Trying runtime unrolling on Loop: \n")do { } while (false);
1
Loop condition is false. Exiting loop
597 LLVM_DEBUG(L->dump())do { } while (false);
2
Loop condition is false. Exiting loop
598 LLVM_DEBUG(UseEpilogRemainder ? dbgs() << "Using epilog remainder.\n"do { } while (false)
3
Loop condition is false. Exiting loop
599 : dbgs() << "Using prolog remainder.\n")do { } while (false);
600
601 // Make sure the loop is in canonical form.
602 if (!L->isLoopSimplifyForm()) {
4
Assuming the condition is false
5
Taking false branch
603 LLVM_DEBUG(dbgs() << "Not in simplify form!\n")do { } while (false);
604 return false;
605 }
606
607 // Guaranteed by LoopSimplifyForm.
608 BasicBlock *Latch = L->getLoopLatch();
609 BasicBlock *Header = L->getHeader();
6
'Header' initialized here
610
611 BranchInst *LatchBR = cast<BranchInst>(Latch->getTerminator());
7
The object is a 'BranchInst'
612
613 if (!LatchBR
7.1
'LatchBR' is non-null
7.1
'LatchBR' is non-null
7.1
'LatchBR' is non-null
|| LatchBR->isUnconditional()) {
8
Calling 'BranchInst::isUnconditional'
11
Returning from 'BranchInst::isUnconditional'
614 // The loop-rotate pass can be helpful to avoid this in many cases.
615 LLVM_DEBUG(do { } while (false)
616 dbgs()do { } while (false)
617 << "Loop latch not terminated by a conditional branch.\n")do { } while (false);
618 return false;
619 }
620
621 unsigned ExitIndex = LatchBR->getSuccessor(0) == Header ? 1 : 0;
12
Taking false branch
13
Assuming pointer value is null
14
'?' condition is true
622 BasicBlock *LatchExit = LatchBR->getSuccessor(ExitIndex);
623
624 if (L->contains(LatchExit)) {
15
Assuming the condition is false
16
Taking false branch
625 // Cloning the loop basic blocks (`CloneLoopBlocks`) requires that one of the
626 // targets of the Latch be an exit block out of the loop.
627 LLVM_DEBUG(do { } while (false)
628 dbgs()do { } while (false)
629 << "One of the loop latch successors must be the exit block.\n")do { } while (false);
630 return false;
631 }
632
633 // These are exit blocks other than the target of the latch exiting block.
634 SmallVector<BasicBlock *, 4> OtherExits;
635 L->getUniqueNonLatchExitBlocks(OtherExits);
636 bool isMultiExitUnrollingEnabled =
637 canSafelyUnrollMultiExitLoop(L, LatchExit, PreserveLCSSA,
638 UseEpilogRemainder) &&
639 canProfitablyUnrollMultiExitLoop(L, OtherExits, LatchExit, PreserveLCSSA,
640 UseEpilogRemainder);
641 // Support only single exit and exiting block unless multi-exit loop unrolling is enabled.
642 if (!isMultiExitUnrollingEnabled
16.1
'isMultiExitUnrollingEnabled' is false
16.1
'isMultiExitUnrollingEnabled' is false
16.1
'isMultiExitUnrollingEnabled' is false
&&
19
Taking false branch
643 (!L->getExitingBlock() || OtherExits.size())) {
17
Assuming the condition is false
18
Assuming the condition is false
644 LLVM_DEBUG(do { } while (false)
645 dbgs()do { } while (false)
646 << "Multiple exit/exiting blocks in loop and multi-exit unrolling not "do { } while (false)
647 "enabled!\n")do { } while (false);
648 return false;
649 }
650 // Use Scalar Evolution to compute the trip count. This allows more loops to
651 // be unrolled than relying on induction var simplification.
652 if (!SE)
20
Assuming 'SE' is non-null
21
Taking false branch
653 return false;
654
655 // Only unroll loops with a computable trip count, and the trip count needs
656 // to be an int value (allowing a pointer type is a TODO item).
657 // We calculate the backedge count by using getExitCount on the Latch block,
658 // which is proven to be the only exiting block in this loop. This is same as
659 // calculating getBackedgeTakenCount on the loop (which computes SCEV for all
660 // exiting blocks).
661 const SCEV *BECountSC = SE->getExitCount(L, Latch);
662 if (isa<SCEVCouldNotCompute>(BECountSC) ||
22
Assuming 'BECountSC' is not a 'SCEVCouldNotCompute'
27
Taking false branch
663 !BECountSC->getType()->isIntegerTy()) {
23
Calling 'Type::isIntegerTy'
26
Returning from 'Type::isIntegerTy'
664 LLVM_DEBUG(dbgs() << "Could not compute exit block SCEV\n")do { } while (false);
665 return false;
666 }
667
668 unsigned BEWidth = cast<IntegerType>(BECountSC->getType())->getBitWidth();
28
The object is a 'IntegerType'
669
670 // Add 1 since the backedge count doesn't include the first loop iteration.
671 // (Note that overflow can occur, this is handled explicitly below)
672 const SCEV *TripCountSC =
673 SE->getAddExpr(BECountSC, SE->getConstant(BECountSC->getType(), 1));
674 if (isa<SCEVCouldNotCompute>(TripCountSC)) {
29
Assuming 'TripCountSC' is not a 'SCEVCouldNotCompute'
30
Taking false branch
675 LLVM_DEBUG(dbgs() << "Could not compute trip count SCEV.\n")do { } while (false);
676 return false;
677 }
678
679 BasicBlock *PreHeader = L->getLoopPreheader();
680 BranchInst *PreHeaderBR = cast<BranchInst>(PreHeader->getTerminator());
31
The object is a 'BranchInst'
681 const DataLayout &DL = Header->getModule()->getDataLayout();
32
Called C++ object pointer is null
682 SCEVExpander Expander(*SE, DL, "loop-unroll");
683 if (!AllowExpensiveTripCount &&
684 Expander.isHighCostExpansion(TripCountSC, L, SCEVCheapExpansionBudget,
685 TTI, PreHeaderBR)) {
686 LLVM_DEBUG(dbgs() << "High cost for expanding trip count scev!\n")do { } while (false);
687 return false;
688 }
689
690 // This constraint lets us deal with an overflowing trip count easily; see the
691 // comment on ModVal below.
692 if (Log2_32(Count) > BEWidth) {
693 LLVM_DEBUG(do { } while (false)
694 dbgs()do { } while (false)
695 << "Count failed constraint on overflow trip count calculation.\n")do { } while (false);
696 return false;
697 }
698
699 // Loop structure is the following:
700 //
701 // PreHeader
702 // Header
703 // ...
704 // Latch
705 // LatchExit
706
707 BasicBlock *NewPreHeader;
708 BasicBlock *NewExit = nullptr;
709 BasicBlock *PrologExit = nullptr;
710 BasicBlock *EpilogPreHeader = nullptr;
711 BasicBlock *PrologPreHeader = nullptr;
712
713 if (UseEpilogRemainder) {
714 // If epilog remainder
715 // Split PreHeader to insert a branch around loop for unrolling.
716 NewPreHeader = SplitBlock(PreHeader, PreHeader->getTerminator(), DT, LI);
717 NewPreHeader->setName(PreHeader->getName() + ".new");
718 // Split LatchExit to create phi nodes from branch above.
719 NewExit = SplitBlockPredecessors(LatchExit, {Latch}, ".unr-lcssa", DT, LI,
720 nullptr, PreserveLCSSA);
721 // NewExit gets its DebugLoc from LatchExit, which is not part of the
722 // original Loop.
723 // Fix this by setting Loop's DebugLoc to NewExit.
724 auto *NewExitTerminator = NewExit->getTerminator();
725 NewExitTerminator->setDebugLoc(Header->getTerminator()->getDebugLoc());
726 // Split NewExit to insert epilog remainder loop.
727 EpilogPreHeader = SplitBlock(NewExit, NewExitTerminator, DT, LI);
728 EpilogPreHeader->setName(Header->getName() + ".epil.preheader");
729
730 // If the latch exits from multiple level of nested loops, then
731 // by assumption there must be another loop exit which branches to the
732 // outer loop and we must adjust the loop for the newly inserted blocks
733 // to account for the fact that our epilogue is still in the same outer
734 // loop. Note that this leaves loopinfo temporarily out of sync with the
735 // CFG until the actual epilogue loop is inserted.
736 if (auto *ParentL = L->getParentLoop())
737 if (LI->getLoopFor(LatchExit) != ParentL) {
738 LI->removeBlock(NewExit);
739 ParentL->addBasicBlockToLoop(NewExit, *LI);
740 LI->removeBlock(EpilogPreHeader);
741 ParentL->addBasicBlockToLoop(EpilogPreHeader, *LI);
742 }
743
744 } else {
745 // If prolog remainder
746 // Split the original preheader twice to insert prolog remainder loop
747 PrologPreHeader = SplitEdge(PreHeader, Header, DT, LI);
748 PrologPreHeader->setName(Header->getName() + ".prol.preheader");
749 PrologExit = SplitBlock(PrologPreHeader, PrologPreHeader->getTerminator(),
750 DT, LI);
751 PrologExit->setName(Header->getName() + ".prol.loopexit");
752 // Split PrologExit to get NewPreHeader.
753 NewPreHeader = SplitBlock(PrologExit, PrologExit->getTerminator(), DT, LI);
754 NewPreHeader->setName(PreHeader->getName() + ".new");
755 }
756 // Loop structure should be the following:
757 // Epilog Prolog
758 //
759 // PreHeader PreHeader
760 // *NewPreHeader *PrologPreHeader
761 // Header *PrologExit
762 // ... *NewPreHeader
763 // Latch Header
764 // *NewExit ...
765 // *EpilogPreHeader Latch
766 // LatchExit LatchExit
767
768 // Calculate conditions for branch around loop for unrolling
769 // in epilog case and around prolog remainder loop in prolog case.
770 // Compute the number of extra iterations required, which is:
771 // extra iterations = run-time trip count % loop unroll factor
772 PreHeaderBR = cast<BranchInst>(PreHeader->getTerminator());
773 Value *TripCount = Expander.expandCodeFor(TripCountSC, TripCountSC->getType(),
774 PreHeaderBR);
775 Value *BECount = Expander.expandCodeFor(BECountSC, BECountSC->getType(),
776 PreHeaderBR);
777 IRBuilder<> B(PreHeaderBR);
778 Value * const ModVal = CreateTripRemainder(B, BECount, TripCount, Count);
779
780 Value *BranchVal =
781 UseEpilogRemainder ? B.CreateICmpULT(BECount,
782 ConstantInt::get(BECount->getType(),
783 Count - 1)) :
784 B.CreateIsNotNull(ModVal, "lcmp.mod");
785 BasicBlock *RemainderLoop = UseEpilogRemainder ? NewExit : PrologPreHeader;
786 BasicBlock *UnrollingLoop = UseEpilogRemainder ? NewPreHeader : PrologExit;
787 // Branch to either remainder (extra iterations) loop or unrolling loop.
788 B.CreateCondBr(BranchVal, RemainderLoop, UnrollingLoop);
789 PreHeaderBR->eraseFromParent();
790 if (DT) {
791 if (UseEpilogRemainder)
792 DT->changeImmediateDominator(NewExit, PreHeader);
793 else
794 DT->changeImmediateDominator(PrologExit, PreHeader);
795 }
796 Function *F = Header->getParent();
797 // Get an ordered list of blocks in the loop to help with the ordering of the
798 // cloned blocks in the prolog/epilog code
799 LoopBlocksDFS LoopBlocks(L);
800 LoopBlocks.perform(LI);
801
802 //
803 // For each extra loop iteration, create a copy of the loop's basic blocks
804 // and generate a condition that branches to the copy depending on the
805 // number of 'left over' iterations.
806 //
807 std::vector<BasicBlock *> NewBlocks;
808 ValueToValueMapTy VMap;
809
810 // Clone all the basic blocks in the loop. If Count is 2, we don't clone
811 // the loop, otherwise we create a cloned loop to execute the extra
812 // iterations. This function adds the appropriate CFG connections.
813 BasicBlock *InsertBot = UseEpilogRemainder ? LatchExit : PrologExit;
814 BasicBlock *InsertTop = UseEpilogRemainder ? EpilogPreHeader : PrologPreHeader;
815 Loop *remainderLoop = CloneLoopBlocks(
816 L, ModVal, UseEpilogRemainder, UnrollRemainder, InsertTop, InsertBot,
817 NewPreHeader, NewBlocks, LoopBlocks, VMap, DT, LI);
818
819 // Assign the maximum possible trip count as the back edge weight for the
820 // remainder loop if the original loop comes with a branch weight.
821 if (remainderLoop && !UnrollRemainder)
822 updateLatchBranchWeightsForRemainderLoop(L, remainderLoop, Count);
823
824 // Insert the cloned blocks into the function.
825 F->getBasicBlockList().splice(InsertBot->getIterator(),
826 F->getBasicBlockList(),
827 NewBlocks[0]->getIterator(),
828 F->end());
829
830 // Now the loop blocks are cloned and the other exiting blocks from the
831 // remainder are connected to the original Loop's exit blocks. The remaining
832 // work is to update the phi nodes in the original loop, and take in the
833 // values from the cloned region.
834 for (auto *BB : OtherExits) {
835 // Given we preserve LCSSA form, we know that the values used outside the
836 // loop will be used through these phi nodes at the exit blocks that are
837 // transformed below.
838 for (PHINode &PN : BB->phis()) {
839 unsigned oldNumOperands = PN.getNumIncomingValues();
840 // Add the incoming values from the remainder code to the end of the phi
841 // node.
842 for (unsigned i = 0; i < oldNumOperands; i++){
843 auto *PredBB =PN.getIncomingBlock(i);
844 if (PredBB == Latch)
845 // The latch exit is handled separately, see connectX
846 continue;
847 if (!L->contains(PredBB))
848 // Even if we had dedicated exits, the code above inserted an
849 // extra branch which can reach the latch exit.
850 continue;
851
852 auto *V = PN.getIncomingValue(i);
853 if (Instruction *I = dyn_cast<Instruction>(V))
854 if (L->contains(I))
855 V = VMap.lookup(I);
856 PN.addIncoming(V, cast<BasicBlock>(VMap[PredBB]));
857 }
858 }
859#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
860 for (BasicBlock *SuccBB : successors(BB)) {
861 assert(!(any_of(OtherExits,(static_cast<void> (0))
862 [SuccBB](BasicBlock *EB) { return EB == SuccBB; }) ||(static_cast<void> (0))
863 SuccBB == LatchExit) &&(static_cast<void> (0))
864 "Breaks the definition of dedicated exits!")(static_cast<void> (0));
865 }
866#endif
867 }
868
869 // Update the immediate dominator of the exit blocks and blocks that are
870 // reachable from the exit blocks. This is needed because we now have paths
871 // from both the original loop and the remainder code reaching the exit
872 // blocks. While the IDom of these exit blocks were from the original loop,
873 // now the IDom is the preheader (which decides whether the original loop or
874 // remainder code should run).
875 if (DT && !L->getExitingBlock()) {
876 SmallVector<BasicBlock *, 16> ChildrenToUpdate;
877 // NB! We have to examine the dom children of all loop blocks, not just
878 // those which are the IDom of the exit blocks. This is because blocks
879 // reachable from the exit blocks can have their IDom as the nearest common
880 // dominator of the exit blocks.
881 for (auto *BB : L->blocks()) {
882 auto *DomNodeBB = DT->getNode(BB);
883 for (auto *DomChild : DomNodeBB->children()) {
884 auto *DomChildBB = DomChild->getBlock();
885 if (!L->contains(LI->getLoopFor(DomChildBB)))
886 ChildrenToUpdate.push_back(DomChildBB);
887 }
888 }
889 for (auto *BB : ChildrenToUpdate)
890 DT->changeImmediateDominator(BB, PreHeader);
891 }
892
893 // Loop structure should be the following:
894 // Epilog Prolog
895 //
896 // PreHeader PreHeader
897 // NewPreHeader PrologPreHeader
898 // Header PrologHeader
899 // ... ...
900 // Latch PrologLatch
901 // NewExit PrologExit
902 // EpilogPreHeader NewPreHeader
903 // EpilogHeader Header
904 // ... ...
905 // EpilogLatch Latch
906 // LatchExit LatchExit
907
908 // Rewrite the cloned instruction operands to use the values created when the
909 // clone is created.
910 for (BasicBlock *BB : NewBlocks) {
911 for (Instruction &I : *BB) {
912 RemapInstruction(&I, VMap,
913 RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
914 }
915 }
916
917 if (UseEpilogRemainder) {
918 // Connect the epilog code to the original loop and update the
919 // PHI functions.
920 ConnectEpilog(L, ModVal, NewExit, LatchExit, PreHeader,
921 EpilogPreHeader, NewPreHeader, VMap, DT, LI,
922 PreserveLCSSA);
923
924 // Update counter in loop for unrolling.
925 // I should be multiply of Count.
926 IRBuilder<> B2(NewPreHeader->getTerminator());
927 Value *TestVal = B2.CreateSub(TripCount, ModVal, "unroll_iter");
928 BranchInst *LatchBR = cast<BranchInst>(Latch->getTerminator());
929 B2.SetInsertPoint(LatchBR);
930 PHINode *NewIdx = PHINode::Create(TestVal->getType(), 2, "niter",
931 Header->getFirstNonPHI());
932 Value *IdxSub =
933 B2.CreateSub(NewIdx, ConstantInt::get(NewIdx->getType(), 1),
934 NewIdx->getName() + ".nsub");
935 Value *IdxCmp;
936 if (LatchBR->getSuccessor(0) == Header)
937 IdxCmp = B2.CreateIsNotNull(IdxSub, NewIdx->getName() + ".ncmp");
938 else
939 IdxCmp = B2.CreateIsNull(IdxSub, NewIdx->getName() + ".ncmp");
940 NewIdx->addIncoming(TestVal, NewPreHeader);
941 NewIdx->addIncoming(IdxSub, Latch);
942 LatchBR->setCondition(IdxCmp);
943 } else {
944 // Connect the prolog code to the original loop and update the
945 // PHI functions.
946 ConnectProlog(L, BECount, Count, PrologExit, LatchExit, PreHeader,
947 NewPreHeader, VMap, DT, LI, PreserveLCSSA);
948 }
949
950 // If this loop is nested, then the loop unroller changes the code in the any
951 // of its parent loops, so the Scalar Evolution pass needs to be run again.
952 SE->forgetTopmostLoop(L);
953
954 // Verify that the Dom Tree and Loop Info are correct.
955#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
956 if (DT) {
957 assert(DT->verify(DominatorTree::VerificationLevel::Full))(static_cast<void> (0));
958 LI->verify(*DT);
959 }
960#endif
961
962 // For unroll factor 2 remainder loop will have 1 iteration.
963 if (Count == 2 && DT && LI && SE) {
964 // TODO: This code could probably be pulled out into a helper function
965 // (e.g. breakLoopBackedgeAndSimplify) and reused in loop-deletion.
966 BasicBlock *RemainderLatch = remainderLoop->getLoopLatch();
967 assert(RemainderLatch)(static_cast<void> (0));
968 SmallVector<BasicBlock*> RemainderBlocks(remainderLoop->getBlocks().begin(),
969 remainderLoop->getBlocks().end());
970 breakLoopBackedge(remainderLoop, *DT, *SE, *LI, nullptr);
971 remainderLoop = nullptr;
972
973 // Simplify loop values after breaking the backedge
974 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
975 SmallVector<WeakTrackingVH, 16> DeadInsts;
976 for (BasicBlock *BB : RemainderBlocks) {
977 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
978 Instruction *Inst = &*I++;
979 if (Value *V = SimplifyInstruction(Inst, {DL, nullptr, DT, AC}))
980 if (LI->replacementPreservesLCSSAForm(Inst, V))
981 Inst->replaceAllUsesWith(V);
982 if (isInstructionTriviallyDead(Inst))
983 DeadInsts.emplace_back(Inst);
984 }
985 // We can't do recursive deletion until we're done iterating, as we might
986 // have a phi which (potentially indirectly) uses instructions later in
987 // the block we're iterating through.
988 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
989 }
990
991 // Merge latch into exit block.
992 auto *ExitBB = RemainderLatch->getSingleSuccessor();
993 assert(ExitBB && "required after breaking cond br backedge")(static_cast<void> (0));
994 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
995 MergeBlockIntoPredecessor(ExitBB, &DTU, LI);
996 }
997
998 // Canonicalize to LoopSimplifyForm both original and remainder loops. We
999 // cannot rely on the LoopUnrollPass to do this because it only does
1000 // canonicalization for parent/subloops and not the sibling loops.
1001 if (OtherExits.size() > 0) {
1002 // Generate dedicated exit blocks for the original loop, to preserve
1003 // LoopSimplifyForm.
1004 formDedicatedExitBlocks(L, DT, LI, nullptr, PreserveLCSSA);
1005 // Generate dedicated exit blocks for the remainder loop if one exists, to
1006 // preserve LoopSimplifyForm.
1007 if (remainderLoop)
1008 formDedicatedExitBlocks(remainderLoop, DT, LI, nullptr, PreserveLCSSA);
1009 }
1010
1011 auto UnrollResult = LoopUnrollResult::Unmodified;
1012 if (remainderLoop && UnrollRemainder) {
1013 LLVM_DEBUG(dbgs() << "Unrolling remainder loop\n")do { } while (false);
1014 UnrollResult =
1015 UnrollLoop(remainderLoop,
1016 {/*Count*/ Count - 1, /*Force*/ false, /*Runtime*/ false,
1017 /*AllowExpensiveTripCount*/ false,
1018 /*UnrollRemainder*/ false, ForgetAllSCEV},
1019 LI, SE, DT, AC, TTI, /*ORE*/ nullptr, PreserveLCSSA);
1020 }
1021
1022 if (ResultLoop && UnrollResult != LoopUnrollResult::FullyUnrolled)
1023 *ResultLoop = remainderLoop;
1024 NumRuntimeUnrolled++;
1025 return true;
1026}

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/ADT/iterator.h"
27#include "llvm/ADT/iterator_range.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CallingConv.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/DerivedTypes.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/InstrTypes.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/OperandTraits.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Use.h"
40#include "llvm/IR/User.h"
41#include "llvm/IR/Value.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include <cassert>
46#include <cstddef>
47#include <cstdint>
48#include <iterator>
49
50namespace llvm {
51
52class APInt;
53class ConstantInt;
54class DataLayout;
55class LLVMContext;
56
57//===----------------------------------------------------------------------===//
58// AllocaInst Class
59//===----------------------------------------------------------------------===//
60
61/// an instruction to allocate memory on the stack
62class AllocaInst : public UnaryInstruction {
63 Type *AllocatedType;
64
65 using AlignmentField = AlignmentBitfieldElementT<0>;
66 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
67 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
68 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
69 SwiftErrorField>(),
70 "Bitfields must be contiguous");
71
72protected:
73 // Note: Instruction needs to be a friend here to call cloneImpl.
74 friend class Instruction;
75
76 AllocaInst *cloneImpl() const;
77
78public:
79 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80 const Twine &Name, Instruction *InsertBefore);
81 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82 const Twine &Name, BasicBlock *InsertAtEnd);
83
84 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85 Instruction *InsertBefore);
86 AllocaInst(Type *Ty, unsigned AddrSpace,
87 const Twine &Name, BasicBlock *InsertAtEnd);
88
89 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
90 const Twine &Name = "", Instruction *InsertBefore = nullptr);
91 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
92 const Twine &Name, BasicBlock *InsertAtEnd);
93
94 /// Return true if there is an allocation size parameter to the allocation
95 /// instruction that is not 1.
96 bool isArrayAllocation() const;
97
98 /// Get the number of elements allocated. For a simple allocation of a single
99 /// element, this will return a constant 1 value.
100 const Value *getArraySize() const { return getOperand(0); }
101 Value *getArraySize() { return getOperand(0); }
102
103 /// Overload to return most specific pointer type.
104 PointerType *getType() const {
105 return cast<PointerType>(Instruction::getType());
106 }
107
108 /// Get allocation size in bits. Returns None if size can't be determined,
109 /// e.g. in case of a VLA.
110 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
111
112 /// Return the type that is being allocated by the instruction.
113 Type *getAllocatedType() const { return AllocatedType; }
114 /// for use only in special circumstances that need to generically
115 /// transform a whole instruction (eg: IR linking and vectorization).
116 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
117
118 /// Return the alignment of the memory that is being allocated by the
119 /// instruction.
120 Align getAlign() const {
121 return Align(1ULL << getSubclassData<AlignmentField>());
122 }
123
124 void setAlignment(Align Align) {
125 setSubclassData<AlignmentField>(Log2(Align));
126 }
127
128 // FIXME: Remove this one transition to Align is over.
129 unsigned getAlignment() const { return getAlign().value(); }
130
131 /// Return true if this alloca is in the entry block of the function and is a
132 /// constant size. If so, the code generator will fold it into the
133 /// prolog/epilog code, so it is basically free.
134 bool isStaticAlloca() const;
135
136 /// Return true if this alloca is used as an inalloca argument to a call. Such
137 /// allocas are never considered static even if they are in the entry block.
138 bool isUsedWithInAlloca() const {
139 return getSubclassData<UsedWithInAllocaField>();
140 }
141
142 /// Specify whether this alloca is used to represent the arguments to a call.
143 void setUsedWithInAlloca(bool V) {
144 setSubclassData<UsedWithInAllocaField>(V);
145 }
146
147 /// Return true if this alloca is used as a swifterror argument to a call.
148 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
149 /// Specify whether this alloca is used to represent a swifterror.
150 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
151
152 // Methods for support type inquiry through isa, cast, and dyn_cast:
153 static bool classof(const Instruction *I) {
154 return (I->getOpcode() == Instruction::Alloca);
155 }
156 static bool classof(const Value *V) {
157 return isa<Instruction>(V) && classof(cast<Instruction>(V));
158 }
159
160private:
161 // Shadow Instruction::setInstructionSubclassData with a private forwarding
162 // method so that subclasses cannot accidentally use it.
163 template <typename Bitfield>
164 void setSubclassData(typename Bitfield::Type Value) {
165 Instruction::setSubclassData<Bitfield>(Value);
166 }
167};
168
169//===----------------------------------------------------------------------===//
170// LoadInst Class
171//===----------------------------------------------------------------------===//
172
173/// An instruction for reading from memory. This uses the SubclassData field in
174/// Value to store whether or not the load is volatile.
175class LoadInst : public UnaryInstruction {
176 using VolatileField = BoolBitfieldElementT<0>;
177 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
178 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
179 static_assert(
180 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
181 "Bitfields must be contiguous");
182
183 void AssertOK();
184
185protected:
186 // Note: Instruction needs to be a friend here to call cloneImpl.
187 friend class Instruction;
188
189 LoadInst *cloneImpl() const;
190
191public:
192 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
193 Instruction *InsertBefore);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196 Instruction *InsertBefore);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 BasicBlock *InsertAtEnd);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200 Align Align, Instruction *InsertBefore = nullptr);
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 Align Align, BasicBlock *InsertAtEnd);
203 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
204 Align Align, AtomicOrdering Order,
205 SyncScope::ID SSID = SyncScope::System,
206 Instruction *InsertBefore = nullptr);
207 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
208 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
209 BasicBlock *InsertAtEnd);
210
211 /// Return true if this is a load from a volatile memory location.
212 bool isVolatile() const { return getSubclassData<VolatileField>(); }
213
214 /// Specify whether this is a volatile load or not.
215 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
216
217 /// Return the alignment of the access that is being performed.
218 /// FIXME: Remove this function once transition to Align is over.
219 /// Use getAlign() instead.
220 unsigned getAlignment() const { return getAlign().value(); }
221
222 /// Return the alignment of the access that is being performed.
223 Align getAlign() const {
224 return Align(1ULL << (getSubclassData<AlignmentField>()));
225 }
226
227 void setAlignment(Align Align) {
228 setSubclassData<AlignmentField>(Log2(Align));
229 }
230
231 /// Returns the ordering constraint of this load instruction.
232 AtomicOrdering getOrdering() const {
233 return getSubclassData<OrderingField>();
234 }
235 /// Sets the ordering constraint of this load instruction. May not be Release
236 /// or AcquireRelease.
237 void setOrdering(AtomicOrdering Ordering) {
238 setSubclassData<OrderingField>(Ordering);
239 }
240
241 /// Returns the synchronization scope ID of this load instruction.
242 SyncScope::ID getSyncScopeID() const {
243 return SSID;
244 }
245
246 /// Sets the synchronization scope ID of this load instruction.
247 void setSyncScopeID(SyncScope::ID SSID) {
248 this->SSID = SSID;
249 }
250
251 /// Sets the ordering constraint and the synchronization scope ID of this load
252 /// instruction.
253 void setAtomic(AtomicOrdering Ordering,
254 SyncScope::ID SSID = SyncScope::System) {
255 setOrdering(Ordering);
256 setSyncScopeID(SSID);
257 }
258
259 bool isSimple() const { return !isAtomic() && !isVolatile(); }
260
261 bool isUnordered() const {
262 return (getOrdering() == AtomicOrdering::NotAtomic ||
263 getOrdering() == AtomicOrdering::Unordered) &&
264 !isVolatile();
265 }
266
267 Value *getPointerOperand() { return getOperand(0); }
268 const Value *getPointerOperand() const { return getOperand(0); }
269 static unsigned getPointerOperandIndex() { return 0U; }
270 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
271
272 /// Returns the address space of the pointer operand.
273 unsigned getPointerAddressSpace() const {
274 return getPointerOperandType()->getPointerAddressSpace();
275 }
276
277 // Methods for support type inquiry through isa, cast, and dyn_cast:
278 static bool classof(const Instruction *I) {
279 return I->getOpcode() == Instruction::Load;
280 }
281 static bool classof(const Value *V) {
282 return isa<Instruction>(V) && classof(cast<Instruction>(V));
283 }
284
285private:
286 // Shadow Instruction::setInstructionSubclassData with a private forwarding
287 // method so that subclasses cannot accidentally use it.
288 template <typename Bitfield>
289 void setSubclassData(typename Bitfield::Type Value) {
290 Instruction::setSubclassData<Bitfield>(Value);
291 }
292
293 /// The synchronization scope ID of this load instruction. Not quite enough
294 /// room in SubClassData for everything, so synchronization scope ID gets its
295 /// own field.
296 SyncScope::ID SSID;
297};
298
299//===----------------------------------------------------------------------===//
300// StoreInst Class
301//===----------------------------------------------------------------------===//
302
303/// An instruction for storing to memory.
304class StoreInst : public Instruction {
305 using VolatileField = BoolBitfieldElementT<0>;
306 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
307 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
308 static_assert(
309 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
310 "Bitfields must be contiguous");
311
312 void AssertOK();
313
314protected:
315 // Note: Instruction needs to be a friend here to call cloneImpl.
316 friend class Instruction;
317
318 StoreInst *cloneImpl() const;
319
320public:
321 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
322 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
326 Instruction *InsertBefore = nullptr);
327 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
328 BasicBlock *InsertAtEnd);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
330 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
331 Instruction *InsertBefore = nullptr);
332 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
334
335 // allocate space for exactly two operands
336 void *operator new(size_t S) { return User::operator new(S, 2); }
337 void operator delete(void *Ptr) { User::operator delete(Ptr); }
338
339 /// Return true if this is a store to a volatile memory location.
340 bool isVolatile() const { return getSubclassData<VolatileField>(); }
341
342 /// Specify whether this is a volatile store or not.
343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344
345 /// Transparently provide more efficient getOperand methods.
346 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
347
348 /// Return the alignment of the access that is being performed
349 /// FIXME: Remove this function once transition to Align is over.
350 /// Use getAlign() instead.
351 unsigned getAlignment() const { return getAlign().value(); }
352
353 Align getAlign() const {
354 return Align(1ULL << (getSubclassData<AlignmentField>()));
355 }
356
357 void setAlignment(Align Align) {
358 setSubclassData<AlignmentField>(Log2(Align));
359 }
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return getSubclassData<OrderingField>();
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setSubclassData<OrderingField>(Ordering);
370 }
371
372 /// Returns the synchronization scope ID of this store instruction.
373 SyncScope::ID getSyncScopeID() const {
374 return SSID;
375 }
376
377 /// Sets the synchronization scope ID of this store instruction.
378 void setSyncScopeID(SyncScope::ID SSID) {
379 this->SSID = SSID;
380 }
381
382 /// Sets the ordering constraint and the synchronization scope ID of this
383 /// store instruction.
384 void setAtomic(AtomicOrdering Ordering,
385 SyncScope::ID SSID = SyncScope::System) {
386 setOrdering(Ordering);
387 setSyncScopeID(SSID);
388 }
389
390 bool isSimple() const { return !isAtomic() && !isVolatile(); }
391
392 bool isUnordered() const {
      // "Unordered" = the store has no real atomic ordering constraint
      // (NotAtomic or Unordered) and is not volatile.
393 return (getOrdering() == AtomicOrdering::NotAtomic ||
394 getOrdering() == AtomicOrdering::Unordered) &&
395 !isVolatile();
396 }
397
398 Value *getValueOperand() { return getOperand(0); }
399 const Value *getValueOperand() const { return getOperand(0); }
400
401 Value *getPointerOperand() { return getOperand(1); }
402 const Value *getPointerOperand() const { return getOperand(1); }
403 static unsigned getPointerOperandIndex() { return 1U; }
404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405
406 /// Returns the address space of the pointer operand.
407 unsigned getPointerAddressSpace() const {
408 return getPointerOperandType()->getPointerAddressSpace();
409 }
410
411 // Methods for support type inquiry through isa, cast, and dyn_cast:
412 static bool classof(const Instruction *I) {
413 return I->getOpcode() == Instruction::Store;
414 }
415 static bool classof(const Value *V) {
416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
417 }
418
419private:
420 // Shadow Instruction::setInstructionSubclassData with a private forwarding
421 // method so that subclasses cannot accidentally use it.
422 template <typename Bitfield>
423 void setSubclassData(typename Bitfield::Type Value) {
424 Instruction::setSubclassData<Bitfield>(Value);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
// Operand-count trait: StoreInst has exactly 2 operands (value, pointer),
// matching its `operator new(S, 2)` allocation.
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
// Macro expansion (rendered inline by the analyzer report): out-of-line
// bodies for the operand accessors declared inside StoreInst via
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS. Under NDEBUG the asserts appear
// here as `(static_cast<void> (0))`.
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { (static_cast<void> (0));
return cast_or_null<Value>( OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this))[i_nocapture
].get()); } void StoreInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast<void> (0)); OperandTraits
<StoreInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned StoreInst::getNumOperands() const { return OperandTraits
<StoreInst>::operands(this); } template <int Idx_nocapture
> Use &StoreInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
StoreInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
      // Ordering lives in the shared Instruction SubclassData bitfield,
      // starting at bit 0.
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
      // Common initialization used by both constructors (defined out of line;
      // presumably in Instructions.cpp — TODO confirm).
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t S) { return User::operator new(S, 0); }
466 void operator delete(void *Ptr) { User::operator delete(Ptr); }
467
468 /// Returns the ordering constraint of this fence instruction.
469 AtomicOrdering getOrdering() const {
470 return getSubclassData<OrderingField>();
471 }
472
473 /// Sets the ordering constraint of this fence instruction. May only be
474 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
475 void setOrdering(AtomicOrdering Ordering) {
476 setSubclassData<OrderingField>(Ordering);
477 }
478
479 /// Returns the synchronization scope ID of this fence instruction.
480 SyncScope::ID getSyncScopeID() const {
481 return SSID;
482 }
483
484 /// Sets the synchronization scope ID of this fence instruction.
485 void setSyncScopeID(SyncScope::ID SSID) {
486 this->SSID = SSID;
487 }
488
489 // Methods for support type inquiry through isa, cast, and dyn_cast:
490 static bool classof(const Instruction *I) {
491 return I->getOpcode() == Instruction::Fence;
492 }
493 static bool classof(const Value *V) {
494 return isa<Instruction>(V) && classof(cast<Instruction>(V));
495 }
496
497private:
498 // Shadow Instruction::setInstructionSubclassData with a private forwarding
499 // method so that subclasses cannot accidentally use it.
500 template <typename Bitfield>
501 void setSubclassData(typename Bitfield::Type Value) {
502 Instruction::setSubclassData<Bitfield>(Value);
503 }
504
505 /// The synchronization scope ID of this fence instruction. Not quite enough
506 /// room in SubClassData for everything, so synchronization scope ID gets its
507 /// own field.
508 SyncScope::ID SSID;
509};
510
511//===----------------------------------------------------------------------===//
512// AtomicCmpXchgInst Class
513//===----------------------------------------------------------------------===//
514
515/// An instruction that atomically checks whether a
516/// specified value is in a memory location, and, if it is, stores a new value
517/// there. The value returned by this instruction is a pair containing the
518/// original value as first element, and an i1 indicating success (true) or
519/// failure (false) as second element.
520///
521class AtomicCmpXchgInst : public Instruction {
      // Shared out-of-line constructor setup for both ctors below.
522 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
523 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
524 SyncScope::ID SSID);
525
526 template <unsigned Offset>
527 using AtomicOrderingBitfieldElement =
528 typename Bitfield::Element<AtomicOrdering, Offset, 3,
529 AtomicOrdering::LAST>;
530
531protected:
532 // Note: Instruction needs to be a friend here to call cloneImpl.
533 friend class Instruction;
534
535 AtomicCmpXchgInst *cloneImpl() const;
536
537public:
538 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
539 AtomicOrdering SuccessOrdering,
540 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
541 Instruction *InsertBefore = nullptr);
542 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
543 AtomicOrdering SuccessOrdering,
544 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
545 BasicBlock *InsertAtEnd);
546
547 // allocate space for exactly three operands
548 void *operator new(size_t S) { return User::operator new(S, 3); }
549 void operator delete(void *Ptr) { User::operator delete(Ptr); }
550
      // All five flag/ordering fields pack contiguously into the shared
      // Instruction SubclassData word (enforced by the static_assert below).
551 using VolatileField = BoolBitfieldElementT<0>;
552 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
553 using SuccessOrderingField =
554 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
555 using FailureOrderingField =
556 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
557 using AlignmentField =
558 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
559 static_assert(
560 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
561 FailureOrderingField, AlignmentField>(),
562 "Bitfields must be contiguous");
563
564 /// Return the alignment of the memory that is being allocated by the
565 /// instruction.
566 Align getAlign() const {
      // Alignment is stored as log2 and reconstituted here.
567 return Align(1ULL << getSubclassData<AlignmentField>());
568 }
569
570 void setAlignment(Align Align) {
571 setSubclassData<AlignmentField>(Log2(Align));
572 }
573
574 /// Return true if this is a cmpxchg from a volatile memory
575 /// location.
576 ///
577 bool isVolatile() const { return getSubclassData<VolatileField>(); }
578
579 /// Specify whether this is a volatile cmpxchg.
580 ///
581 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
582
583 /// Return true if this cmpxchg may spuriously fail.
584 bool isWeak() const { return getSubclassData<WeakField>(); }
585
586 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
587
588 /// Transparently provide more efficient getOperand methods.
589 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
590
      // A cmpxchg success ordering must be genuinely atomic.
591 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
592 return Ordering != AtomicOrdering::NotAtomic &&
593 Ordering != AtomicOrdering::Unordered;
594 }
595
      // Failure additionally excludes release semantics: on failure no store
      // occurs, so Release/AcquireRelease are not meaningful.
596 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
597 return Ordering != AtomicOrdering::NotAtomic &&
598 Ordering != AtomicOrdering::Unordered &&
599 Ordering != AtomicOrdering::AcquireRelease &&
600 Ordering != AtomicOrdering::Release;
601 }
602
603 /// Returns the success ordering constraint of this cmpxchg instruction.
604 AtomicOrdering getSuccessOrdering() const {
605 return getSubclassData<SuccessOrderingField>();
606 }
607
608 /// Sets the success ordering constraint of this cmpxchg instruction.
609 void setSuccessOrdering(AtomicOrdering Ordering) {
610 assert(isValidSuccessOrdering(Ordering) &&(static_cast<void> (0))
611 "invalid CmpXchg success ordering")(static_cast<void> (0));
612 setSubclassData<SuccessOrderingField>(Ordering);
613 }
614
615 /// Returns the failure ordering constraint of this cmpxchg instruction.
616 AtomicOrdering getFailureOrdering() const {
617 return getSubclassData<FailureOrderingField>();
618 }
619
620 /// Sets the failure ordering constraint of this cmpxchg instruction.
621 void setFailureOrdering(AtomicOrdering Ordering) {
622 assert(isValidFailureOrdering(Ordering) &&(static_cast<void> (0))
623 "invalid CmpXchg failure ordering")(static_cast<void> (0));
624 setSubclassData<FailureOrderingField>(Ordering);
625 }
626
627 /// Returns a single ordering which is at least as strong as both the
628 /// success and failure orderings for this cmpxchg.
629 AtomicOrdering getMergedOrdering() const {
      // SeqCst failure dominates everything; an Acquire failure may need to
      // strengthen a weaker (Monotonic) or store-only (Release) success
      // ordering. Otherwise the success ordering already covers failure.
630 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
631 return AtomicOrdering::SequentiallyConsistent;
632 if (getFailureOrdering() == AtomicOrdering::Acquire) {
633 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
634 return AtomicOrdering::Acquire;
635 if (getSuccessOrdering() == AtomicOrdering::Release)
636 return AtomicOrdering::AcquireRelease;
637 }
638 return getSuccessOrdering();
639 }
640
641 /// Returns the synchronization scope ID of this cmpxchg instruction.
642 SyncScope::ID getSyncScopeID() const {
643 return SSID;
644 }
645
646 /// Sets the synchronization scope ID of this cmpxchg instruction.
647 void setSyncScopeID(SyncScope::ID SSID) {
648 this->SSID = SSID;
649 }
650
      // Operand layout: 0 = pointer, 1 = expected value, 2 = new value.
651 Value *getPointerOperand() { return getOperand(0); }
652 const Value *getPointerOperand() const { return getOperand(0); }
653 static unsigned getPointerOperandIndex() { return 0U; }
654
655 Value *getCompareOperand() { return getOperand(1); }
656 const Value *getCompareOperand() const { return getOperand(1); }
657
658 Value *getNewValOperand() { return getOperand(2); }
659 const Value *getNewValOperand() const { return getOperand(2); }
660
661 /// Returns the address space of the pointer operand.
662 unsigned getPointerAddressSpace() const {
663 return getPointerOperand()->getType()->getPointerAddressSpace();
664 }
665
666 /// Returns the strongest permitted ordering on failure, given the
667 /// desired ordering on success.
668 ///
669 /// If the comparison in a cmpxchg operation fails, there is no atomic store
670 /// so release semantics cannot be provided. So this function drops explicit
671 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
672 /// operation would remain SequentiallyConsistent.
673 static AtomicOrdering
674 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
675 switch (SuccessOrdering) {
676 default:
677 llvm_unreachable("invalid cmpxchg success ordering")__builtin_unreachable();
678 case AtomicOrdering::Release:
679 case AtomicOrdering::Monotonic:
680 return AtomicOrdering::Monotonic;
681 case AtomicOrdering::AcquireRelease:
682 case AtomicOrdering::Acquire:
683 return AtomicOrdering::Acquire;
684 case AtomicOrdering::SequentiallyConsistent:
685 return AtomicOrdering::SequentiallyConsistent;
686 }
687 }
688
689 // Methods for support type inquiry through isa, cast, and dyn_cast:
690 static bool classof(const Instruction *I) {
691 return I->getOpcode() == Instruction::AtomicCmpXchg;
692 }
693 static bool classof(const Value *V) {
694 return isa<Instruction>(V) && classof(cast<Instruction>(V));
695 }
696
697private:
698 // Shadow Instruction::setInstructionSubclassData with a private forwarding
699 // method so that subclasses cannot accidentally use it.
700 template <typename Bitfield>
701 void setSubclassData(typename Bitfield::Type Value) {
702 Instruction::setSubclassData<Bitfield>(Value);
703 }
704
705 /// The synchronization scope ID of this cmpxchg instruction. Not quite
706 /// enough room in SubClassData for everything, so synchronization scope ID
707 /// gets its own field.
708 SyncScope::ID SSID;
709};
710
// Operand-count trait: AtomicCmpXchgInst has exactly 3 operands
// (pointer, compare value, new value), matching `operator new(S, 3)`.
711template <>
712struct OperandTraits<AtomicCmpXchgInst> :
713 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
714};
715
// Macro expansion (rendered inline by the analyzer report): out-of-line
// bodies for AtomicCmpXchgInst's operand accessors declared via
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
716DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast<void> (0)); return cast_or_null
<Value>( OperandTraits<AtomicCmpXchgInst>::op_begin
(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture].get
()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits
<AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicCmpXchgInst::getNumOperands() const { return
OperandTraits<AtomicCmpXchgInst>::operands(this); } template
<int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &AtomicCmpXchgInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
717
718//===----------------------------------------------------------------------===//
719// AtomicRMWInst Class
720//===----------------------------------------------------------------------===//
721
722/// an instruction that atomically reads a memory location,
723/// combines it with another value, and then stores the result back. Returns
724/// the old value.
725///
726class AtomicRMWInst : public Instruction {
727protected:
728 // Note: Instruction needs to be a friend here to call cloneImpl.
729 friend class Instruction;
730
731 AtomicRMWInst *cloneImpl() const;
732
733public:
734 /// This enumeration lists the possible modifications atomicrmw can make. In
735 /// the descriptions, 'p' is the pointer to the instruction's memory location,
736 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
737 /// instruction. These instructions always return 'old'.
738 enum BinOp : unsigned {
739 /// *p = v
740 Xchg,
741 /// *p = old + v
742 Add,
743 /// *p = old - v
744 Sub,
745 /// *p = old & v
746 And,
747 /// *p = ~(old & v)
748 Nand,
749 /// *p = old | v
750 Or,
751 /// *p = old ^ v
752 Xor,
753 /// *p = old >signed v ? old : v
754 Max,
755 /// *p = old <signed v ? old : v
756 Min,
757 /// *p = old >unsigned v ? old : v
758 UMax,
759 /// *p = old <unsigned v ? old : v
760 UMin,
761
762 /// *p = old + v
763 FAdd,
764
765 /// *p = old - v
766 FSub,
767
      // Range markers plus an out-of-range sentinel value.
768 FIRST_BINOP = Xchg,
769 LAST_BINOP = FSub,
770 BAD_BINOP
771 };
772
773private:
774 template <unsigned Offset>
775 using AtomicOrderingBitfieldElement =
776 typename Bitfield::Element<AtomicOrdering, Offset, 3,
777 AtomicOrdering::LAST>;
778
779 template <unsigned Offset>
780 using BinOpBitfieldElement =
781 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
782
783public:
784 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
785 AtomicOrdering Ordering, SyncScope::ID SSID,
786 Instruction *InsertBefore = nullptr);
787 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
788 AtomicOrdering Ordering, SyncScope::ID SSID,
789 BasicBlock *InsertAtEnd);
790
791 // allocate space for exactly two operands
792 void *operator new(size_t S) { return User::operator new(S, 2); }
793 void operator delete(void *Ptr) { User::operator delete(Ptr); }
794
      // Volatile flag, ordering, operation and log2(alignment) pack
      // contiguously into SubclassData (checked by the static_assert below).
795 using VolatileField = BoolBitfieldElementT<0>;
796 using AtomicOrderingField =
797 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
798 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
799 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
800 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
801 OperationField, AlignmentField>(),
802 "Bitfields must be contiguous");
803
804 BinOp getOperation() const { return getSubclassData<OperationField>(); }
805
806 static StringRef getOperationName(BinOp Op);
807
      /// True only for the floating-point operations (FAdd, FSub).
808 static bool isFPOperation(BinOp Op) {
809 switch (Op) {
810 case AtomicRMWInst::FAdd:
811 case AtomicRMWInst::FSub:
812 return true;
813 default:
814 return false;
815 }
816 }
817
818 void setOperation(BinOp Operation) {
819 setSubclassData<OperationField>(Operation);
820 }
821
822 /// Return the alignment of the memory that is being allocated by the
823 /// instruction.
824 Align getAlign() const {
      // Alignment is stored as log2 and reconstituted here.
825 return Align(1ULL << getSubclassData<AlignmentField>());
826 }
827
828 void setAlignment(Align Align) {
829 setSubclassData<AlignmentField>(Log2(Align));
830 }
831
832 /// Return true if this is a RMW on a volatile memory location.
833 ///
834 bool isVolatile() const { return getSubclassData<VolatileField>(); }
835
836 /// Specify whether this is a volatile RMW or not.
837 ///
838 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
839
840 /// Transparently provide more efficient getOperand methods.
841 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
842
843 /// Returns the ordering constraint of this rmw instruction.
844 AtomicOrdering getOrdering() const {
845 return getSubclassData<AtomicOrderingField>();
846 }
847
848 /// Sets the ordering constraint of this rmw instruction.
849 void setOrdering(AtomicOrdering Ordering) {
850 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast<void> (0))
851 "atomicrmw instructions can only be atomic.")(static_cast<void> (0));
852 setSubclassData<AtomicOrderingField>(Ordering);
853 }
854
855 /// Returns the synchronization scope ID of this rmw instruction.
856 SyncScope::ID getSyncScopeID() const {
857 return SSID;
858 }
859
860 /// Sets the synchronization scope ID of this rmw instruction.
861 void setSyncScopeID(SyncScope::ID SSID) {
862 this->SSID = SSID;
863 }
864
      // Operand layout: 0 = pointer, 1 = value to combine with.
865 Value *getPointerOperand() { return getOperand(0); }
866 const Value *getPointerOperand() const { return getOperand(0); }
867 static unsigned getPointerOperandIndex() { return 0U; }
868
869 Value *getValOperand() { return getOperand(1); }
870 const Value *getValOperand() const { return getOperand(1); }
871
872 /// Returns the address space of the pointer operand.
873 unsigned getPointerAddressSpace() const {
874 return getPointerOperand()->getType()->getPointerAddressSpace();
875 }
876
877 bool isFloatingPointOperation() const {
878 return isFPOperation(getOperation());
879 }
880
881 // Methods for support type inquiry through isa, cast, and dyn_cast:
882 static bool classof(const Instruction *I) {
883 return I->getOpcode() == Instruction::AtomicRMW;
884 }
885 static bool classof(const Value *V) {
886 return isa<Instruction>(V) && classof(cast<Instruction>(V));
887 }
888
889private:
      // Shared out-of-line constructor setup for both ctors above.
890 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
891 AtomicOrdering Ordering, SyncScope::ID SSID);
892
893 // Shadow Instruction::setInstructionSubclassData with a private forwarding
894 // method so that subclasses cannot accidentally use it.
895 template <typename Bitfield>
896 void setSubclassData(typename Bitfield::Type Value) {
897 Instruction::setSubclassData<Bitfield>(Value);
898 }
899
900 /// The synchronization scope ID of this rmw instruction. Not quite enough
901 /// room in SubClassData for everything, so synchronization scope ID gets its
902 /// own field.
903 SyncScope::ID SSID;
904};
905
// Operand-count trait: AtomicRMWInst has exactly 2 operands
// (pointer, value), matching `operator new(S, 2)`.
906template <>
907struct OperandTraits<AtomicRMWInst>
908 : public FixedNumOperandTraits<AtomicRMWInst,2> {
909};
910
// Macro expansion (rendered inline by the analyzer report): out-of-line
// bodies for AtomicRMWInst's operand accessors declared via
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
911DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast<void> (0));
return cast_or_null<Value>( OperandTraits<AtomicRMWInst
>::op_begin(const_cast<AtomicRMWInst*>(this))[i_nocapture
].get()); } void AtomicRMWInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits
<AtomicRMWInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicRMWInst::getNumOperands() const { return OperandTraits
<AtomicRMWInst>::operands(this); } template <int Idx_nocapture
> Use &AtomicRMWInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &AtomicRMWInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
912
913//===----------------------------------------------------------------------===//
914// GetElementPtrInst Class
915//===----------------------------------------------------------------------===//
916
917// checkGEPType - Simple wrapper function to give a better assertion failure
918// message on bad indexes for a gep instruction.
919//
920inline Type *checkGEPType(Type *Ty) {
      // A null Ty means getIndexedType rejected the index list; the assert
      // (compiled out under NDEBUG, hence the expansion artifact) turns that
      // into a readable diagnostic in debug builds.
921 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast<void> (0));
922 return Ty;
923}
924
925/// an instruction for type-safe pointer arithmetic to
926/// access elements of arrays and structs
927///
928class GetElementPtrInst : public Instruction {
929 Type *SourceElementType;
930 Type *ResultElementType;
931
932 GetElementPtrInst(const GetElementPtrInst &GEPI);
933
934 /// Constructors - Create a getelementptr instruction with a base pointer an
935 /// list of indices. The first ctor can optionally insert before an existing
936 /// instruction, the second appends the new instruction to the specified
937 /// BasicBlock.
938 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
939 ArrayRef<Value *> IdxList, unsigned Values,
940 const Twine &NameStr, Instruction *InsertBefore);
941 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942 ArrayRef<Value *> IdxList, unsigned Values,
943 const Twine &NameStr, BasicBlock *InsertAtEnd);
944
945 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
946
947protected:
948 // Note: Instruction needs to be a friend here to call cloneImpl.
949 friend class Instruction;
950
951 GetElementPtrInst *cloneImpl() const;
952
953public:
954 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
955 ArrayRef<Value *> IdxList,
956 const Twine &NameStr = "",
957 Instruction *InsertBefore = nullptr) {
958 unsigned Values = 1 + unsigned(IdxList.size());
959 assert(PointeeType && "Must specify element type")(static_cast<void> (0));
960 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast<void> (0))
961 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast<void> (0));
962 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
963 NameStr, InsertBefore);
964 }
965
966 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967 ArrayRef<Value *> IdxList,
968 const Twine &NameStr,
969 BasicBlock *InsertAtEnd) {
970 unsigned Values = 1 + unsigned(IdxList.size());
971 assert(PointeeType && "Must specify element type")(static_cast<void> (0));
972 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast<void> (0))
973 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast<void> (0));
974 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
975 NameStr, InsertAtEnd);
976 }
977
978 LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds([[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
979 Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr = "",[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
980 Instruction *InsertBefore = nullptr),[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
981 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
{
982 return CreateInBounds(
983 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
984 NameStr, InsertBefore);
985 }
986
987 /// Create an "inbounds" getelementptr. See the documentation for the
988 /// "inbounds" flag in LangRef.html for details.
989 static GetElementPtrInst *
990 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
991 const Twine &NameStr = "",
992 Instruction *InsertBefore = nullptr) {
993 GetElementPtrInst *GEP =
994 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
995 GEP->setIsInBounds(true);
996 return GEP;
997 }
998
999 LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds([[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
1000 Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr,[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
1001 BasicBlock *InsertAtEnd),[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
1002 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
{
1003 return CreateInBounds(
1004 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1005 NameStr, InsertAtEnd);
1006 }
1007
1008 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1009 ArrayRef<Value *> IdxList,
1010 const Twine &NameStr,
1011 BasicBlock *InsertAtEnd) {
1012 GetElementPtrInst *GEP =
1013 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1014 GEP->setIsInBounds(true);
1015 return GEP;
1016 }
1017
1018 /// Transparently provide more efficient getOperand methods.
1019 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1020
1021 Type *getSourceElementType() const { return SourceElementType; }
1022
1023 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1024 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1025
1026 Type *getResultElementType() const {
1027 assert(cast<PointerType>(getType()->getScalarType())(static_cast<void> (0))
1028 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast<void> (0));
1029 return ResultElementType;
1030 }
1031
1032 /// Returns the address space of this instruction's pointer type.
1033 unsigned getAddressSpace() const {
1034 // Note that this is always the same as the pointer operand's address space
1035 // and that is cheaper to compute, so cheat here.
1036 return getPointerAddressSpace();
1037 }
1038
1039 /// Returns the result type of a getelementptr with the given source
1040 /// element type and indexes.
1041 ///
1042 /// Null is returned if the indices are invalid for the specified
1043 /// source element type.
1044 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1045 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1046 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1047
1048 /// Return the type of the element at the given index of an indexable
1049 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1050 ///
1051 /// Returns null if the type can't be indexed, or the given index is not
1052 /// legal for the given type.
1053 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1054 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1055
1056 inline op_iterator idx_begin() { return op_begin()+1; }
1057 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1058 inline op_iterator idx_end() { return op_end(); }
1059 inline const_op_iterator idx_end() const { return op_end(); }
1060
1061 inline iterator_range<op_iterator> indices() {
1062 return make_range(idx_begin(), idx_end());
1063 }
1064
1065 inline iterator_range<const_op_iterator> indices() const {
1066 return make_range(idx_begin(), idx_end());
1067 }
1068
1069 Value *getPointerOperand() {
1070 return getOperand(0);
1071 }
1072 const Value *getPointerOperand() const {
1073 return getOperand(0);
1074 }
1075 static unsigned getPointerOperandIndex() {
1076 return 0U; // get index for modifying correct operand.
1077 }
1078
1079 /// Method to return the pointer operand as a
1080 /// PointerType.
1081 Type *getPointerOperandType() const {
1082 return getPointerOperand()->getType();
1083 }
1084
1085 /// Returns the address space of the pointer operand.
1086 unsigned getPointerAddressSpace() const {
1087 return getPointerOperandType()->getPointerAddressSpace();
1088 }
1089
1090 /// Returns the pointer type returned by the GEP
1091 /// instruction, which may be a vector of pointers.
1092 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1093 ArrayRef<Value *> IdxList) {
1094 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1095 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1096 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1097 Type *PtrTy = OrigPtrTy->isOpaque()
1098 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1099 : PointerType::get(ResultElemTy, AddrSpace);
1100 // Vector GEP
1101 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1102 ElementCount EltCount = PtrVTy->getElementCount();
1103 return VectorType::get(PtrTy, EltCount);
1104 }
1105 for (Value *Index : IdxList)
1106 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1107 ElementCount EltCount = IndexVTy->getElementCount();
1108 return VectorType::get(PtrTy, EltCount);
1109 }
1110 // Scalar GEP
1111 return PtrTy;
1112 }
1113
1114 unsigned getNumIndices() const { // Note: always non-negative
1115 return getNumOperands() - 1;
1116 }
1117
1118 bool hasIndices() const {
1119 return getNumOperands() > 1;
1120 }
1121
1122 /// Return true if all of the indices of this GEP are
1123 /// zeros. If so, the result pointer and the first operand have the same
1124 /// value, just potentially different types.
1125 bool hasAllZeroIndices() const;
1126
1127 /// Return true if all of the indices of this GEP are
1128 /// constant integers. If so, the result pointer and the first operand have
1129 /// a constant offset between them.
1130 bool hasAllConstantIndices() const;
1131
1132 /// Set or clear the inbounds flag on this GEP instruction.
1133 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1134 void setIsInBounds(bool b = true);
1135
1136 /// Determine whether the GEP has the inbounds flag.
1137 bool isInBounds() const;
1138
1139 /// Accumulate the constant address offset of this GEP if possible.
1140 ///
1141 /// This routine accepts an APInt into which it will accumulate the constant
1142 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1143 /// all-constant, it returns false and the value of the offset APInt is
1144 /// undefined (it is *not* preserved!). The APInt passed into this routine
1145 /// must be at least as wide as the IntPtr type for the address space of
1146 /// the base GEP pointer.
1147 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1148 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1149 MapVector<Value *, APInt> &VariableOffsets,
1150 APInt &ConstantOffset) const;
1151 // Methods for support type inquiry through isa, cast, and dyn_cast:
1152 static bool classof(const Instruction *I) {
1153 return (I->getOpcode() == Instruction::GetElementPtr);
1154 }
1155 static bool classof(const Value *V) {
1156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1157 }
1158};
1159
1160template <>
1161struct OperandTraits<GetElementPtrInst> :
1162 public VariadicOperandTraits<GetElementPtrInst, 1> {
1163};
1164
1165GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1166 ArrayRef<Value *> IdxList, unsigned Values,
1167 const Twine &NameStr,
1168 Instruction *InsertBefore)
1169 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1170 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1171 Values, InsertBefore),
1172 SourceElementType(PointeeType),
1173 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1174 assert(cast<PointerType>(getType()->getScalarType())(static_cast<void> (0))
1175 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast<void> (0));
1176 init(Ptr, IdxList, NameStr);
1177}
1178
1179GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1180 ArrayRef<Value *> IdxList, unsigned Values,
1181 const Twine &NameStr,
1182 BasicBlock *InsertAtEnd)
1183 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1184 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1185 Values, InsertAtEnd),
1186 SourceElementType(PointeeType),
1187 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1188 assert(cast<PointerType>(getType()->getScalarType())(static_cast<void> (0))
1189 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast<void> (0));
1190 init(Ptr, IdxList, NameStr);
1191}
1192
1193DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast<void> (0)); return cast_or_null
<Value>( OperandTraits<GetElementPtrInst>::op_begin
(const_cast<GetElementPtrInst*>(this))[i_nocapture].get
()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits
<GetElementPtrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned GetElementPtrInst::getNumOperands() const { return
OperandTraits<GetElementPtrInst>::operands(this); } template
<int Idx_nocapture> Use &GetElementPtrInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &GetElementPtrInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1194
1195//===----------------------------------------------------------------------===//
1196// ICmpInst Class
1197//===----------------------------------------------------------------------===//
1198
1199/// This instruction compares its operands according to the predicate given
1200/// to the constructor. It only operates on integers or pointers. The operands
1201/// must be identical types.
1202/// Represent an integer comparison operator.
1203class ICmpInst: public CmpInst {
1204 void AssertOK() {
1205 assert(isIntPredicate() &&(static_cast<void> (0))
1206 "Invalid ICmp predicate value")(static_cast<void> (0));
1207 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast<void> (0))
1208 "Both operands to ICmp instruction are not of the same type!")(static_cast<void> (0));
1209 // Check that the operands are the right type
1210 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast<void> (0))
1211 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast<void> (0))
1212 "Invalid operand types for ICmp instruction")(static_cast<void> (0));
1213 }
1214
1215protected:
1216 // Note: Instruction needs to be a friend here to call cloneImpl.
1217 friend class Instruction;
1218
1219 /// Clone an identical ICmpInst
1220 ICmpInst *cloneImpl() const;
1221
1222public:
1223 /// Constructor with insert-before-instruction semantics.
1224 ICmpInst(
1225 Instruction *InsertBefore, ///< Where to insert
1226 Predicate pred, ///< The predicate to use for the comparison
1227 Value *LHS, ///< The left-hand-side of the expression
1228 Value *RHS, ///< The right-hand-side of the expression
1229 const Twine &NameStr = "" ///< Name of the instruction
1230 ) : CmpInst(makeCmpResultType(LHS->getType()),
1231 Instruction::ICmp, pred, LHS, RHS, NameStr,
1232 InsertBefore) {
1233#ifndef NDEBUG1
1234 AssertOK();
1235#endif
1236 }
1237
1238 /// Constructor with insert-at-end semantics.
1239 ICmpInst(
1240 BasicBlock &InsertAtEnd, ///< Block to insert into.
1241 Predicate pred, ///< The predicate to use for the comparison
1242 Value *LHS, ///< The left-hand-side of the expression
1243 Value *RHS, ///< The right-hand-side of the expression
1244 const Twine &NameStr = "" ///< Name of the instruction
1245 ) : CmpInst(makeCmpResultType(LHS->getType()),
1246 Instruction::ICmp, pred, LHS, RHS, NameStr,
1247 &InsertAtEnd) {
1248#ifndef NDEBUG1
1249 AssertOK();
1250#endif
1251 }
1252
1253 /// Constructor with no-insertion semantics
1254 ICmpInst(
1255 Predicate pred, ///< The predicate to use for the comparison
1256 Value *LHS, ///< The left-hand-side of the expression
1257 Value *RHS, ///< The right-hand-side of the expression
1258 const Twine &NameStr = "" ///< Name of the instruction
1259 ) : CmpInst(makeCmpResultType(LHS->getType()),
1260 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1261#ifndef NDEBUG1
1262 AssertOK();
1263#endif
1264 }
1265
1266 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1267 /// @returns the predicate that would be the result if the operand were
1268 /// regarded as signed.
1269 /// Return the signed version of the predicate
1270 Predicate getSignedPredicate() const {
1271 return getSignedPredicate(getPredicate());
1272 }
1273
1274 /// This is a static version that you can use without an instruction.
1275 /// Return the signed version of the predicate.
1276 static Predicate getSignedPredicate(Predicate pred);
1277
1278 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1279 /// @returns the predicate that would be the result if the operand were
1280 /// regarded as unsigned.
1281 /// Return the unsigned version of the predicate
1282 Predicate getUnsignedPredicate() const {
1283 return getUnsignedPredicate(getPredicate());
1284 }
1285
1286 /// This is a static version that you can use without an instruction.
1287 /// Return the unsigned version of the predicate.
1288 static Predicate getUnsignedPredicate(Predicate pred);
1289
1290 /// Return true if this predicate is either EQ or NE. This also
1291 /// tests for commutativity.
1292 static bool isEquality(Predicate P) {
1293 return P == ICMP_EQ || P == ICMP_NE;
1294 }
1295
1296 /// Return true if this predicate is either EQ or NE. This also
1297 /// tests for commutativity.
1298 bool isEquality() const {
1299 return isEquality(getPredicate());
1300 }
1301
1302 /// @returns true if the predicate of this ICmpInst is commutative
1303 /// Determine if this relation is commutative.
1304 bool isCommutative() const { return isEquality(); }
1305
1306 /// Return true if the predicate is relational (not EQ or NE).
1307 ///
1308 bool isRelational() const {
1309 return !isEquality();
1310 }
1311
1312 /// Return true if the predicate is relational (not EQ or NE).
1313 ///
1314 static bool isRelational(Predicate P) {
1315 return !isEquality(P);
1316 }
1317
1318 /// Return true if the predicate is SGT or UGT.
1319 ///
1320 static bool isGT(Predicate P) {
1321 return P == ICMP_SGT || P == ICMP_UGT;
1322 }
1323
1324 /// Return true if the predicate is SLT or ULT.
1325 ///
1326 static bool isLT(Predicate P) {
1327 return P == ICMP_SLT || P == ICMP_ULT;
1328 }
1329
1330 /// Return true if the predicate is SGE or UGE.
1331 ///
1332 static bool isGE(Predicate P) {
1333 return P == ICMP_SGE || P == ICMP_UGE;
1334 }
1335
1336 /// Return true if the predicate is SLE or ULE.
1337 ///
1338 static bool isLE(Predicate P) {
1339 return P == ICMP_SLE || P == ICMP_ULE;
1340 }
1341
1342 /// Exchange the two operands to this instruction in such a way that it does
1343 /// not modify the semantics of the instruction. The predicate value may be
1344 /// changed to retain the same result if the predicate is order dependent
1345 /// (e.g. ult).
1346 /// Swap operands and adjust predicate.
1347 void swapOperands() {
1348 setPredicate(getSwappedPredicate());
1349 Op<0>().swap(Op<1>());
1350 }
1351
1352 // Methods for support type inquiry through isa, cast, and dyn_cast:
1353 static bool classof(const Instruction *I) {
1354 return I->getOpcode() == Instruction::ICmp;
1355 }
1356 static bool classof(const Value *V) {
1357 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1358 }
1359};
1360
1361//===----------------------------------------------------------------------===//
1362// FCmpInst Class
1363//===----------------------------------------------------------------------===//
1364
1365/// This instruction compares its operands according to the predicate given
1366/// to the constructor. It only operates on floating point values or packed
1367/// vectors of floating point values. The operands must be identical types.
1368/// Represents a floating point comparison operator.
1369class FCmpInst: public CmpInst {
1370 void AssertOK() {
1371 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast<void> (0));
1372 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast<void> (0))
1373 "Both operands to FCmp instruction are not of the same type!")(static_cast<void> (0));
1374 // Check that the operands are the right type
1375 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast<void> (0))
1376 "Invalid operand types for FCmp instruction")(static_cast<void> (0));
1377 }
1378
1379protected:
1380 // Note: Instruction needs to be a friend here to call cloneImpl.
1381 friend class Instruction;
1382
1383 /// Clone an identical FCmpInst
1384 FCmpInst *cloneImpl() const;
1385
1386public:
1387 /// Constructor with insert-before-instruction semantics.
1388 FCmpInst(
1389 Instruction *InsertBefore, ///< Where to insert
1390 Predicate pred, ///< The predicate to use for the comparison
1391 Value *LHS, ///< The left-hand-side of the expression
1392 Value *RHS, ///< The right-hand-side of the expression
1393 const Twine &NameStr = "" ///< Name of the instruction
1394 ) : CmpInst(makeCmpResultType(LHS->getType()),
1395 Instruction::FCmp, pred, LHS, RHS, NameStr,
1396 InsertBefore) {
1397 AssertOK();
1398 }
1399
1400 /// Constructor with insert-at-end semantics.
1401 FCmpInst(
1402 BasicBlock &InsertAtEnd, ///< Block to insert into.
1403 Predicate pred, ///< The predicate to use for the comparison
1404 Value *LHS, ///< The left-hand-side of the expression
1405 Value *RHS, ///< The right-hand-side of the expression
1406 const Twine &NameStr = "" ///< Name of the instruction
1407 ) : CmpInst(makeCmpResultType(LHS->getType()),
1408 Instruction::FCmp, pred, LHS, RHS, NameStr,
1409 &InsertAtEnd) {
1410 AssertOK();
1411 }
1412
1413 /// Constructor with no-insertion semantics
1414 FCmpInst(
1415 Predicate Pred, ///< The predicate to use for the comparison
1416 Value *LHS, ///< The left-hand-side of the expression
1417 Value *RHS, ///< The right-hand-side of the expression
1418 const Twine &NameStr = "", ///< Name of the instruction
1419 Instruction *FlagsSource = nullptr
1420 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1421 RHS, NameStr, nullptr, FlagsSource) {
1422 AssertOK();
1423 }
1424
1425 /// @returns true if the predicate of this instruction is EQ or NE.
1426 /// Determine if this is an equality predicate.
1427 static bool isEquality(Predicate Pred) {
1428 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1429 Pred == FCMP_UNE;
1430 }
1431
1432 /// @returns true if the predicate of this instruction is EQ or NE.
1433 /// Determine if this is an equality predicate.
1434 bool isEquality() const { return isEquality(getPredicate()); }
1435
1436 /// @returns true if the predicate of this instruction is commutative.
1437 /// Determine if this is a commutative predicate.
1438 bool isCommutative() const {
1439 return isEquality() ||
1440 getPredicate() == FCMP_FALSE ||
1441 getPredicate() == FCMP_TRUE ||
1442 getPredicate() == FCMP_ORD ||
1443 getPredicate() == FCMP_UNO;
1444 }
1445
1446 /// @returns true if the predicate is relational (not EQ or NE).
1447 /// Determine if this a relational predicate.
1448 bool isRelational() const { return !isEquality(); }
1449
1450 /// Exchange the two operands to this instruction in such a way that it does
1451 /// not modify the semantics of the instruction. The predicate value may be
1452 /// changed to retain the same result if the predicate is order dependent
1453 /// (e.g. ult).
1454 /// Swap operands and adjust predicate.
1455 void swapOperands() {
1456 setPredicate(getSwappedPredicate());
1457 Op<0>().swap(Op<1>());
1458 }
1459
1460 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1461 static bool classof(const Instruction *I) {
1462 return I->getOpcode() == Instruction::FCmp;
1463 }
1464 static bool classof(const Value *V) {
1465 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1466 }
1467};
1468
1469//===----------------------------------------------------------------------===//
1470/// This class represents a function call, abstracting a target
1471/// machine's calling convention. This class uses low bit of the SubClassData
1472/// field to indicate whether or not this is a tail call. The rest of the bits
1473/// hold the calling convention of the call.
1474///
1475class CallInst : public CallBase {
1476 CallInst(const CallInst &CI);
1477
1478 /// Construct a CallInst given a range of arguments.
1479 /// Construct a CallInst from a range of arguments
1480 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1481 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1482 Instruction *InsertBefore);
1483
1484 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1485 const Twine &NameStr, Instruction *InsertBefore)
1486 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1487
1488 /// Construct a CallInst given a range of arguments.
1489 /// Construct a CallInst from a range of arguments
1490 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1491 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1492 BasicBlock *InsertAtEnd);
1493
1494 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1495 Instruction *InsertBefore);
1496
1497 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1498 BasicBlock *InsertAtEnd);
1499
1500 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1501 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1502 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1503
1504 /// Compute the number of operands to allocate.
1505 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1506 // We need one operand for the called function, plus the input operand
1507 // counts provided.
1508 return 1 + NumArgs + NumBundleInputs;
1509 }
1510
1511protected:
1512 // Note: Instruction needs to be a friend here to call cloneImpl.
1513 friend class Instruction;
1514
1515 CallInst *cloneImpl() const;
1516
1517public:
1518 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1519 Instruction *InsertBefore = nullptr) {
1520 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1521 }
1522
1523 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1524 const Twine &NameStr,
1525 Instruction *InsertBefore = nullptr) {
1526 return new (ComputeNumOperands(Args.size()))
1527 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1528 }
1529
1530 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1531 ArrayRef<OperandBundleDef> Bundles = None,
1532 const Twine &NameStr = "",
1533 Instruction *InsertBefore = nullptr) {
1534 const int NumOperands =
1535 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1536 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1537
1538 return new (NumOperands, DescriptorBytes)
1539 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1540 }
1541
1542 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1543 BasicBlock *InsertAtEnd) {
1544 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1545 }
1546
1547 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1548 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1549 return new (ComputeNumOperands(Args.size()))
1550 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1551 }
1552
1553 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1554 ArrayRef<OperandBundleDef> Bundles,
1555 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1556 const int NumOperands =
1557 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1558 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1559
1560 return new (NumOperands, DescriptorBytes)
1561 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1562 }
1563
1564 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1565 Instruction *InsertBefore = nullptr) {
1566 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1567 InsertBefore);
1568 }
1569
1570 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1571 ArrayRef<OperandBundleDef> Bundles = None,
1572 const Twine &NameStr = "",
1573 Instruction *InsertBefore = nullptr) {
1574 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1575 NameStr, InsertBefore);
1576 }
1577
1578 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1579 const Twine &NameStr,
1580 Instruction *InsertBefore = nullptr) {
1581 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1582 InsertBefore);
1583 }
1584
1585 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1586 BasicBlock *InsertAtEnd) {
1587 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1588 InsertAtEnd);
1589 }
1590
1591 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1592 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1593 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1594 InsertAtEnd);
1595 }
1596
1597 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1598 ArrayRef<OperandBundleDef> Bundles,
1599 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1600 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1601 NameStr, InsertAtEnd);
1602 }
1603
1604 /// Create a clone of \p CI with a different set of operand bundles and
1605 /// insert it before \p InsertPt.
1606 ///
1607 /// The returned call instruction is identical \p CI in every way except that
1608 /// the operand bundles for the new instruction are set to the operand bundles
1609 /// in \p Bundles.
1610 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1611 Instruction *InsertPt = nullptr);
1612
1613 /// Generate the IR for a call to malloc:
1614 /// 1. Compute the malloc call's argument as the specified type's size,
1615 /// possibly multiplied by the array size if the array size is not
1616 /// constant 1.
1617 /// 2. Call malloc with that argument.
1618 /// 3. Bitcast the result of the malloc call to the specified type.
1619 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1620 Type *AllocTy, Value *AllocSize,
1621 Value *ArraySize = nullptr,
1622 Function *MallocF = nullptr,
1623 const Twine &Name = "");
1624 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1625 Type *AllocTy, Value *AllocSize,
1626 Value *ArraySize = nullptr,
1627 Function *MallocF = nullptr,
1628 const Twine &Name = "");
1629 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1630 Type *AllocTy, Value *AllocSize,
1631 Value *ArraySize = nullptr,
1632 ArrayRef<OperandBundleDef> Bundles = None,
1633 Function *MallocF = nullptr,
1634 const Twine &Name = "");
1635 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1636 Type *AllocTy, Value *AllocSize,
1637 Value *ArraySize = nullptr,
1638 ArrayRef<OperandBundleDef> Bundles = None,
1639 Function *MallocF = nullptr,
1640 const Twine &Name = "");
1641 /// Generate the IR for a call to the builtin free function.
1642 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1643 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1644 static Instruction *CreateFree(Value *Source,
1645 ArrayRef<OperandBundleDef> Bundles,
1646 Instruction *InsertBefore);
1647 static Instruction *CreateFree(Value *Source,
1648 ArrayRef<OperandBundleDef> Bundles,
1649 BasicBlock *InsertAtEnd);
1650
1651 // Note that 'musttail' implies 'tail'.
1652 enum TailCallKind : unsigned {
1653 TCK_None = 0,
1654 TCK_Tail = 1,
1655 TCK_MustTail = 2,
1656 TCK_NoTail = 3,
1657 TCK_LAST = TCK_NoTail
1658 };
1659
1660 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1661 static_assert(
1662 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1663 "Bitfields must be contiguous");
1664
1665 TailCallKind getTailCallKind() const {
1666 return getSubclassData<TailCallKindField>();
1667 }
1668
1669 bool isTailCall() const {
1670 TailCallKind Kind = getTailCallKind();
1671 return Kind == TCK_Tail || Kind == TCK_MustTail;
1672 }
1673
1674 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1675
1676 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1677
1678 void setTailCallKind(TailCallKind TCK) {
1679 setSubclassData<TailCallKindField>(TCK);
1680 }
1681
1682 void setTailCall(bool IsTc = true) {
1683 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1684 }
1685
1686 /// Return true if the call can return twice
1687 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1688 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1689
1690 // Methods for support type inquiry through isa, cast, and dyn_cast:
1691 static bool classof(const Instruction *I) {
1692 return I->getOpcode() == Instruction::Call;
1693 }
1694 static bool classof(const Value *V) {
1695 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1696 }
1697
1698 /// Updates profile metadata by scaling it by \p S / \p T.
1699 void updateProfWeight(uint64_t S, uint64_t T);
1700
1701private:
1702 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1703 // method so that subclasses cannot accidentally use it.
1704 template <typename Bitfield>
1705 void setSubclassData(typename Bitfield::Type Value) {
1706 Instruction::setSubclassData<Bitfield>(Value);
1707 }
1708};
1709
1710CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1711 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1712 BasicBlock *InsertAtEnd)
1713 : CallBase(Ty->getReturnType(), Instruction::Call,
1714 OperandTraits<CallBase>::op_end(this) -
1715 (Args.size() + CountBundleInputs(Bundles) + 1),
1716 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1717 InsertAtEnd) {
1718 init(Ty, Func, Args, Bundles, NameStr);
1719}
1720
1721CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1722 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1723 Instruction *InsertBefore)
1724 : CallBase(Ty->getReturnType(), Instruction::Call,
1725 OperandTraits<CallBase>::op_end(this) -
1726 (Args.size() + CountBundleInputs(Bundles) + 1),
1727 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1728 InsertBefore) {
1729 init(Ty, Func, Args, Bundles, NameStr);
1730}
1731
1732//===----------------------------------------------------------------------===//
1733// SelectInst Class
1734//===----------------------------------------------------------------------===//
1735
1736/// This class represents the LLVM 'select' instruction.
1737///
1738class SelectInst : public Instruction {
1739 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1740 Instruction *InsertBefore)
1741 : Instruction(S1->getType(), Instruction::Select,
1742 &Op<0>(), 3, InsertBefore) {
1743 init(C, S1, S2);
1744 setName(NameStr);
1745 }
1746
1747 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1748 BasicBlock *InsertAtEnd)
1749 : Instruction(S1->getType(), Instruction::Select,
1750 &Op<0>(), 3, InsertAtEnd) {
1751 init(C, S1, S2);
1752 setName(NameStr);
1753 }
1754
1755 void init(Value *C, Value *S1, Value *S2) {
1756 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast<void> (0));
1757 Op<0>() = C;
1758 Op<1>() = S1;
1759 Op<2>() = S2;
1760 }
1761
1762protected:
1763 // Note: Instruction needs to be a friend here to call cloneImpl.
1764 friend class Instruction;
1765
1766 SelectInst *cloneImpl() const;
1767
1768public:
1769 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1770 const Twine &NameStr = "",
1771 Instruction *InsertBefore = nullptr,
1772 Instruction *MDFrom = nullptr) {
1773 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1774 if (MDFrom)
1775 Sel->copyMetadata(*MDFrom);
1776 return Sel;
1777 }
1778
1779 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1780 const Twine &NameStr,
1781 BasicBlock *InsertAtEnd) {
1782 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1783 }
1784
1785 const Value *getCondition() const { return Op<0>(); }
1786 const Value *getTrueValue() const { return Op<1>(); }
1787 const Value *getFalseValue() const { return Op<2>(); }
1788 Value *getCondition() { return Op<0>(); }
1789 Value *getTrueValue() { return Op<1>(); }
1790 Value *getFalseValue() { return Op<2>(); }
1791
1792 void setCondition(Value *V) { Op<0>() = V; }
1793 void setTrueValue(Value *V) { Op<1>() = V; }
1794 void setFalseValue(Value *V) { Op<2>() = V; }
1795
1796 /// Swap the true and false values of the select instruction.
1797 /// This doesn't swap prof metadata.
1798 void swapValues() { Op<1>().swap(Op<2>()); }
1799
1800 /// Return a string if the specified operands are invalid
1801 /// for a select operation, otherwise return null.
1802 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1803
1804 /// Transparently provide more efficient getOperand methods.
1805 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1806
1807 OtherOps getOpcode() const {
1808 return static_cast<OtherOps>(Instruction::getOpcode());
1809 }
1810
1811 // Methods for support type inquiry through isa, cast, and dyn_cast:
1812 static bool classof(const Instruction *I) {
1813 return I->getOpcode() == Instruction::Select;
1814 }
1815 static bool classof(const Value *V) {
1816 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1817 }
1818};
1819
1820template <>
1821struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1822};
1823
// Out-of-line bodies for SelectInst's operand accessors (the analyzer shows
// the macro expanded). The "(static_cast<void> (0))" fragments are the
// NDEBUG-compiled remains of the macro's index-range assert() calls.
1824DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast<void
> (0)); return cast_or_null<Value>( OperandTraits<
SelectInst>::op_begin(const_cast<SelectInst*>(this))
[i_nocapture].get()); } void SelectInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits
<SelectInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned SelectInst::getNumOperands() const { return OperandTraits
<SelectInst>::operands(this); } template <int Idx_nocapture
> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1825
1826//===----------------------------------------------------------------------===//
1827// VAArgInst Class
1828//===----------------------------------------------------------------------===//
1829
1830/// This class represents the va_arg llvm instruction, which returns
1831/// an argument of the specified type given a va_list and increments that list
1832///
1833class VAArgInst : public UnaryInstruction {
1834protected:
1835  // Note: Instruction needs to be a friend here to call cloneImpl.
1836  friend class Instruction;
1837
1838  VAArgInst *cloneImpl() const;
1839
1840public:
  /// Construct a va_arg of type \p Ty reading from the va_list \p List,
  /// optionally inserted before an existing instruction.
1841  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1842             Instruction *InsertBefore = nullptr)
1843    : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1844    setName(NameStr);
1845  }
1846
  /// As above, but appends the new instruction to \p InsertAtEnd.
1847  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1848            BasicBlock *InsertAtEnd)
1849    : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1850    setName(NameStr);
1851  }
1852
  /// The va_list pointer is the instruction's single operand (index 0).
1853  Value *getPointerOperand() { return getOperand(0); }
1854  const Value *getPointerOperand() const { return getOperand(0); }
1855  static unsigned getPointerOperandIndex() { return 0U; }
1856
1857  // Methods for support type inquiry through isa, cast, and dyn_cast:
1858  static bool classof(const Instruction *I) {
1859    return I->getOpcode() == VAArg;
1860  }
1861  static bool classof(const Value *V) {
1862    return isa<Instruction>(V) && classof(cast<Instruction>(V));
1863  }
1864};
1865
1866//===----------------------------------------------------------------------===//
1867// ExtractElementInst Class
1868//===----------------------------------------------------------------------===//
1869
1870/// This instruction extracts a single (scalar)
1871/// element from a VectorType value
1872///
1873class ExtractElementInst : public Instruction {
  // Constructors are private; use the Create() factories below, which
  // co-allocate the 2 operand slots with the instruction.
1874  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1875                     Instruction *InsertBefore = nullptr);
1876  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1877                     BasicBlock *InsertAtEnd);
1878
1879protected:
1880  // Note: Instruction needs to be a friend here to call cloneImpl.
1881  friend class Instruction;
1882
1883  ExtractElementInst *cloneImpl() const;
1884
1885public:
  /// Create an extractelement of element \p Idx from vector \p Vec.
  // new(2) reserves space for exactly two operands (vector, index).
1886  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1887                                   const Twine &NameStr = "",
1888                                   Instruction *InsertBefore = nullptr) {
1889    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1890  }
1891
1892  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1893                                   const Twine &NameStr,
1894                                   BasicBlock *InsertAtEnd) {
1895    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1896  }
1897
1898  /// Return true if an extractelement instruction can be
1899  /// formed with the specified operands.
1900  static bool isValidOperands(const Value *Vec, const Value *Idx);
1901
  // Operand layout: 0 = vector being indexed, 1 = element index.
1902  Value *getVectorOperand() { return Op<0>(); }
1903  Value *getIndexOperand() { return Op<1>(); }
1904  const Value *getVectorOperand() const { return Op<0>(); }
1905  const Value *getIndexOperand() const { return Op<1>(); }
1906
  /// Type of the source vector operand.
1907  VectorType *getVectorOperandType() const {
1908    return cast<VectorType>(getVectorOperand()->getType());
1909  }
1910
1911  /// Transparently provide more efficient getOperand methods.
  // NOTE(review): macro shown expanded by the analyzer; declares the
  // standard User operand accessors defined out-of-line below the class.
1912  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1913
1914  // Methods for support type inquiry through isa, cast, and dyn_cast:
1915  static bool classof(const Instruction *I) {
1916    return I->getOpcode() == Instruction::ExtractElement;
1917  }
1918  static bool classof(const Value *V) {
1919    return isa<Instruction>(V) && classof(cast<Instruction>(V));
1920  }
1921};
1922
// An ExtractElementInst always has exactly 2 operands (vector, index).
1923template <>
1924struct OperandTraits<ExtractElementInst> :
1925  public FixedNumOperandTraits<ExtractElementInst, 2> {
1926};
1927
// Out-of-line bodies for ExtractElementInst's operand accessors (macro shown
// expanded by the analyzer; "(static_cast<void> (0))" is the NDEBUG form of
// the macro's range assert() calls).
1928DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast<void> (0)); return cast_or_null<Value>
( OperandTraits<ExtractElementInst>::op_begin(const_cast
<ExtractElementInst*>(this))[i_nocapture].get()); } void
ExtractElementInst::setOperand(unsigned i_nocapture, Value *
Val_nocapture) { (static_cast<void> (0)); OperandTraits
<ExtractElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ExtractElementInst::getNumOperands() const { return
OperandTraits<ExtractElementInst>::operands(this); } template
<int Idx_nocapture> Use &ExtractElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ExtractElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1929
1930//===----------------------------------------------------------------------===//
1931// InsertElementInst Class
1932//===----------------------------------------------------------------------===//
1933
1934/// This instruction inserts a single (scalar)
1935/// element into a VectorType value
1936///
1937class InsertElementInst : public Instruction {
  // Constructors are private; use the Create() factories below, which
  // co-allocate the 3 operand slots with the instruction.
1938  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1939                    const Twine &NameStr = "",
1940                    Instruction *InsertBefore = nullptr);
1941  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1942                    BasicBlock *InsertAtEnd);
1943
1944protected:
1945  // Note: Instruction needs to be a friend here to call cloneImpl.
1946  friend class Instruction;
1947
1948  InsertElementInst *cloneImpl() const;
1949
1950public:
  /// Create an insertelement writing \p NewElt at position \p Idx of \p Vec.
  // new(3) reserves space for exactly three operands (vector, element, index).
1951  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1952                                   const Twine &NameStr = "",
1953                                   Instruction *InsertBefore = nullptr) {
1954    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1955  }
1956
1957  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1958                                   const Twine &NameStr,
1959                                   BasicBlock *InsertAtEnd) {
1960    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1961  }
1962
1963  /// Return true if an insertelement instruction can be
1964  /// formed with the specified operands.
1965  static bool isValidOperands(const Value *Vec, const Value *NewElt,
1966                              const Value *Idx);
1967
1968  /// Overload to return most specific vector type.
1969  ///
1970  VectorType *getType() const {
1971    return cast<VectorType>(Instruction::getType());
1972  }
1973
1974  /// Transparently provide more efficient getOperand methods.
  // NOTE(review): macro shown expanded by the analyzer; declares the
  // standard User operand accessors defined out-of-line below the class.
1975  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1976
1977  // Methods for support type inquiry through isa, cast, and dyn_cast:
1978  static bool classof(const Instruction *I) {
1979    return I->getOpcode() == Instruction::InsertElement;
1980  }
1981  static bool classof(const Value *V) {
1982    return isa<Instruction>(V) && classof(cast<Instruction>(V));
1983  }
1984};
1985
// An InsertElementInst always has exactly 3 operands (vector, element, index).
1986template <>
1987struct OperandTraits<InsertElementInst> :
1988  public FixedNumOperandTraits<InsertElementInst, 3> {
1989};
1990
// Out-of-line bodies for InsertElementInst's operand accessors (macro shown
// expanded by the analyzer; "(static_cast<void> (0))" is the NDEBUG form of
// the macro's range assert() calls).
1991DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast<void> (0)); return cast_or_null
<Value>( OperandTraits<InsertElementInst>::op_begin
(const_cast<InsertElementInst*>(this))[i_nocapture].get
()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits
<InsertElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertElementInst::getNumOperands() const { return
OperandTraits<InsertElementInst>::operands(this); } template
<int Idx_nocapture> Use &InsertElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &InsertElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1992
1993//===----------------------------------------------------------------------===//
1994// ShuffleVectorInst Class
1995//===----------------------------------------------------------------------===//
1996
/// Sentinel shuffle-mask element: marks a result element as undefined.
1997constexpr int UndefMaskElem = -1;
1998
1999/// This instruction constructs a fixed permutation of two
2000/// input vectors.
2001///
2002/// For each element of the result vector, the shuffle mask selects an element
2003/// from one of the input vectors to copy to the result. Non-negative elements
2004/// in the mask represent an index into the concatenated pair of input vectors.
2005/// UndefMaskElem (-1) specifies that the result element is undefined.
2006///
2007/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2008/// requirement may be relaxed in the future.
2009class ShuffleVectorInst : public Instruction {
  // The mask lives out-of-line in this member rather than as an IR operand;
  // ShuffleMaskForBitcode caches the equivalent Constant form (see
  // getShuffleMaskForBitcode()).
2010  SmallVector<int, 4> ShuffleMask;
2011  Constant *ShuffleMaskForBitcode;
2012
2013protected:
2014  // Note: Instruction needs to be a friend here to call cloneImpl.
2015  friend class Instruction;
2016
2017  ShuffleVectorInst *cloneImpl() const;
2018
2019public:
  // NOTE(review): "InsertBefor" (sic) is the upstream parameter spelling;
  // kept as-is because it is part of the published declaration.
2020  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2021                    const Twine &NameStr = "",
2022                    Instruction *InsertBefor = nullptr);
2023  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2024                    const Twine &NameStr, BasicBlock *InsertAtEnd);
2025  ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2026                    const Twine &NameStr = "",
2027                    Instruction *InsertBefor = nullptr);
2028  ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2029                    const Twine &NameStr, BasicBlock *InsertAtEnd);
2030
  // Exactly two IR operand slots are allocated (the two input vectors);
  // the mask itself is not an operand.
2031  void *operator new(size_t S) { return User::operator new(S, 2); }
2032  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2033
2034  /// Swap the operands and adjust the mask to preserve the semantics
2035  /// of the instruction.
2036  void commute();
2037
2038  /// Return true if a shufflevector instruction can be
2039  /// formed with the specified operands.
2040  static bool isValidOperands(const Value *V1, const Value *V2,
2041                              const Value *Mask);
2042  static bool isValidOperands(const Value *V1, const Value *V2,
2043                              ArrayRef<int> Mask);
2044
2045  /// Overload to return most specific vector type.
2046  ///
2047  VectorType *getType() const {
2048    return cast<VectorType>(Instruction::getType());
2049  }
2050
2051  /// Transparently provide more efficient getOperand methods.
  // NOTE(review): macro shown expanded by the analyzer; declares the
  // standard User operand accessors defined out-of-line below the class.
2052  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2053
2054  /// Return the shuffle mask value of this instruction for the given element
2055  /// index. Return UndefMaskElem if the element is undef.
2056  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2057
2058  /// Convert the input shuffle mask operand to a vector of integers. Undefined
2059  /// elements of the mask are returned as UndefMaskElem.
2060  static void getShuffleMask(const Constant *Mask,
2061                             SmallVectorImpl<int> &Result);
2062
2063  /// Return the mask for this instruction as a vector of integers. Undefined
2064  /// elements of the mask are returned as UndefMaskElem.
2065  void getShuffleMask(SmallVectorImpl<int> &Result) const {
2066    Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2067  }
2068
2069  /// Return the mask for this instruction, for use in bitcode.
2070  ///
2071  /// TODO: This is temporary until we decide a new bitcode encoding for
2072  /// shufflevector.
2073  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2074
2075  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2076                                                Type *ResultTy);
2077
2078  void setShuffleMask(ArrayRef<int> Mask);
2079
2080  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2081
2082  /// Return true if this shuffle returns a vector with a different number of
2083  /// elements than its source vectors.
2084  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2085  ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2086  bool changesLength() const {
    // getKnownMinValue() makes this work for scalable vectors as well.
2087    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2088                                 ->getElementCount()
2089                                 .getKnownMinValue();
2090    unsigned NumMaskElts = ShuffleMask.size();
2091    return NumSourceElts != NumMaskElts;
2092  }
2093
2094  /// Return true if this shuffle returns a vector with a greater number of
2095  /// elements than its source vectors.
2096  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2097  bool increasesLength() const {
2098    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2099                                 ->getElementCount()
2100                                 .getKnownMinValue();
2101    unsigned NumMaskElts = ShuffleMask.size();
2102    return NumSourceElts < NumMaskElts;
2103  }
2104
2105  /// Return true if this shuffle mask chooses elements from exactly one source
2106  /// vector.
2107  /// Example: <7,5,undef,7>
2108  /// This assumes that vector operands are the same length as the mask.
2109  static bool isSingleSourceMask(ArrayRef<int> Mask);
2110  static bool isSingleSourceMask(const Constant *Mask) {
2111    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast<void> (0));
2112    SmallVector<int, 16> MaskAsInts;
2113    getShuffleMask(Mask, MaskAsInts);
2114    return isSingleSourceMask(MaskAsInts);
2115  }
2116
2117  /// Return true if this shuffle chooses elements from exactly one source
2118  /// vector without changing the length of that vector.
2119  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2120  /// TODO: Optionally allow length-changing shuffles.
2121  bool isSingleSource() const {
2122    return !changesLength() && isSingleSourceMask(ShuffleMask);
2123  }
2124
2125  /// Return true if this shuffle mask chooses elements from exactly one source
2126  /// vector without lane crossings. A shuffle using this mask is not
2127  /// necessarily a no-op because it may change the number of elements from its
2128  /// input vectors or it may provide demanded bits knowledge via undef lanes.
2129  /// Example: <undef,undef,2,3>
2130  static bool isIdentityMask(ArrayRef<int> Mask);
2131  static bool isIdentityMask(const Constant *Mask) {
2132    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast<void> (0));
2133    SmallVector<int, 16> MaskAsInts;
2134    getShuffleMask(Mask, MaskAsInts);
2135    return isIdentityMask(MaskAsInts);
2136  }
2137
2138  /// Return true if this shuffle chooses elements from exactly one source
2139  /// vector without lane crossings and does not change the number of elements
2140  /// from its input vectors.
2141  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2142  bool isIdentity() const {
2143    return !changesLength() && isIdentityMask(ShuffleMask);
2144  }
2145
2146  /// Return true if this shuffle lengthens exactly one source vector with
2147  /// undefs in the high elements.
2148  bool isIdentityWithPadding() const;
2149
2150  /// Return true if this shuffle extracts the first N elements of exactly one
2151  /// source vector.
2152  bool isIdentityWithExtract() const;
2153
2154  /// Return true if this shuffle concatenates its 2 source vectors. This
2155  /// returns false if either input is undefined. In that case, the shuffle is
2156  /// better classified as an identity with padding operation.
2157  bool isConcat() const;
2158
2159  /// Return true if this shuffle mask chooses elements from its source vectors
2160  /// without lane crossings. A shuffle using this mask would be
2161  /// equivalent to a vector select with a constant condition operand.
2162  /// Example: <4,1,6,undef>
2163  /// This returns false if the mask does not choose from both input vectors.
2164  /// In that case, the shuffle is better classified as an identity shuffle.
2165  /// This assumes that vector operands are the same length as the mask
2166  /// (a length-changing shuffle can never be equivalent to a vector select).
2167  static bool isSelectMask(ArrayRef<int> Mask);
2168  static bool isSelectMask(const Constant *Mask) {
2169    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast<void> (0));
2170    SmallVector<int, 16> MaskAsInts;
2171    getShuffleMask(Mask, MaskAsInts);
2172    return isSelectMask(MaskAsInts);
2173  }
2174
2175  /// Return true if this shuffle chooses elements from its source vectors
2176  /// without lane crossings and all operands have the same number of elements.
2177  /// In other words, this shuffle is equivalent to a vector select with a
2178  /// constant condition operand.
2179  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2180  /// This returns false if the mask does not choose from both input vectors.
2181  /// In that case, the shuffle is better classified as an identity shuffle.
2182  /// TODO: Optionally allow length-changing shuffles.
2183  bool isSelect() const {
2184    return !changesLength() && isSelectMask(ShuffleMask);
2185  }
2186
2187  /// Return true if this shuffle mask swaps the order of elements from exactly
2188  /// one source vector.
2189  /// Example: <7,6,undef,4>
2190  /// This assumes that vector operands are the same length as the mask.
2191  static bool isReverseMask(ArrayRef<int> Mask);
2192  static bool isReverseMask(const Constant *Mask) {
2193    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast<void> (0));
2194    SmallVector<int, 16> MaskAsInts;
2195    getShuffleMask(Mask, MaskAsInts);
2196    return isReverseMask(MaskAsInts);
2197  }
2198
2199  /// Return true if this shuffle swaps the order of elements from exactly
2200  /// one source vector.
2201  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2202  /// TODO: Optionally allow length-changing shuffles.
2203  bool isReverse() const {
2204    return !changesLength() && isReverseMask(ShuffleMask);
2205  }
2206
2207  /// Return true if this shuffle mask chooses all elements with the same value
2208  /// as the first element of exactly one source vector.
2209  /// Example: <4,undef,undef,4>
2210  /// This assumes that vector operands are the same length as the mask.
2211  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2212  static bool isZeroEltSplatMask(const Constant *Mask) {
2213    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast<void> (0));
2214    SmallVector<int, 16> MaskAsInts;
2215    getShuffleMask(Mask, MaskAsInts);
2216    return isZeroEltSplatMask(MaskAsInts);
2217  }
2218
2219  /// Return true if all elements of this shuffle are the same value as the
2220  /// first element of exactly one source vector without changing the length
2221  /// of that vector.
2222  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2223  /// TODO: Optionally allow length-changing shuffles.
2224  /// TODO: Optionally allow splats from other elements.
2225  bool isZeroEltSplat() const {
2226    return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2227  }
2228
2229  /// Return true if this shuffle mask is a transpose mask.
2230  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2231  /// even- or odd-numbered vector elements from two n-dimensional source
2232  /// vectors and write each result into consecutive elements of an
2233  /// n-dimensional destination vector. Two shuffles are necessary to complete
2234  /// the transpose, one for the even elements and another for the odd elements.
2235  /// This description closely follows how the TRN1 and TRN2 AArch64
2236  /// instructions operate.
2237  ///
2238  /// For example, a simple 2x2 matrix can be transposed with:
2239  ///
2240  ///   ; Original matrix
2241  ///   m0 = < a, b >
2242  ///   m1 = < c, d >
2243  ///
2244  ///   ; Transposed matrix
2245  ///   t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2246  ///   t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2247  ///
2248  /// For matrices having greater than n columns, the resulting nx2 transposed
2249  /// matrix is stored in two result vectors such that one vector contains
2250  /// interleaved elements from all the even-numbered rows and the other vector
2251  /// contains interleaved elements from all the odd-numbered rows. For example,
2252  /// a 2x4 matrix can be transposed with:
2253  ///
2254  ///   ; Original matrix
2255  ///   m0 = < a, b, c, d >
2256  ///   m1 = < e, f, g, h >
2257  ///
2258  ///   ; Transposed matrix
2259  ///   t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2260  ///   t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2261  static bool isTransposeMask(ArrayRef<int> Mask);
2262  static bool isTransposeMask(const Constant *Mask) {
2263    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast<void> (0));
2264    SmallVector<int, 16> MaskAsInts;
2265    getShuffleMask(Mask, MaskAsInts);
2266    return isTransposeMask(MaskAsInts);
2267  }
2268
2269  /// Return true if this shuffle transposes the elements of its inputs without
2270  /// changing the length of the vectors. This operation may also be known as a
2271  /// merge or interleave. See the description for isTransposeMask() for the
2272  /// exact specification.
2273  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2274  bool isTranspose() const {
2275    return !changesLength() && isTransposeMask(ShuffleMask);
2276  }
2277
2278  /// Return true if this shuffle mask is an extract subvector mask.
2279  /// A valid extract subvector mask returns a smaller vector from a single
2280  /// source operand. The base extraction index is returned as well.
2281  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2282                                     int &Index);
2283  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2284                                     int &Index) {
2285    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast<void> (0));
2286    // Not possible to express a shuffle mask for a scalable vector for this
2287    // case.
2288    if (isa<ScalableVectorType>(Mask->getType()))
2289      return false;
2290    SmallVector<int, 16> MaskAsInts;
2291    getShuffleMask(Mask, MaskAsInts);
2292    return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2293  }
2294
2295  /// Return true if this shuffle mask is an extract subvector mask.
2296  bool isExtractSubvectorMask(int &Index) const {
2297    // Not possible to express a shuffle mask for a scalable vector for this
2298    // case.
2299    if (isa<ScalableVectorType>(getType()))
2300      return false;
2301
2302    int NumSrcElts =
2303        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2304    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2305  }
2306
2307  /// Return true if this shuffle mask is an insert subvector mask.
2308  /// A valid insert subvector mask inserts the lowest elements of a second
2309  /// source operand into an in-place first source operand.
2310  /// Both the sub vector width and the insertion index is returned.
2311  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2312                                    int &NumSubElts, int &Index);
2313  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2314                                    int &NumSubElts, int &Index) {
2315    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast<void> (0));
2316    // Not possible to express a shuffle mask for a scalable vector for this
2317    // case.
2318    if (isa<ScalableVectorType>(Mask->getType()))
2319      return false;
2320    SmallVector<int, 16> MaskAsInts;
2321    getShuffleMask(Mask, MaskAsInts);
2322    return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2323  }
2324
2325  /// Return true if this shuffle mask is an insert subvector mask.
2326  bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2327    // Not possible to express a shuffle mask for a scalable vector for this
2328    // case.
2329    if (isa<ScalableVectorType>(getType()))
2330      return false;
2331
2332    int NumSrcElts =
2333        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2334    return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2335  }
2336
2337  /// Change values in a shuffle permute mask assuming the two vector operands
2338  /// of length InVecNumElts have swapped position.
2339  static void commuteShuffleMask(MutableArrayRef<int> Mask,
2340                                 unsigned InVecNumElts) {
2341    for (int &Idx : Mask) {
      // -1 (UndefMaskElem) stays undef regardless of operand order.
2342      if (Idx == -1)
2343        continue;
      // Indices < InVecNumElts referred to the first vector and now refer to
      // the second (and vice versa), so shift by the vector length.
2344      Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2345      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast<void> (0))
2346             "shufflevector mask index out of range")(static_cast<void> (0));
2347    }
2348  }
2349
2350  // Methods for support type inquiry through isa, cast, and dyn_cast:
2351  static bool classof(const Instruction *I) {
2352    return I->getOpcode() == Instruction::ShuffleVector;
2353  }
2354  static bool classof(const Value *V) {
2355    return isa<Instruction>(V) && classof(cast<Instruction>(V));
2356  }
2357};
2358
// A ShuffleVectorInst has exactly 2 IR operands (the two input vectors);
// the mask is stored out-of-line in the instruction itself.
2359template <>
2360struct OperandTraits<ShuffleVectorInst>
2361    : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2362
// Out-of-line bodies for ShuffleVectorInst's operand accessors (macro shown
// expanded by the analyzer; "(static_cast<void> (0))" is the NDEBUG form of
// the macro's range assert() calls).
2363DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast<void> (0)); return cast_or_null
<Value>( OperandTraits<ShuffleVectorInst>::op_begin
(const_cast<ShuffleVectorInst*>(this))[i_nocapture].get
()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits
<ShuffleVectorInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ShuffleVectorInst::getNumOperands() const { return
OperandTraits<ShuffleVectorInst>::operands(this); } template
<int Idx_nocapture> Use &ShuffleVectorInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ShuffleVectorInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2364
2365//===----------------------------------------------------------------------===//
2366// ExtractValueInst Class
2367//===----------------------------------------------------------------------===//
2368
2369/// This instruction extracts a struct member or array
2370/// element value from an aggregate value.
2371///
2372class ExtractValueInst : public UnaryInstruction {
  // The index path into the aggregate is stored out-of-line here, not as IR
  // operands (the sole operand is the aggregate itself).
2373  SmallVector<unsigned, 4> Indices;
2374
2375  ExtractValueInst(const ExtractValueInst &EVI);
2376
2377  /// Constructors - Create a extractvalue instruction with a base aggregate
2378  /// value and a list of indices. The first ctor can optionally insert before
2379  /// an existing instruction, the second appends the new instruction to the
2380  /// specified BasicBlock.
2381  inline ExtractValueInst(Value *Agg,
2382                          ArrayRef<unsigned> Idxs,
2383                          const Twine &NameStr,
2384                          Instruction *InsertBefore);
2385  inline ExtractValueInst(Value *Agg,
2386                          ArrayRef<unsigned> Idxs,
2387                          const Twine &NameStr, BasicBlock *InsertAtEnd);
2388
  // Shared constructor tail: records Idxs and sets the name.
2389  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2390
2391protected:
2392  // Note: Instruction needs to be a friend here to call cloneImpl.
2393  friend class Instruction;
2394
2395  ExtractValueInst *cloneImpl() const;
2396
2397public:
  /// Create an extractvalue of the element(s) at index path \p Idxs in \p Agg.
2398  static ExtractValueInst *Create(Value *Agg,
2399                                  ArrayRef<unsigned> Idxs,
2400                                  const Twine &NameStr = "",
2401                                  Instruction *InsertBefore = nullptr) {
2402    return new
2403      ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2404  }
2405
2406  static ExtractValueInst *Create(Value *Agg,
2407                                  ArrayRef<unsigned> Idxs,
2408                                  const Twine &NameStr,
2409                                  BasicBlock *InsertAtEnd) {
2410    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2411  }
2412
2413  /// Returns the type of the element that would be extracted
2414  /// with an extractvalue instruction with the specified parameters.
2415  ///
2416  /// Null is returned if the indices are invalid for the specified type.
2417  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2418
2419  using idx_iterator = const unsigned*;
2420
  // Iteration over the stored index path.
2421  inline idx_iterator idx_begin() const { return Indices.begin(); }
2422  inline idx_iterator idx_end() const { return Indices.end(); }
2423  inline iterator_range<idx_iterator> indices() const {
2424    return make_range(idx_begin(), idx_end());
2425  }
2426
  /// The aggregate being indexed is the sole operand (index 0).
2427  Value *getAggregateOperand() {
2428    return getOperand(0);
2429  }
2430  const Value *getAggregateOperand() const {
2431    return getOperand(0);
2432  }
2433  static unsigned getAggregateOperandIndex() {
2434    return 0U; // get index for modifying correct operand
2435  }
2436
2437  ArrayRef<unsigned> getIndices() const {
2438    return Indices;
2439  }
2440
2441  unsigned getNumIndices() const {
2442    return (unsigned)Indices.size();
2443  }
2444
  // Unconditionally true for this instruction kind; mirrors the hasIndices()
  // query shared with other aggregate instructions.
2445  bool hasIndices() const {
2446    return true;
2447  }
2448
2449  // Methods for support type inquiry through isa, cast, and dyn_cast:
2450  static bool classof(const Instruction *I) {
2451    return I->getOpcode() == Instruction::ExtractValue;
2452  }
2453  static bool classof(const Value *V) {
2454    return isa<Instruction>(V) && classof(cast<Instruction>(V));
2455  }
2456};
2457
2458ExtractValueInst::ExtractValueInst(Value *Agg,
2459 ArrayRef<unsigned> Idxs,
2460 const Twine &NameStr,
2461 Instruction *InsertBefore)
2462 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2463 ExtractValue, Agg, InsertBefore) {
2464 init(Idxs, NameStr);
2465}
2466
2467ExtractValueInst::ExtractValueInst(Value *Agg,
2468 ArrayRef<unsigned> Idxs,
2469 const Twine &NameStr,
2470 BasicBlock *InsertAtEnd)
2471 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2472 ExtractValue, Agg, InsertAtEnd) {
2473 init(Idxs, NameStr);
2474}
2475
2476//===----------------------------------------------------------------------===//
2477// InsertValueInst Class
2478//===----------------------------------------------------------------------===//
2479
2480/// This instruction inserts a struct field of array element
2481/// value into an aggregate value.
2482///
2483class InsertValueInst : public Instruction {
2484 SmallVector<unsigned, 4> Indices;
2485
2486 InsertValueInst(const InsertValueInst &IVI);
2487
2488 /// Constructors - Create a insertvalue instruction with a base aggregate
2489 /// value, a value to insert, and a list of indices. The first ctor can
2490 /// optionally insert before an existing instruction, the second appends
2491 /// the new instruction to the specified BasicBlock.
2492 inline InsertValueInst(Value *Agg, Value *Val,
2493 ArrayRef<unsigned> Idxs,
2494 const Twine &NameStr,
2495 Instruction *InsertBefore);
2496 inline InsertValueInst(Value *Agg, Value *Val,
2497 ArrayRef<unsigned> Idxs,
2498 const Twine &NameStr, BasicBlock *InsertAtEnd);
2499
2500 /// Constructors - These two constructors are convenience methods because one
2501 /// and two index insertvalue instructions are so common.
2502 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2503 const Twine &NameStr = "",
2504 Instruction *InsertBefore = nullptr);
2505 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2506 BasicBlock *InsertAtEnd);
2507
2508 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2509 const Twine &NameStr);
2510
2511protected:
2512 // Note: Instruction needs to be a friend here to call cloneImpl.
2513 friend class Instruction;
2514
2515 InsertValueInst *cloneImpl() const;
2516
2517public:
2518 // allocate space for exactly two operands
2519 void *operator new(size_t S) { return User::operator new(S, 2); }
2520 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2521
2522 static InsertValueInst *Create(Value *Agg, Value *Val,
2523 ArrayRef<unsigned> Idxs,
2524 const Twine &NameStr = "",
2525 Instruction *InsertBefore = nullptr) {
2526 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2527 }
2528
2529 static InsertValueInst *Create(Value *Agg, Value *Val,
2530 ArrayRef<unsigned> Idxs,
2531 const Twine &NameStr,
2532 BasicBlock *InsertAtEnd) {
2533 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2534 }
2535
2536 /// Transparently provide more efficient getOperand methods.
2537 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2538
2539 using idx_iterator = const unsigned*;
2540
2541 inline idx_iterator idx_begin() const { return Indices.begin(); }
2542 inline idx_iterator idx_end() const { return Indices.end(); }
2543 inline iterator_range<idx_iterator> indices() const {
2544 return make_range(idx_begin(), idx_end());
2545 }
2546
2547 Value *getAggregateOperand() {
2548 return getOperand(0);
2549 }
2550 const Value *getAggregateOperand() const {
2551 return getOperand(0);
2552 }
2553 static unsigned getAggregateOperandIndex() {
2554 return 0U; // get index for modifying correct operand
2555 }
2556
2557 Value *getInsertedValueOperand() {
2558 return getOperand(1);
2559 }
2560 const Value *getInsertedValueOperand() const {
2561 return getOperand(1);
2562 }
2563 static unsigned getInsertedValueOperandIndex() {
2564 return 1U; // get index for modifying correct operand
2565 }
2566
2567 ArrayRef<unsigned> getIndices() const {
2568 return Indices;
2569 }
2570
2571 unsigned getNumIndices() const {
2572 return (unsigned)Indices.size();
2573 }
2574
2575 bool hasIndices() const {
2576 return true;
2577 }
2578
2579 // Methods for support type inquiry through isa, cast, and dyn_cast:
2580 static bool classof(const Instruction *I) {
2581 return I->getOpcode() == Instruction::InsertValue;
2582 }
2583 static bool classof(const Value *V) {
2584 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2585 }
2586};
2587
2588template <>
2589struct OperandTraits<InsertValueInst> :
2590 public FixedNumOperandTraits<InsertValueInst, 2> {
2591};
2592
2593InsertValueInst::InsertValueInst(Value *Agg,
2594 Value *Val,
2595 ArrayRef<unsigned> Idxs,
2596 const Twine &NameStr,
2597 Instruction *InsertBefore)
2598 : Instruction(Agg->getType(), InsertValue,
2599 OperandTraits<InsertValueInst>::op_begin(this),
2600 2, InsertBefore) {
2601 init(Agg, Val, Idxs, NameStr);
2602}
2603
2604InsertValueInst::InsertValueInst(Value *Agg,
2605 Value *Val,
2606 ArrayRef<unsigned> Idxs,
2607 const Twine &NameStr,
2608 BasicBlock *InsertAtEnd)
2609 : Instruction(Agg->getType(), InsertValue,
2610 OperandTraits<InsertValueInst>::op_begin(this),
2611 2, InsertAtEnd) {
2612 init(Agg, Val, Idxs, NameStr);
2613}
2614
2615DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast<void
> (0)); return cast_or_null<Value>( OperandTraits<
InsertValueInst>::op_begin(const_cast<InsertValueInst*>
(this))[i_nocapture].get()); } void InsertValueInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<
void> (0)); OperandTraits<InsertValueInst>::op_begin
(this)[i_nocapture] = Val_nocapture; } unsigned InsertValueInst
::getNumOperands() const { return OperandTraits<InsertValueInst
>::operands(this); } template <int Idx_nocapture> Use
&InsertValueInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
InsertValueInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2616
2617//===----------------------------------------------------------------------===//
2618// PHINode Class
2619//===----------------------------------------------------------------------===//
2620
2621// PHINode - The PHINode class is used to represent the magical mystical PHI
2622// node, that can not exist in nature, but can be synthesized in a computer
2623// scientist's overactive imagination.
2624//
2625class PHINode : public Instruction {
2626 /// The number of operands actually allocated. NumOperands is
2627 /// the number actually in use.
2628 unsigned ReservedSpace;
2629
2630 PHINode(const PHINode &PN);
2631
2632 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2633 const Twine &NameStr = "",
2634 Instruction *InsertBefore = nullptr)
2635 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2636 ReservedSpace(NumReservedValues) {
2637 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast<void> (0));
2638 setName(NameStr);
2639 allocHungoffUses(ReservedSpace);
2640 }
2641
2642 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2643 BasicBlock *InsertAtEnd)
2644 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2645 ReservedSpace(NumReservedValues) {
2646 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast<void> (0));
2647 setName(NameStr);
2648 allocHungoffUses(ReservedSpace);
2649 }
2650
2651protected:
2652 // Note: Instruction needs to be a friend here to call cloneImpl.
2653 friend class Instruction;
2654
2655 PHINode *cloneImpl() const;
2656
2657 // allocHungoffUses - this is more complicated than the generic
2658 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2659 // values and pointers to the incoming blocks, all in one allocation.
2660 void allocHungoffUses(unsigned N) {
2661 User::allocHungoffUses(N, /* IsPhi */ true);
2662 }
2663
2664public:
2665 /// Constructors - NumReservedValues is a hint for the number of incoming
2666 /// edges that this phi node will have (use 0 if you really have no idea).
2667 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2668 const Twine &NameStr = "",
2669 Instruction *InsertBefore = nullptr) {
2670 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2671 }
2672
2673 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2674 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2675 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2676 }
2677
2678 /// Provide fast operand accessors
2679 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2680
2681 // Block iterator interface. This provides access to the list of incoming
2682 // basic blocks, which parallels the list of incoming values.
2683
2684 using block_iterator = BasicBlock **;
2685 using const_block_iterator = BasicBlock * const *;
2686
2687 block_iterator block_begin() {
2688 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2689 }
2690
2691 const_block_iterator block_begin() const {
2692 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2693 }
2694
2695 block_iterator block_end() {
2696 return block_begin() + getNumOperands();
2697 }
2698
2699 const_block_iterator block_end() const {
2700 return block_begin() + getNumOperands();
2701 }
2702
2703 iterator_range<block_iterator> blocks() {
2704 return make_range(block_begin(), block_end());
2705 }
2706
2707 iterator_range<const_block_iterator> blocks() const {
2708 return make_range(block_begin(), block_end());
2709 }
2710
2711 op_range incoming_values() { return operands(); }
2712
2713 const_op_range incoming_values() const { return operands(); }
2714
2715 /// Return the number of incoming edges
2716 ///
2717 unsigned getNumIncomingValues() const { return getNumOperands(); }
2718
2719 /// Return incoming value number x
2720 ///
2721 Value *getIncomingValue(unsigned i) const {
2722 return getOperand(i);
2723 }
2724 void setIncomingValue(unsigned i, Value *V) {
2725 assert(V && "PHI node got a null value!")(static_cast<void> (0));
2726 assert(getType() == V->getType() &&(static_cast<void> (0))
2727 "All operands to PHI node must be the same type as the PHI node!")(static_cast<void> (0));
2728 setOperand(i, V);
2729 }
2730
2731 static unsigned getOperandNumForIncomingValue(unsigned i) {
2732 return i;
2733 }
2734
2735 static unsigned getIncomingValueNumForOperand(unsigned i) {
2736 return i;
2737 }
2738
2739 /// Return incoming basic block number @p i.
2740 ///
2741 BasicBlock *getIncomingBlock(unsigned i) const {
2742 return block_begin()[i];
2743 }
2744
2745 /// Return incoming basic block corresponding
2746 /// to an operand of the PHI.
2747 ///
2748 BasicBlock *getIncomingBlock(const Use &U) const {
2749 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast<void> (0));
2750 return getIncomingBlock(unsigned(&U - op_begin()));
2751 }
2752
2753 /// Return incoming basic block corresponding
2754 /// to value use iterator.
2755 ///
2756 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2757 return getIncomingBlock(I.getUse());
2758 }
2759
2760 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2761 assert(BB && "PHI node got a null basic block!")(static_cast<void> (0));
2762 block_begin()[i] = BB;
2763 }
2764
2765 /// Replace every incoming basic block \p Old to basic block \p New.
2766 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2767 assert(New && Old && "PHI node got a null basic block!")(static_cast<void> (0));
2768 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2769 if (getIncomingBlock(Op) == Old)
2770 setIncomingBlock(Op, New);
2771 }
2772
2773 /// Add an incoming value to the end of the PHI list
2774 ///
2775 void addIncoming(Value *V, BasicBlock *BB) {
2776 if (getNumOperands() == ReservedSpace)
2777 growOperands(); // Get more space!
2778 // Initialize some new operands.
2779 setNumHungOffUseOperands(getNumOperands() + 1);
2780 setIncomingValue(getNumOperands() - 1, V);
2781 setIncomingBlock(getNumOperands() - 1, BB);
2782 }
2783
2784 /// Remove an incoming value. This is useful if a
2785 /// predecessor basic block is deleted. The value removed is returned.
2786 ///
2787 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2788 /// is true), the PHI node is destroyed and any uses of it are replaced with
2789 /// dummy values. The only time there should be zero incoming values to a PHI
2790 /// node is when the block is dead, so this strategy is sound.
2791 ///
2792 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2793
2794 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2795 int Idx = getBasicBlockIndex(BB);
2796 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast<void> (0));
2797 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2798 }
2799
2800 /// Return the first index of the specified basic
2801 /// block in the value list for this PHI. Returns -1 if no instance.
2802 ///
2803 int getBasicBlockIndex(const BasicBlock *BB) const {
2804 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2805 if (block_begin()[i] == BB)
2806 return i;
2807 return -1;
2808 }
2809
2810 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2811 int Idx = getBasicBlockIndex(BB);
2812 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast<void> (0));
2813 return getIncomingValue(Idx);
2814 }
2815
2816 /// Set every incoming value(s) for block \p BB to \p V.
2817 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2818 assert(BB && "PHI node got a null basic block!")(static_cast<void> (0));
2819 bool Found = false;
2820 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2821 if (getIncomingBlock(Op) == BB) {
2822 Found = true;
2823 setIncomingValue(Op, V);
2824 }
2825 (void)Found;
2826 assert(Found && "Invalid basic block argument to set!")(static_cast<void> (0));
2827 }
2828
2829 /// If the specified PHI node always merges together the
2830 /// same value, return the value, otherwise return null.
2831 Value *hasConstantValue() const;
2832
2833 /// Whether the specified PHI node always merges
2834 /// together the same value, assuming undefs are equal to a unique
2835 /// non-undef value.
2836 bool hasConstantOrUndefValue() const;
2837
2838 /// If the PHI node is complete which means all of its parent's predecessors
2839 /// have incoming value in this PHI, return true, otherwise return false.
2840 bool isComplete() const {
2841 return llvm::all_of(predecessors(getParent()),
2842 [this](const BasicBlock *Pred) {
2843 return getBasicBlockIndex(Pred) >= 0;
2844 });
2845 }
2846
2847 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2848 static bool classof(const Instruction *I) {
2849 return I->getOpcode() == Instruction::PHI;
2850 }
2851 static bool classof(const Value *V) {
2852 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2853 }
2854
2855private:
2856 void growOperands();
2857};
2858
2859template <>
2860struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2861};
2862
2863DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast<void> (0)); return cast_or_null<
Value>( OperandTraits<PHINode>::op_begin(const_cast<
PHINode*>(this))[i_nocapture].get()); } void PHINode::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<
void> (0)); OperandTraits<PHINode>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned PHINode::getNumOperands() const
{ return OperandTraits<PHINode>::operands(this); } template
<int Idx_nocapture> Use &PHINode::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &PHINode::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
2864
2865//===----------------------------------------------------------------------===//
2866// LandingPadInst Class
2867//===----------------------------------------------------------------------===//
2868
2869//===---------------------------------------------------------------------------
2870/// The landingpad instruction holds all of the information
2871/// necessary to generate correct exception handling. The landingpad instruction
2872/// cannot be moved from the top of a landing pad block, which itself is
2873/// accessible only from the 'unwind' edge of an invoke. This uses the
2874/// SubclassData field in Value to store whether or not the landingpad is a
2875/// cleanup.
2876///
2877class LandingPadInst : public Instruction {
2878 using CleanupField = BoolBitfieldElementT<0>;
2879
2880 /// The number of operands actually allocated. NumOperands is
2881 /// the number actually in use.
2882 unsigned ReservedSpace;
2883
2884 LandingPadInst(const LandingPadInst &LP);
2885
2886public:
2887 enum ClauseType { Catch, Filter };
2888
2889private:
2890 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2891 const Twine &NameStr, Instruction *InsertBefore);
2892 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2893 const Twine &NameStr, BasicBlock *InsertAtEnd);
2894
2895 // Allocate space for exactly zero operands.
2896 void *operator new(size_t S) { return User::operator new(S); }
2897
2898 void growOperands(unsigned Size);
2899 void init(unsigned NumReservedValues, const Twine &NameStr);
2900
2901protected:
2902 // Note: Instruction needs to be a friend here to call cloneImpl.
2903 friend class Instruction;
2904
2905 LandingPadInst *cloneImpl() const;
2906
2907public:
2908 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2909
2910 /// Constructors - NumReservedClauses is a hint for the number of incoming
2911 /// clauses that this landingpad will have (use 0 if you really have no idea).
2912 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2913 const Twine &NameStr = "",
2914 Instruction *InsertBefore = nullptr);
2915 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2916 const Twine &NameStr, BasicBlock *InsertAtEnd);
2917
2918 /// Provide fast operand accessors
2919 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2920
2921 /// Return 'true' if this landingpad instruction is a
2922 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2923 /// doesn't catch the exception.
2924 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2925
2926 /// Indicate that this landingpad instruction is a cleanup.
2927 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2928
2929 /// Add a catch or filter clause to the landing pad.
2930 void addClause(Constant *ClauseVal);
2931
2932 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2933 /// determine what type of clause this is.
2934 Constant *getClause(unsigned Idx) const {
2935 return cast<Constant>(getOperandList()[Idx]);
2936 }
2937
2938 /// Return 'true' if the clause and index Idx is a catch clause.
2939 bool isCatch(unsigned Idx) const {
2940 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2941 }
2942
2943 /// Return 'true' if the clause and index Idx is a filter clause.
2944 bool isFilter(unsigned Idx) const {
2945 return isa<ArrayType>(getOperandList()[Idx]->getType());
2946 }
2947
2948 /// Get the number of clauses for this landing pad.
2949 unsigned getNumClauses() const { return getNumOperands(); }
2950
2951 /// Grow the size of the operand list to accommodate the new
2952 /// number of clauses.
2953 void reserveClauses(unsigned Size) { growOperands(Size); }
2954
2955 // Methods for support type inquiry through isa, cast, and dyn_cast:
2956 static bool classof(const Instruction *I) {
2957 return I->getOpcode() == Instruction::LandingPad;
2958 }
2959 static bool classof(const Value *V) {
2960 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2961 }
2962};
2963
2964template <>
2965struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2966};
2967
2968DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { (static_cast<void
> (0)); return cast_or_null<Value>( OperandTraits<
LandingPadInst>::op_begin(const_cast<LandingPadInst*>
(this))[i_nocapture].get()); } void LandingPadInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<
void> (0)); OperandTraits<LandingPadInst>::op_begin(
this)[i_nocapture] = Val_nocapture; } unsigned LandingPadInst
::getNumOperands() const { return OperandTraits<LandingPadInst
>::operands(this); } template <int Idx_nocapture> Use
&LandingPadInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
LandingPadInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2969
2970//===----------------------------------------------------------------------===//
2971// ReturnInst Class
2972//===----------------------------------------------------------------------===//
2973
2974//===---------------------------------------------------------------------------
2975/// Return a value (possibly void), from a function. Execution
2976/// does not continue in this function any longer.
2977///
2978class ReturnInst : public Instruction {
2979 ReturnInst(const ReturnInst &RI);
2980
2981private:
2982 // ReturnInst constructors:
2983 // ReturnInst() - 'ret void' instruction
2984 // ReturnInst( null) - 'ret void' instruction
2985 // ReturnInst(Value* X) - 'ret X' instruction
2986 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2987 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2988 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2989 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2990 //
2991 // NOTE: If the Value* passed is of type void then the constructor behaves as
2992 // if it was passed NULL.
2993 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2994 Instruction *InsertBefore = nullptr);
2995 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2996 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2997
2998protected:
2999 // Note: Instruction needs to be a friend here to call cloneImpl.
3000 friend class Instruction;
3001
3002 ReturnInst *cloneImpl() const;
3003
3004public:
3005 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3006 Instruction *InsertBefore = nullptr) {
3007 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3008 }
3009
3010 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3011 BasicBlock *InsertAtEnd) {
3012 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3013 }
3014
3015 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3016 return new(0) ReturnInst(C, InsertAtEnd);
3017 }
3018
3019 /// Provide fast operand accessors
3020 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3021
3022 /// Convenience accessor. Returns null if there is no return value.
3023 Value *getReturnValue() const {
3024 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3025 }
3026
3027 unsigned getNumSuccessors() const { return 0; }
3028
3029 // Methods for support type inquiry through isa, cast, and dyn_cast:
3030 static bool classof(const Instruction *I) {
3031 return (I->getOpcode() == Instruction::Ret);
3032 }
3033 static bool classof(const Value *V) {
3034 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3035 }
3036
3037private:
3038 BasicBlock *getSuccessor(unsigned idx) const {
3039 llvm_unreachable("ReturnInst has no successors!")__builtin_unreachable();
3040 }
3041
3042 void setSuccessor(unsigned idx, BasicBlock *B) {
3043 llvm_unreachable("ReturnInst has no successors!")__builtin_unreachable();
3044 }
3045};
3046
3047template <>
3048struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3049};
3050
3051DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast<void
> (0)); return cast_or_null<Value>( OperandTraits<
ReturnInst>::op_begin(const_cast<ReturnInst*>(this))
[i_nocapture].get()); } void ReturnInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits
<ReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ReturnInst::getNumOperands() const { return OperandTraits
<ReturnInst>::operands(this); } template <int Idx_nocapture
> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3052
3053//===----------------------------------------------------------------------===//
3054// BranchInst Class
3055//===----------------------------------------------------------------------===//
3056
3057//===---------------------------------------------------------------------------
3058/// Conditional or Unconditional Branch instruction.
3059///
3060class BranchInst : public Instruction {
3061 /// Ops list - Branches are strange. The operands are ordered:
3062 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3063 /// they don't have to check for cond/uncond branchness. These are mostly
3064 /// accessed relative from op_end().
3065 BranchInst(const BranchInst &BI);
3066 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3067 // BranchInst(BB *B) - 'br B'
3068 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3069 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3070 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3071 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3072 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3073 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3074 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3075 Instruction *InsertBefore = nullptr);
3076 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3077 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3078 BasicBlock *InsertAtEnd);
3079
3080 void AssertOK();
3081
3082protected:
3083 // Note: Instruction needs to be a friend here to call cloneImpl.
3084 friend class Instruction;
3085
3086 BranchInst *cloneImpl() const;
3087
3088public:
3089 /// Iterator type that casts an operand to a basic block.
3090 ///
3091 /// This only makes sense because the successors are stored as adjacent
3092 /// operands for branch instructions.
3093 struct succ_op_iterator
3094 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3095 std::random_access_iterator_tag, BasicBlock *,
3096 ptrdiff_t, BasicBlock *, BasicBlock *> {
3097 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3098
3099 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3100 BasicBlock *operator->() const { return operator*(); }
3101 };
3102
3103 /// The const version of `succ_op_iterator`.
3104 struct const_succ_op_iterator
3105 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3106 std::random_access_iterator_tag,
3107 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3108 const BasicBlock *> {
3109 explicit const_succ_op_iterator(const_value_op_iterator I)
3110 : iterator_adaptor_base(I) {}
3111
3112 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3113 const BasicBlock *operator->() const { return operator*(); }
3114 };
3115
3116 static BranchInst *Create(BasicBlock *IfTrue,
3117 Instruction *InsertBefore = nullptr) {
3118 return new(1) BranchInst(IfTrue, InsertBefore);
3119 }
3120
3121 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3122 Value *Cond, Instruction *InsertBefore = nullptr) {
3123 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3124 }
3125
3126 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3127 return new(1) BranchInst(IfTrue, InsertAtEnd);
3128 }
3129
3130 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3131 Value *Cond, BasicBlock *InsertAtEnd) {
3132 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3133 }
3134
  /// Transparently provide more efficient getOperand methods.
  // Declares getOperand/setOperand, op_begin/op_end, getNumOperands and the
  // Op<>() helpers; the matching DEFINE_TRANSPARENT_OPERAND_ACCESSORS below
  // the class provides the definitions.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3137
  // A branch is unconditional when it has exactly one operand (the target
  // block) and conditional when it has three (condition + two successors).
  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional() const { return getNumOperands() == 3; }
3140
  /// Return the branch condition. Only valid on a conditional branch.
  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!");
    return Op<-3>();
  }

  /// Replace the branch condition. Only valid on a conditional branch.
  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!");
    Op<-3>() = V;
  }
3150
3151 unsigned getNumSuccessors() const { return 1+isConditional(); }
3152
3153 BasicBlock *getSuccessor(unsigned i) const {
3154 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast<void> (0));
3155 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3156 }
3157
3158 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3159 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast<void> (0));
3160 *(&Op<-1>() - idx) = NewSucc;
3161 }
3162
  /// Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  /// Iterate over the successor blocks. For a conditional branch the first
  /// value operand is the condition, so iteration starts one operand in;
  /// for an unconditional branch it starts at the first operand.
  iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
  }

  /// The const overload of successors().
  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
  }
3181
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static bool classof(const Value *V) {
    // A Value is a BranchInst iff it is an Instruction with the Br opcode.
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
3190
// BranchInst has a variable number of operands (1 for an unconditional
// branch, 3 for a conditional one); 1 is the declared minimum.
template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};
3194
// Provides the out-of-line definitions (op_begin/op_end, getOperand/
// setOperand, getNumOperands, Op<>) for the accessors declared inside
// BranchInst via DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3196
3197//===----------------------------------------------------------------------===//
3198// SwitchInst Class
3199//===----------------------------------------------------------------------===//
3200
3201//===---------------------------------------------------------------------------
3202/// Multiway switch
3203///
class SwitchInst : public Instruction {
  // Number of operand slots currently allocated; operands are "hung off"
  // the instruction (see HungoffOperandTraits below) so the case list can
  // grow via growOperands().
  unsigned ReservedSpace;

  // Operand[0]    = Value to switch on
  // Operand[1]    = Default basic block destination
  // Operand[2n  ] = Value to match
  // Operand[2n+1] = BasicBlock to go to on match
  SwitchInst(const SwitchInst &SI);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor can also
  /// auto-insert before another instruction.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor also
  /// auto-inserts at the end of the specified BasicBlock.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands (real operands are hung off)
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Sentinel case index (-2 as unsigned) denoting the default destination.
  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);

  template <typename CaseHandleT> class CaseIteratorImpl;

  /// A handle to a particular switch case. It exposes a convenient interface
  /// to both the case value and the successor block.
  ///
  /// We define this as a template and instantiate it to form both a const and
  /// non-const handle.
  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
  class CaseHandleImpl {
    // Directly befriend both const and non-const iterators.
    friend class SwitchInst::CaseIteratorImpl<
        CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;

  protected:
    // Expose the switch type we're parameterized with to the iterator.
    using SwitchInstType = SwitchInstT;

    SwitchInstT *SI;   // The switch this handle refers into.
    ptrdiff_t Index;   // Case number, or DefaultPseudoIndex for the default.

    CaseHandleImpl() = default;
    CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}

  public:
    /// Resolves case value for current case.
    ConstantIntT *getCaseValue() const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      // Case values live at even operand slots 2, 4, 6, ...
      return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
    }

    /// Resolves successor for current case.
    BasicBlockT *getCaseSuccessor() const {
      assert(((unsigned)Index < SI->getNumCases() ||
              (unsigned)Index == DefaultPseudoIndex) &&
             "Index out the number of cases.");
      return SI->getSuccessor(getSuccessorIndex());
    }

    /// Returns number of current case.
    unsigned getCaseIndex() const { return Index; }

    /// Returns successor index for current case successor.
    unsigned getSuccessorIndex() const {
      assert(((unsigned)Index == DefaultPseudoIndex ||
              (unsigned)Index < SI->getNumCases()) &&
             "Index out the number of cases.");
      // Successor 0 is the default destination; case i maps to successor i+1.
      return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
    }

    bool operator==(const CaseHandleImpl &RHS) const {
      assert(SI == RHS.SI && "Incompatible operators.");
      return Index == RHS.Index;
    }
  };

  using ConstCaseHandle =
      CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;

  class CaseHandle
      : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
    friend class SwitchInst::CaseIteratorImpl<CaseHandle>;

  public:
    CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}

    /// Sets the new value for current case.
    void setValue(ConstantInt *V) {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
    }

    /// Sets the new successor for current case.
    void setSuccessor(BasicBlock *S) {
      SI->setSuccessor(getSuccessorIndex(), S);
    }
  };

  template <typename CaseHandleT>
  class CaseIteratorImpl
      : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
                                    std::random_access_iterator_tag,
                                    CaseHandleT> {
    using SwitchInstT = typename CaseHandleT::SwitchInstType;

    CaseHandleT Case;  // The handle acts as the iterator's current position.

  public:
    /// Default constructed iterator is in an invalid state until assigned to
    /// a case for a particular switch.
    CaseIteratorImpl() = default;

    /// Initializes case iterator for given SwitchInst and for given
    /// case number.
    CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}

    /// Initializes case iterator for given SwitchInst and for given
    /// successor index.
    static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
                                               unsigned SuccessorIndex) {
      assert(SuccessorIndex < SI->getNumSuccessors() &&
             "Successor index # out of range!");
      // Successor 0 is the default; successor i (i > 0) is case i-1.
      return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
                                 : CaseIteratorImpl(SI, DefaultPseudoIndex);
    }

    /// Support converting to the const variant. This will be a no-op for const
    /// variant.
    operator CaseIteratorImpl<ConstCaseHandle>() const {
      return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
    }

    CaseIteratorImpl &operator+=(ptrdiff_t N) {
      // Check index correctness after addition.
      // Note: Index == getNumCases() means end().
      assert(Case.Index + N >= 0 &&
             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index += N;
      return *this;
    }
    CaseIteratorImpl &operator-=(ptrdiff_t N) {
      // Check index correctness after subtraction.
      // Note: Case.Index == getNumCases() means end().
      assert(Case.Index - N >= 0 &&
             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index -= N;
      return *this;
    }
    ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index - RHS.Case.Index;
    }
    bool operator==(const CaseIteratorImpl &RHS) const {
      return Case == RHS.Case;
    }
    bool operator<(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index < RHS.Case.Index;
    }
    CaseHandleT &operator*() { return Case; }
    const CaseHandleT &operator*() const { return Case; }
  };

  using CaseIt = CaseIteratorImpl<CaseHandle>;
  using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases,
                            Instruction *InsertBefore = nullptr) {
    return new SwitchInst(Value, Default, NumCases, InsertBefore);
  }

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases, BasicBlock *InsertAtEnd) {
    return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Accessor Methods for Switch stmt
  Value *getCondition() const { return getOperand(0); }
  void setCondition(Value *V) { setOperand(0, V); }

  BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(getOperand(1));
  }

  void setDefaultDest(BasicBlock *DefaultCase) {
    setOperand(1, reinterpret_cast<Value*>(DefaultCase));
  }

  /// Return the number of 'cases' in this switch instruction, excluding the
  /// default case.
  unsigned getNumCases() const {
    // Operands: condition + default + one (value, dest) pair per case.
    return getNumOperands()/2 - 1;
  }

  /// Returns a read/write iterator that points to the first case in the
  /// SwitchInst.
  CaseIt case_begin() {
    return CaseIt(this, 0);
  }

  /// Returns a read-only iterator that points to the first case in the
  /// SwitchInst.
  ConstCaseIt case_begin() const {
    return ConstCaseIt(this, 0);
  }

  /// Returns a read/write iterator that points one past the last in the
  /// SwitchInst.
  CaseIt case_end() {
    return CaseIt(this, getNumCases());
  }

  /// Returns a read-only iterator that points one past the last in the
  /// SwitchInst.
  ConstCaseIt case_end() const {
    return ConstCaseIt(this, getNumCases());
  }

  /// Iteration adapter for range-for loops.
  iterator_range<CaseIt> cases() {
    return make_range(case_begin(), case_end());
  }

  /// Constant iteration adapter for range-for loops.
  iterator_range<ConstCaseIt> cases() const {
    return make_range(case_begin(), case_end());
  }

  /// Returns an iterator that points to the default case.
  /// Note: this iterator allows to resolve successor only. Attempt
  /// to resolve case value causes an assertion.
  /// Also note, that increment and decrement also causes an assertion and
  /// makes iterator invalid.
  CaseIt case_default() {
    return CaseIt(this, DefaultPseudoIndex);
  }
  ConstCaseIt case_default() const {
    return ConstCaseIt(this, DefaultPseudoIndex);
  }

  /// Search all of the case values for the specified constant. If it is
  /// explicitly handled, return the case iterator of it, otherwise return
  /// default case iterator to indicate that it is handled by the default
  /// handler.
  CaseIt findCaseValue(const ConstantInt *C) {
    CaseIt I = llvm::find_if(
        cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
    if (I != case_end())
      return I;

    return case_default();
  }
  ConstCaseIt findCaseValue(const ConstantInt *C) const {
    ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
      return Case.getCaseValue() == C;
    });
    if (I != case_end())
      return I;

    return case_default();
  }

  /// Finds the unique case value for a given successor. Returns null if the
  /// successor is not found, not unique, or is the default case.
  ConstantInt *findCaseDest(BasicBlock *BB) {
    if (BB == getDefaultDest())
      return nullptr;

    ConstantInt *CI = nullptr;
    for (auto Case : cases()) {
      if (Case.getCaseSuccessor() != BB)
        continue;

      if (CI)
        return nullptr; // Multiple cases lead to BB.

      CI = Case.getCaseValue();
    }

    return CI;
  }

  /// Add an entry to the switch instruction.
  /// Note:
  /// This action invalidates case_end(). Old case_end() iterator will
  /// point to the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest);

  /// This method removes the specified case and its successor from the switch
  /// instruction. Note that this operation may reorder the remaining cases at
  /// index idx and above.
  /// Note:
  /// This action invalidates iterators for all cases following the one removed,
  /// including the case_end() iterator. It returns an iterator for the next
  /// case.
  CaseIt removeCase(CaseIt I);

  // Successors: the default destination plus one per case.
  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    // Destinations live at odd operand slots 1, 3, 5, ...
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx * 2 + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
3547
/// A wrapper class to simplify modification of SwitchInst cases along with
/// their prof branch_weights metadata.
class SwitchInstProfUpdateWrapper {
  SwitchInst &SI;
  // Cached branch weights; None until init() populates them (presumably only
  // when SI carries usable prof metadata — confirm against init()'s impl).
  Optional<SmallVector<uint32_t, 8> > Weights = None;
  // Set when weights were modified; the destructor writes them back.
  bool Changed = false;

protected:
  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);

  MDNode *buildProfBranchWeightsMD();

  void init();

public:
  using CaseWeightOpt = Optional<uint32_t>;
  // Smart-pointer-like forwarding to the wrapped SwitchInst.
  SwitchInst *operator->() { return &SI; }
  SwitchInst &operator*() { return SI; }
  operator SwitchInst *() { return &SI; }

  SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }

  // Flush modified weights back into the instruction's prof metadata.
  ~SwitchInstProfUpdateWrapper() {
    if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
  }

  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// correspondent branch weight.
  SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);

  /// Delegate the call to the underlying SwitchInst::addCase() and set the
  /// specified branch weight for the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);

  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
  /// this object to not touch the underlying SwitchInst in destructor.
  SymbolTableList<Instruction>::iterator eraseFromParent();

  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
  CaseWeightOpt getSuccessorWeight(unsigned idx);

  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
};
3592
// SwitchInst's operands are allocated separately ("hung off") so the case
// list can grow; 2 is the minimum operand count (condition + default dest).
template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};
3596
// Provides the out-of-line definitions for the operand accessors declared
// inside SwitchInst via DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3598
3599//===----------------------------------------------------------------------===//
3600// IndirectBrInst Class
3601//===----------------------------------------------------------------------===//
3602
3603//===---------------------------------------------------------------------------
3604/// Indirect Branch Instruction.
3605///
class IndirectBrInst : public Instruction {
  // Number of operand slots currently allocated; operands are hung off the
  // instruction so the destination list can grow via growOperands().
  unsigned ReservedSpace;

  // Operand[0]   = Address to jump to
  // Operand[n+1] = n-th destination
  IndirectBrInst(const IndirectBrInst &IBI);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor can also
  /// autoinsert before another instruction.
  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor also
  /// autoinserts at the end of the specified BasicBlock.
  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands (real operands are hung off)
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Address, unsigned NumDests);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  IndirectBrInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for indirectbr instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                Instruction *InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                BasicBlock *InsertAtEnd) {
    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
  }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Accessor Methods for IndirectBrInst instruction.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  // Destinations double as successors; operand 0 is the address, so
  // successor i is operand i+1.
  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i + 1, NewSucc);
  }

  // Iterate destinations, skipping the address operand at position 0.
  iterator_range<succ_op_iterator> successors() {
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
3727
// IndirectBrInst's operands are hung off the instruction so destinations can
// be added/removed; 1 is the minimum operand count (the jump address).
template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};
3731
// Provides the out-of-line definitions for the operand accessors declared
// inside IndirectBrInst via DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3733
3734//===----------------------------------------------------------------------===//
3735// InvokeInst Class
3736//===----------------------------------------------------------------------===//
3737
3738/// Invoke instruction. The SubclassData field is used to hold the
3739/// calling convention of the call.
3740///
3741class InvokeInst : public CallBase {
3742 /// The number of operands for this call beyond the called function,
3743 /// arguments, and operand bundles.
3744 static constexpr int NumExtraOperands = 2;
3745
3746 /// The index from the end of the operand array to the normal destination.
3747 static constexpr int NormalDestOpEndIdx = -3;
3748
3749 /// The index from the end of the operand array to the unwind destination.
3750 static constexpr int UnwindDestOpEndIdx = -2;
3751
3752 InvokeInst(const InvokeInst &BI);
3753
3754 /// Construct an InvokeInst given a range of arguments.
3755 ///
3756 /// Construct an InvokeInst from a range of arguments
3757 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3758 BasicBlock *IfException, ArrayRef<Value *> Args,
3759 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3760 const Twine &NameStr, Instruction *InsertBefore);
3761
3762 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3763 BasicBlock *IfException, ArrayRef<Value *> Args,
3764 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3765 const Twine &NameStr, BasicBlock *InsertAtEnd);
3766
3767 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3768 BasicBlock *IfException, ArrayRef<Value *> Args,
3769 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3770
3771 /// Compute the number of operands to allocate.
3772 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3773 // We need one operand for the called function, plus our extra operands and
3774 // the input operand counts provided.
3775 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3776 }
3777
3778protected:
3779 // Note: Instruction needs to be a friend here to call cloneImpl.
3780 friend class Instruction;
3781
3782 InvokeInst *cloneImpl() const;
3783
3784public:
3785 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3786 BasicBlock *IfException, ArrayRef<Value *> Args,
3787 const Twine &NameStr,
3788 Instruction *InsertBefore = nullptr) {
3789 int NumOperands = ComputeNumOperands(Args.size());
3790 return new (NumOperands)
3791 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3792 NameStr, InsertBefore);
3793 }
3794
3795 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3796 BasicBlock *IfException, ArrayRef<Value *> Args,
3797 ArrayRef<OperandBundleDef> Bundles = None,
3798 const Twine &NameStr = "",
3799 Instruction *InsertBefore = nullptr) {
3800 int NumOperands =
3801 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3802 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3803
3804 return new (NumOperands, DescriptorBytes)
3805 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3806 NameStr, InsertBefore);
3807 }
3808
3809 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3810 BasicBlock *IfException, ArrayRef<Value *> Args,
3811 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3812 int NumOperands = ComputeNumOperands(Args.size());
3813 return new (NumOperands)
3814 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3815 NameStr, InsertAtEnd);
3816 }
3817
3818 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3819 BasicBlock *IfException, ArrayRef<Value *> Args,
3820 ArrayRef<OperandBundleDef> Bundles,
3821 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3822 int NumOperands =
3823 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3824 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3825
3826 return new (NumOperands, DescriptorBytes)
3827 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3828 NameStr, InsertAtEnd);
3829 }
3830
3831 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3832 BasicBlock *IfException, ArrayRef<Value *> Args,
3833 const Twine &NameStr,
3834 Instruction *InsertBefore = nullptr) {
3835 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3836 IfException, Args, None, NameStr, InsertBefore);
3837 }
3838
3839 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3840 BasicBlock *IfException, ArrayRef<Value *> Args,
3841 ArrayRef<OperandBundleDef> Bundles = None,
3842 const Twine &NameStr = "",
3843 Instruction *InsertBefore = nullptr) {
3844 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3845 IfException, Args, Bundles, NameStr, InsertBefore);
3846 }
3847
3848 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3849 BasicBlock *IfException, ArrayRef<Value *> Args,
3850 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3851 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3852 IfException, Args, NameStr, InsertAtEnd);
3853 }
3854
3855 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3856 BasicBlock *IfException, ArrayRef<Value *> Args,
3857 ArrayRef<OperandBundleDef> Bundles,
3858 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3859 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3860 IfException, Args, Bundles, NameStr, InsertAtEnd);
3861 }
3862
3863 /// Create a clone of \p II with a different set of operand bundles and
3864 /// insert it before \p InsertPt.
3865 ///
3866 /// The returned invoke instruction is identical to \p II in every way except
3867 /// that the operand bundles for the new instruction are set to the operand
3868 /// bundles in \p Bundles.
3869 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3870 Instruction *InsertPt = nullptr);
3871
3872 // get*Dest - Return the destination basic blocks...
3873 BasicBlock *getNormalDest() const {
3874 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3875 }
3876 BasicBlock *getUnwindDest() const {
3877 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3878 }
3879 void setNormalDest(BasicBlock *B) {
3880 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3881 }
3882 void setUnwindDest(BasicBlock *B) {
3883 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3884 }
3885
3886 /// Get the landingpad instruction from the landing pad
3887 /// block (the unwind destination).
3888 LandingPadInst *getLandingPadInst() const;
3889
3890 BasicBlock *getSuccessor(unsigned i) const {
3891 assert(i < 2 && "Successor # out of range for invoke!")(static_cast<void> (0));
3892 return i == 0 ? getNormalDest() : getUnwindDest();
3893 }
3894
3895 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3896 assert(i < 2 && "Successor # out of range for invoke!")(static_cast<void> (0));
3897 if (i == 0)
3898 setNormalDest(NewSucc);
3899 else
3900 setUnwindDest(NewSucc);
3901 }
3902
3903 unsigned getNumSuccessors() const { return 2; }
3904
3905 // Methods for support type inquiry through isa, cast, and dyn_cast:
3906 static bool classof(const Instruction *I) {
3907 return (I->getOpcode() == Instruction::Invoke);
3908 }
3909 static bool classof(const Value *V) {
3910 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3911 }
3912
3913private:
3914 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3915 // method so that subclasses cannot accidentally use it.
3916 template <typename Bitfield>
3917 void setSubclassData(typename Bitfield::Type Value) {
3918 Instruction::setSubclassData<Bitfield>(Value);
3919 }
3920};
3921
3922InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3923 BasicBlock *IfException, ArrayRef<Value *> Args,
3924 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3925 const Twine &NameStr, Instruction *InsertBefore)
3926 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3927 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3928 InsertBefore) {
3929 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3930}
3931
3932InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3933 BasicBlock *IfException, ArrayRef<Value *> Args,
3934 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3935 const Twine &NameStr, BasicBlock *InsertAtEnd)
3936 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3937 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3938 InsertAtEnd) {
3939 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3940}
3941
3942//===----------------------------------------------------------------------===//
3943// CallBrInst Class
3944//===----------------------------------------------------------------------===//
3945
3946/// CallBr instruction, tracking function calls that may not return control but
3947/// instead transfer it to a third location. The SubclassData field is used to
3948/// hold the calling convention of the call.
3949///
3950class CallBrInst : public CallBase {
3951
3952 unsigned NumIndirectDests;
3953
3954 CallBrInst(const CallBrInst &BI);
3955
3956 /// Construct a CallBrInst given a range of arguments.
3957 ///
3958 /// Construct a CallBrInst from a range of arguments
3959 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3960 ArrayRef<BasicBlock *> IndirectDests,
3961 ArrayRef<Value *> Args,
3962 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3963 const Twine &NameStr, Instruction *InsertBefore);
3964
3965 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3966 ArrayRef<BasicBlock *> IndirectDests,
3967 ArrayRef<Value *> Args,
3968 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3969 const Twine &NameStr, BasicBlock *InsertAtEnd);
3970
3971 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3972 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3973 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3974
3975 /// Should the Indirect Destinations change, scan + update the Arg list.
3976 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3977
3978 /// Compute the number of operands to allocate.
3979 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3980 int NumBundleInputs = 0) {
3981 // We need one operand for the called function, plus our extra operands and
3982 // the input operand counts provided.
3983 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3984 }
3985
3986protected:
3987 // Note: Instruction needs to be a friend here to call cloneImpl.
3988 friend class Instruction;
3989
3990 CallBrInst *cloneImpl() const;
3991
3992public:
3993 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3994 BasicBlock *DefaultDest,
3995 ArrayRef<BasicBlock *> IndirectDests,
3996 ArrayRef<Value *> Args, const Twine &NameStr,
3997 Instruction *InsertBefore = nullptr) {
3998 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3999 return new (NumOperands)
4000 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4001 NumOperands, NameStr, InsertBefore);
4002 }
4003
4004 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4005 BasicBlock *DefaultDest,
4006 ArrayRef<BasicBlock *> IndirectDests,
4007 ArrayRef<Value *> Args,
4008 ArrayRef<OperandBundleDef> Bundles = None,
4009 const Twine &NameStr = "",
4010 Instruction *InsertBefore = nullptr) {
4011 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4012 CountBundleInputs(Bundles));
4013 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4014
4015 return new (NumOperands, DescriptorBytes)
4016 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4017 NumOperands, NameStr, InsertBefore);
4018 }
4019
4020 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4021 BasicBlock *DefaultDest,
4022 ArrayRef<BasicBlock *> IndirectDests,
4023 ArrayRef<Value *> Args, const Twine &NameStr,
4024 BasicBlock *InsertAtEnd) {
4025 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4026 return new (NumOperands)
4027 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4028 NumOperands, NameStr, InsertAtEnd);
4029 }
4030
4031 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4032 BasicBlock *DefaultDest,
4033 ArrayRef<BasicBlock *> IndirectDests,
4034 ArrayRef<Value *> Args,
4035 ArrayRef<OperandBundleDef> Bundles,
4036 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4037 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4038 CountBundleInputs(Bundles));
4039 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4040
4041 return new (NumOperands, DescriptorBytes)
4042 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4043 NumOperands, NameStr, InsertAtEnd);
4044 }
4045
4046 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4047 ArrayRef<BasicBlock *> IndirectDests,
4048 ArrayRef<Value *> Args, const Twine &NameStr,
4049 Instruction *InsertBefore = nullptr) {
4050 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4051 IndirectDests, Args, NameStr, InsertBefore);
4052 }
4053
4054 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4055 ArrayRef<BasicBlock *> IndirectDests,
4056 ArrayRef<Value *> Args,
4057 ArrayRef<OperandBundleDef> Bundles = None,
4058 const Twine &NameStr = "",
4059 Instruction *InsertBefore = nullptr) {
4060 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4061 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4062 }
4063
4064 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4065 ArrayRef<BasicBlock *> IndirectDests,
4066 ArrayRef<Value *> Args, const Twine &NameStr,
4067 BasicBlock *InsertAtEnd) {
4068 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4069 IndirectDests, Args, NameStr, InsertAtEnd);
4070 }
4071
4072 static CallBrInst *Create(FunctionCallee Func,
4073 BasicBlock *DefaultDest,
4074 ArrayRef<BasicBlock *> IndirectDests,
4075 ArrayRef<Value *> Args,
4076 ArrayRef<OperandBundleDef> Bundles,
4077 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4078 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4079 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4080 }
4081
4082 /// Create a clone of \p CBI with a different set of operand bundles and
4083 /// insert it before \p InsertPt.
4084 ///
4085 /// The returned callbr instruction is identical to \p CBI in every way
4086 /// except that the operand bundles for the new instruction are set to the
4087 /// operand bundles in \p Bundles.
4088 static CallBrInst *Create(CallBrInst *CBI,
4089 ArrayRef<OperandBundleDef> Bundles,
4090 Instruction *InsertPt = nullptr);
4091
4092 /// Return the number of callbr indirect dest labels.
4093 ///
4094 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4095
4096 /// getIndirectDestLabel - Return the i-th indirect dest label.
4097 ///
4098 Value *getIndirectDestLabel(unsigned i) const {
4099 assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast<void> (0));
4100 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() +
4101 1);
4102 }
4103
4104 Value *getIndirectDestLabelUse(unsigned i) const {
4105 assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast<void> (0));
4106 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() +
4107 1);
4108 }
4109
4110 // Return the destination basic blocks...
4111 BasicBlock *getDefaultDest() const {
4112 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4113 }
4114 BasicBlock *getIndirectDest(unsigned i) const {
4115 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4116 }
4117 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4118 SmallVector<BasicBlock *, 16> IndirectDests;
4119 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4120 IndirectDests.push_back(getIndirectDest(i));
4121 return IndirectDests;
4122 }
4123 void setDefaultDest(BasicBlock *B) {
4124 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4125 }
4126 void setIndirectDest(unsigned i, BasicBlock *B) {
4127 updateArgBlockAddresses(i, B);
4128 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4129 }
4130
4131 BasicBlock *getSuccessor(unsigned i) const {
4132 assert(i < getNumSuccessors() + 1 &&(static_cast<void> (0))
4133 "Successor # out of range for callbr!")(static_cast<void> (0));
4134 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4135 }
4136
4137 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4138 assert(i < getNumIndirectDests() + 1 &&(static_cast<void> (0))
4139 "Successor # out of range for callbr!")(static_cast<void> (0));
4140 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4141 }
4142
4143 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4144
4145 // Methods for support type inquiry through isa, cast, and dyn_cast:
4146 static bool classof(const Instruction *I) {
4147 return (I->getOpcode() == Instruction::CallBr);
4148 }
4149 static bool classof(const Value *V) {
4150 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4151 }
4152
4153private:
4154 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4155 // method so that subclasses cannot accidentally use it.
4156 template <typename Bitfield>
4157 void setSubclassData(typename Bitfield::Type Value) {
4158 Instruction::setSubclassData<Bitfield>(Value);
4159 }
4160};
4161
4162CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4163 ArrayRef<BasicBlock *> IndirectDests,
4164 ArrayRef<Value *> Args,
4165 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4166 const Twine &NameStr, Instruction *InsertBefore)
4167 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4168 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4169 InsertBefore) {
4170 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4171}
4172
4173CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4174 ArrayRef<BasicBlock *> IndirectDests,
4175 ArrayRef<Value *> Args,
4176 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4177 const Twine &NameStr, BasicBlock *InsertAtEnd)
4178 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4179 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4180 InsertAtEnd) {
4181 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4182}
4183
4184//===----------------------------------------------------------------------===//
4185// ResumeInst Class
4186//===----------------------------------------------------------------------===//
4187
4188//===---------------------------------------------------------------------------
4189/// Resume the propagation of an exception.
4190///
4191class ResumeInst : public Instruction {
4192 ResumeInst(const ResumeInst &RI);
4193
4194 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4195 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4196
4197protected:
4198 // Note: Instruction needs to be a friend here to call cloneImpl.
4199 friend class Instruction;
4200
4201 ResumeInst *cloneImpl() const;
4202
4203public:
4204 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4205 return new(1) ResumeInst(Exn, InsertBefore);
4206 }
4207
4208 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4209 return new(1) ResumeInst(Exn, InsertAtEnd);
4210 }
4211
4212 /// Provide fast operand accessors
4213 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4214
4215 /// Convenience accessor.
4216 Value *getValue() const { return Op<0>(); }
4217
4218 unsigned getNumSuccessors() const { return 0; }
4219
4220 // Methods for support type inquiry through isa, cast, and dyn_cast:
4221 static bool classof(const Instruction *I) {
4222 return I->getOpcode() == Instruction::Resume;
4223 }
4224 static bool classof(const Value *V) {
4225 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4226 }
4227
4228private:
4229 BasicBlock *getSuccessor(unsigned idx) const {
4230 llvm_unreachable("ResumeInst has no successors!")__builtin_unreachable();
4231 }
4232
4233 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4234 llvm_unreachable("ResumeInst has no successors!")__builtin_unreachable();
4235 }
4236};
4237
4238template <>
4239struct OperandTraits<ResumeInst> :
4240 public FixedNumOperandTraits<ResumeInst, 1> {
4241};
4242
4243DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits
<ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator
ResumeInst::op_begin() const { return OperandTraits<ResumeInst
>::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst
::op_iterator ResumeInst::op_end() { return OperandTraits<
ResumeInst>::op_end(this); } ResumeInst::const_op_iterator
ResumeInst::op_end() const { return OperandTraits<ResumeInst
>::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst
::getOperand(unsigned i_nocapture) const { (static_cast<void
> (0)); return cast_or_null<Value>( OperandTraits<
ResumeInst>::op_begin(const_cast<ResumeInst*>(this))
[i_nocapture].get()); } void ResumeInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits
<ResumeInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ResumeInst::getNumOperands() const { return OperandTraits
<ResumeInst>::operands(this); } template <int Idx_nocapture
> Use &ResumeInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ResumeInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
4244
4245//===----------------------------------------------------------------------===//
4246// CatchSwitchInst Class
4247//===----------------------------------------------------------------------===//
4248class CatchSwitchInst : public Instruction {
4249 using UnwindDestField = BoolBitfieldElementT<0>;
4250
4251 /// The number of operands actually allocated. NumOperands is
4252 /// the number actually in use.
4253 unsigned ReservedSpace;
4254
4255 // Operand[0] = Outer scope
4256 // Operand[1] = Unwind block destination
4257 // Operand[n] = BasicBlock to go to on match
4258 CatchSwitchInst(const CatchSwitchInst &CSI);
4259
4260 /// Create a new switch instruction, specifying a
4261 /// default destination. The number of additional handlers can be specified
4262 /// here to make memory allocation more efficient.
4263 /// This constructor can also autoinsert before another instruction.
4264 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4265 unsigned NumHandlers, const Twine &NameStr,
4266 Instruction *InsertBefore);
4267
4268 /// Create a new switch instruction, specifying a
4269 /// default destination. The number of additional handlers can be specified
4270 /// here to make memory allocation more efficient.
4271 /// This constructor also autoinserts at the end of the specified BasicBlock.
4272 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4273 unsigned NumHandlers, const Twine &NameStr,
4274 BasicBlock *InsertAtEnd);
4275
4276 // allocate space for exactly zero operands
4277 void *operator new(size_t S) { return User::operator new(S); }
4278
4279 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4280 void growOperands(unsigned Size);
4281
4282protected:
4283 // Note: Instruction needs to be a friend here to call cloneImpl.
4284 friend class Instruction;
4285
4286 CatchSwitchInst *cloneImpl() const;
4287
4288public:
4289 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4290
4291 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4292 unsigned NumHandlers,
4293 const Twine &NameStr = "",
4294 Instruction *InsertBefore = nullptr) {
4295 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4296 InsertBefore);
4297 }
4298
4299 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4300 unsigned NumHandlers, const Twine &NameStr,
4301 BasicBlock *InsertAtEnd) {
4302 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4303 InsertAtEnd);
4304 }
4305
4306 /// Provide fast operand accessors
4307 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4308
4309 // Accessor Methods for CatchSwitch stmt
4310 Value *getParentPad() const { return getOperand(0); }
4311 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4312
4313 // Accessor Methods for CatchSwitch stmt
4314 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4315 bool unwindsToCaller() const { return !hasUnwindDest(); }
4316 BasicBlock *getUnwindDest() const {
4317 if (hasUnwindDest())
4318 return cast<BasicBlock>(getOperand(1));
4319 return nullptr;
4320 }
4321 void setUnwindDest(BasicBlock *UnwindDest) {
4322 assert(UnwindDest)(static_cast<void> (0));
4323 assert(hasUnwindDest())(static_cast<void> (0));
4324 setOperand(1, UnwindDest);
4325 }
4326
4327 /// return the number of 'handlers' in this catchswitch
4328 /// instruction, except the default handler
4329 unsigned getNumHandlers() const {
4330 if (hasUnwindDest())
4331 return getNumOperands() - 2;
4332 return getNumOperands() - 1;
4333 }
4334
4335private:
4336 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4337 static const BasicBlock *handler_helper(const Value *V) {
4338 return cast<BasicBlock>(V);
4339 }
4340
4341public:
4342 using DerefFnTy = BasicBlock *(*)(Value *);
4343 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4344 using handler_range = iterator_range<handler_iterator>;
4345 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4346 using const_handler_iterator =
4347 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4348 using const_handler_range = iterator_range<const_handler_iterator>;
4349
4350 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4351 handler_iterator handler_begin() {
4352 op_iterator It = op_begin() + 1;
4353 if (hasUnwindDest())
4354 ++It;
4355 return handler_iterator(It, DerefFnTy(handler_helper));
4356 }
4357
4358 /// Returns an iterator that points to the first handler in the
4359 /// CatchSwitchInst.
4360 const_handler_iterator handler_begin() const {
4361 const_op_iterator It = op_begin() + 1;
4362 if (hasUnwindDest())
4363 ++It;
4364 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4365 }
4366
4367 /// Returns a read-only iterator that points one past the last
4368 /// handler in the CatchSwitchInst.
4369 handler_iterator handler_end() {
4370 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4371 }
4372
4373 /// Returns an iterator that points one past the last handler in the
4374 /// CatchSwitchInst.
4375 const_handler_iterator handler_end() const {
4376 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4377 }
4378
4379 /// iteration adapter for range-for loops.
4380 handler_range handlers() {
4381 return make_range(handler_begin(), handler_end());
4382 }
4383
4384 /// iteration adapter for range-for loops.
4385 const_handler_range handlers() const {
4386 return make_range(handler_begin(), handler_end());
4387 }
4388
4389 /// Add an entry to the switch instruction...
4390 /// Note:
4391 /// This action invalidates handler_end(). Old handler_end() iterator will
4392 /// point to the added handler.
4393 void addHandler(BasicBlock *Dest);
4394
4395 void removeHandler(handler_iterator HI);
4396
4397 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4398 BasicBlock *getSuccessor(unsigned Idx) const {
4399 assert(Idx < getNumSuccessors() &&(static_cast<void> (0))
4400 "Successor # out of range for catchswitch!")(static_cast<void> (0));
4401 return cast<BasicBlock>(getOperand(Idx + 1));
4402 }
4403 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4404 assert(Idx < getNumSuccessors() &&(static_cast<void> (0))
4405 "Successor # out of range for catchswitch!")(static_cast<void> (0));
4406 setOperand(Idx + 1, NewSucc);
4407 }
4408
4409 // Methods for support type inquiry through isa, cast, and dyn_cast:
4410 static bool classof(const Instruction *I) {
4411 return I->getOpcode() == Instruction::CatchSwitch;
4412 }
4413 static bool classof(const Value *V) {
4414 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4415 }
4416};
4417
4418template <>
4419struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4420
4421DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return
OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst
::const_op_iterator CatchSwitchInst::op_begin() const { return
OperandTraits<CatchSwitchInst>::op_begin(const_cast<
CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst
::op_end() { return OperandTraits<CatchSwitchInst>::op_end
(this); } CatchSwitchInst::const_op_iterator CatchSwitchInst::
op_end() const { return OperandTraits<CatchSwitchInst>::
op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast<void
> (0)); return cast_or_null<Value>( OperandTraits<
CatchSwitchInst>::op_begin(const_cast<CatchSwitchInst*>
(this))[i_nocapture].get()); } void CatchSwitchInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<
void> (0)); OperandTraits<CatchSwitchInst>::op_begin
(this)[i_nocapture] = Val_nocapture; } unsigned CatchSwitchInst
::getNumOperands() const { return OperandTraits<CatchSwitchInst
>::operands(this); } template <int Idx_nocapture> Use
&CatchSwitchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
CatchSwitchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
4422
4423//===----------------------------------------------------------------------===//
4424// CleanupPadInst Class
4425//===----------------------------------------------------------------------===//
4426class CleanupPadInst : public FuncletPadInst {
4427private:
4428 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4429 unsigned Values, const Twine &NameStr,
4430 Instruction *InsertBefore)
4431 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4432 NameStr, InsertBefore) {}
4433 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4434 unsigned Values, const Twine &NameStr,
4435 BasicBlock *InsertAtEnd)
4436 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4437 NameStr, InsertAtEnd) {}
4438
4439public:
4440 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4441 const Twine &NameStr = "",
4442 Instruction *InsertBefore = nullptr) {
4443 unsigned Values = 1 + Args.size();
4444 return new (Values)
4445 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4446 }
4447
4448 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4449 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4450 unsigned Values = 1 + Args.size();
4451 return new (Values)
4452 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4453 }
4454
4455 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4456 static bool classof(const Instruction *I) {
4457 return I->getOpcode() == Instruction::CleanupPad;
4458 }
4459 static bool classof(const Value *V) {
4460 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4461 }
4462};
4463
4464//===----------------------------------------------------------------------===//
4465// CatchPadInst Class
4466//===----------------------------------------------------------------------===//
4467class CatchPadInst : public FuncletPadInst {
4468private:
4469 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4470 unsigned Values, const Twine &NameStr,
4471 Instruction *InsertBefore)
4472 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4473 NameStr, InsertBefore) {}
4474 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4475 unsigned Values, const Twine &NameStr,
4476 BasicBlock *InsertAtEnd)
4477 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4478 NameStr, InsertAtEnd) {}
4479
4480public:
4481 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4482 const Twine &NameStr = "",
4483 Instruction *InsertBefore = nullptr) {
4484 unsigned Values = 1 + Args.size();
4485 return new (Values)
4486 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4487 }
4488
4489 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4490 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4491 unsigned Values = 1 + Args.size();
4492 return new (Values)
4493 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4494 }
4495
4496 /// Convenience accessors
4497 CatchSwitchInst *getCatchSwitch() const {
4498 return cast<CatchSwitchInst>(Op<-1>());
4499 }
4500 void setCatchSwitch(Value *CatchSwitch) {
4501 assert(CatchSwitch)(static_cast<void> (0));
4502 Op<-1>() = CatchSwitch;
4503 }
4504
4505 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4506 static bool classof(const Instruction *I) {
4507 return I->getOpcode() == Instruction::CatchPad;
4508 }
4509 static bool classof(const Value *V) {
4510 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4511 }
4512};
4513
4514//===----------------------------------------------------------------------===//
4515// CatchReturnInst Class
4516//===----------------------------------------------------------------------===//
4517
4518class CatchReturnInst : public Instruction {
  // catchret terminator. It always carries exactly two operands:
  // Op<0> = the owning catchpad, Op<1> = the successor basic block
  // (see the FixedNumOperandTraits<CatchReturnInst, 2> specialization).
4519 CatchReturnInst(const CatchReturnInst &RI);
4520 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4521 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4522
  // Constructor helper (defined out of line).
4523 void init(Value *CatchPad, BasicBlock *BB);
4524
4525protected:
4526 // Note: Instruction needs to be a friend here to call cloneImpl.
4527 friend class Instruction;
4528
4529 CatchReturnInst *cloneImpl() const;
4530
4531public:
  // Factory methods are the only way to construct; both operands must be
  // non-null (asserted in debug builds).
4532 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4533 Instruction *InsertBefore = nullptr) {
4534 assert(CatchPad)(static_cast<void> (0));
4535 assert(BB)(static_cast<void> (0));
4536 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4537 }
4538
4539 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4540 BasicBlock *InsertAtEnd) {
4541 assert(CatchPad)(static_cast<void> (0));
4542 assert(BB)(static_cast<void> (0));
4543 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4544 }
4545
4546 /// Provide fast operand accessors
4547 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4548
4549 /// Convenience accessors.
4550 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4551 void setCatchPad(CatchPadInst *CatchPad) {
4552 assert(CatchPad)(static_cast<void> (0));
4553 Op<0>() = CatchPad;
4554 }
4555
4556 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4557 void setSuccessor(BasicBlock *NewSucc) {
4558 assert(NewSucc)(static_cast<void> (0));
4559 Op<1>() = NewSucc;
4560 }
4561 unsigned getNumSuccessors() const { return 1; }
4562
4563 /// Get the parentPad of this catchret's catchpad's catchswitch.
4564 /// The successor block is implicitly a member of this funclet.
4565 Value *getCatchSwitchParentPad() const {
4566 return getCatchPad()->getCatchSwitch()->getParentPad();
4567 }
4568
4569 // Methods for support type inquiry through isa, cast, and dyn_cast:
4570 static bool classof(const Instruction *I) {
4571 return (I->getOpcode() == Instruction::CatchRet);
4572 }
4573 static bool classof(const Value *V) {
4574 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4575 }
4576
4577private:
  // Indexed successor accessors used by the generic terminator interface;
  // only index 0 is valid since a catchret has exactly one successor.
4578 BasicBlock *getSuccessor(unsigned Idx) const {
4579 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast<void> (0));
4580 return getSuccessor();
4581 }
4582
4583 void setSuccessor(unsigned Idx, BasicBlock *B) {
4584 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast<void> (0));
4585 setSuccessor(B);
4586 }
4587};
4588
// CatchReturnInst always has exactly two operands (catchpad + successor);
// the accessor definitions below come from the expansion of
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS.
4589template <>
4590struct OperandTraits<CatchReturnInst>
4591 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4592
4593DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)CatchReturnInst::op_iterator CatchReturnInst::op_begin() { return
OperandTraits<CatchReturnInst>::op_begin(this); } CatchReturnInst
::const_op_iterator CatchReturnInst::op_begin() const { return
OperandTraits<CatchReturnInst>::op_begin(const_cast<
CatchReturnInst*>(this)); } CatchReturnInst::op_iterator CatchReturnInst
::op_end() { return OperandTraits<CatchReturnInst>::op_end
(this); } CatchReturnInst::const_op_iterator CatchReturnInst::
op_end() const { return OperandTraits<CatchReturnInst>::
op_end(const_cast<CatchReturnInst*>(this)); } Value *CatchReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast<void
> (0)); return cast_or_null<Value>( OperandTraits<
CatchReturnInst>::op_begin(const_cast<CatchReturnInst*>
(this))[i_nocapture].get()); } void CatchReturnInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<
void> (0)); OperandTraits<CatchReturnInst>::op_begin
(this)[i_nocapture] = Val_nocapture; } unsigned CatchReturnInst
::getNumOperands() const { return OperandTraits<CatchReturnInst
>::operands(this); } template <int Idx_nocapture> Use
&CatchReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
CatchReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
4594
4595//===----------------------------------------------------------------------===//
4596// CleanupReturnInst Class
4597//===----------------------------------------------------------------------===//
4598
4599class CleanupReturnInst : public Instruction {
  // cleanupret terminator. Operand layout: Op<0> = the owning cleanuppad;
  // Op<1> = the optional unwind destination block. Whether the unwind
  // destination is present is recorded in subclass-data bit 0 (below).
4600 using UnwindDestField = BoolBitfieldElementT<0>;
4601
4602private:
4603 CleanupReturnInst(const CleanupReturnInst &RI);
4604 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4605 Instruction *InsertBefore = nullptr);
4606 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4607 BasicBlock *InsertAtEnd);
4608
  // Constructor helper (defined out of line).
4609 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4610
4611protected:
4612 // Note: Instruction needs to be a friend here to call cloneImpl.
4613 friend class Instruction;
4614
4615 CleanupReturnInst *cloneImpl() const;
4616
4617public:
  // Factory methods; the operand count is 1 or 2 depending on whether an
  // unwind destination was supplied.
4618 static CleanupReturnInst *Create(Value *CleanupPad,
4619 BasicBlock *UnwindBB = nullptr,
4620 Instruction *InsertBefore = nullptr) {
4621 assert(CleanupPad)(static_cast<void> (0));
4622 unsigned Values = 1;
4623 if (UnwindBB)
4624 ++Values;
4625 return new (Values)
4626 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4627 }
4628
4629 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4630 BasicBlock *InsertAtEnd) {
4631 assert(CleanupPad)(static_cast<void> (0));
4632 unsigned Values = 1;
4633 if (UnwindBB)
4634 ++Values;
4635 return new (Values)
4636 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4637 }
4638
4639 /// Provide fast operand accessors
4640 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4641
  // The unwind-dest flag lives in the instruction's subclass-data bits.
4642 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4643 bool unwindsToCaller() const { return !hasUnwindDest(); }
4644
4645 /// Convenience accessor.
4646 CleanupPadInst *getCleanupPad() const {
4647 return cast<CleanupPadInst>(Op<0>());
4648 }
4649 void setCleanupPad(CleanupPadInst *CleanupPad) {
4650 assert(CleanupPad)(static_cast<void> (0));
4651 Op<0>() = CleanupPad;
4652 }
4653
4654 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4655
  // Returns nullptr when the cleanupret unwinds to the caller.
4656 BasicBlock *getUnwindDest() const {
4657 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4658 }
  // Only valid when hasUnwindDest() is true (asserted in debug builds).
4659 void setUnwindDest(BasicBlock *NewDest) {
4660 assert(NewDest)(static_cast<void> (0));
4661 assert(hasUnwindDest())(static_cast<void> (0));
4662 Op<1>() = NewDest;
4663 }
4664
4665 // Methods for support type inquiry through isa, cast, and dyn_cast:
4666 static bool classof(const Instruction *I) {
4667 return (I->getOpcode() == Instruction::CleanupRet);
4668 }
4669 static bool classof(const Value *V) {
4670 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4671 }
4672
4673private:
  // Indexed successor accessors for the generic terminator interface;
  // index 0 is the unwind destination when present.
4674 BasicBlock *getSuccessor(unsigned Idx) const {
4675 assert(Idx == 0)(static_cast<void> (0));
4676 return getUnwindDest();
4677 }
4678
4679 void setSuccessor(unsigned Idx, BasicBlock *B) {
4680 assert(Idx == 0)(static_cast<void> (0));
4681 setUnwindDest(B);
4682 }
4683
4684 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4685 // method so that subclasses cannot accidentally use it.
4686 template <typename Bitfield>
4687 void setSubclassData(typename Bitfield::Type Value) {
4688 Instruction::setSubclassData<Bitfield>(Value);
4689 }
4690};
4691
// CleanupReturnInst has a variadic operand count with a minimum of one
// (the cleanuppad); the optional second operand is the unwind destination.
// The accessor definitions below come from the expansion of
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS.
4692template <>
4693struct OperandTraits<CleanupReturnInst>
4694 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4695
4696DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() {
return OperandTraits<CleanupReturnInst>::op_begin(this
); } CleanupReturnInst::const_op_iterator CleanupReturnInst::
op_begin() const { return OperandTraits<CleanupReturnInst>
::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst
::op_iterator CleanupReturnInst::op_end() { return OperandTraits
<CleanupReturnInst>::op_end(this); } CleanupReturnInst::
const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits
<CleanupReturnInst>::op_end(const_cast<CleanupReturnInst
*>(this)); } Value *CleanupReturnInst::getOperand(unsigned
i_nocapture) const { (static_cast<void> (0)); return cast_or_null
<Value>( OperandTraits<CleanupReturnInst>::op_begin
(const_cast<CleanupReturnInst*>(this))[i_nocapture].get
()); } void CleanupReturnInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits
<CleanupReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned CleanupReturnInst::getNumOperands() const { return
OperandTraits<CleanupReturnInst>::operands(this); } template
<int Idx_nocapture> Use &CleanupReturnInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &CleanupReturnInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
4697
4698//===----------------------------------------------------------------------===//
4699// UnreachableInst Class
4700//===----------------------------------------------------------------------===//
4701
4702//===---------------------------------------------------------------------------
4703/// This function has undefined behavior. In particular, the
4704/// presence of this instruction indicates some higher level knowledge that the
4705/// end of the block cannot be reached.
4706///
4707class UnreachableInst : public Instruction {
4708protected:
4709 // Note: Instruction needs to be a friend here to call cloneImpl.
4710 friend class Instruction;
4711
4712 UnreachableInst *cloneImpl() const;
4713
4714public:
4715 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4716 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4717
4718 // allocate space for exactly zero operands
4719 void *operator new(size_t S) { return User::operator new(S, 0); }
4720 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4721
4722 unsigned getNumSuccessors() const { return 0; }
4723
4724 // Methods for support type inquiry through isa, cast, and dyn_cast:
4725 static bool classof(const Instruction *I) {
4726 return I->getOpcode() == Instruction::Unreachable;
4727 }
4728 static bool classof(const Value *V) {
4729 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4730 }
4731
4732private:
  // Terminator-interface stubs: an unreachable has no successors, so these
  // must never be called.
4733 BasicBlock *getSuccessor(unsigned idx) const {
4734 llvm_unreachable("UnreachableInst has no successors!")__builtin_unreachable();
4735 }
4736
4737 void setSuccessor(unsigned idx, BasicBlock *B) {
4738 llvm_unreachable("UnreachableInst has no successors!")__builtin_unreachable();
4739 }
4740};
4741
4742//===----------------------------------------------------------------------===//
4743// TruncInst Class
4744//===----------------------------------------------------------------------===//
4745
4746/// This class represents a truncation of integer types.
4747class TruncInst : public CastInst {
4748protected:
4749 // Note: Instruction needs to be a friend here to call cloneImpl.
4750 friend class Instruction;
4751
4752 /// Clone an identical TruncInst
4753 TruncInst *cloneImpl() const;
4754
4755public:
4756 /// Constructor with insert-before-instruction semantics
4757 TruncInst(
4758 Value *S, ///< The value to be truncated
4759 Type *Ty, ///< The (smaller) type to truncate to
4760 const Twine &NameStr = "", ///< A name for the new instruction
4761 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4762 );
4763
4764 /// Constructor with insert-at-end-of-block semantics
4765 TruncInst(
4766 Value *S, ///< The value to be truncated
4767 Type *Ty, ///< The (smaller) type to truncate to
4768 const Twine &NameStr, ///< A name for the new instruction
4769 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4770 );
4771
4772 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4773 static bool classof(const Instruction *I) {
4774 return I->getOpcode() == Trunc;
4775 }
  // A Value is a TruncInst iff it is an Instruction with the Trunc opcode.
4776 static bool classof(const Value *V) {
4777 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4778 }
4779};
4780
4781//===----------------------------------------------------------------------===//
4782// ZExtInst Class
4783//===----------------------------------------------------------------------===//
4784
4785/// This class represents zero extension of integer types.
4786class ZExtInst : public CastInst {
4787protected:
4788 // Note: Instruction needs to be a friend here to call cloneImpl.
4789 friend class Instruction;
4790
4791 /// Clone an identical ZExtInst
4792 ZExtInst *cloneImpl() const;
4793
4794public:
4795 /// Constructor with insert-before-instruction semantics
4796 ZExtInst(
4797 Value *S, ///< The value to be zero extended
4798 Type *Ty, ///< The type to zero extend to
4799 const Twine &NameStr = "", ///< A name for the new instruction
4800 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4801 );
4802
4803 /// Constructor with insert-at-end semantics.
4804 ZExtInst(
4805 Value *S, ///< The value to be zero extended
4806 Type *Ty, ///< The type to zero extend to
4807 const Twine &NameStr, ///< A name for the new instruction
4808 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4809 );
4810
4811 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4812 static bool classof(const Instruction *I) {
4813 return I->getOpcode() == ZExt;
4814 }
  // A Value is a ZExtInst iff it is an Instruction with the ZExt opcode.
4815 static bool classof(const Value *V) {
4816 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4817 }
4818};
4819
4820//===----------------------------------------------------------------------===//
4821// SExtInst Class
4822//===----------------------------------------------------------------------===//
4823
4824/// This class represents a sign extension of integer types.
4825class SExtInst : public CastInst {
4826protected:
4827 // Note: Instruction needs to be a friend here to call cloneImpl.
4828 friend class Instruction;
4829
4830 /// Clone an identical SExtInst
4831 SExtInst *cloneImpl() const;
4832
4833public:
4834 /// Constructor with insert-before-instruction semantics
4835 SExtInst(
4836 Value *S, ///< The value to be sign extended
4837 Type *Ty, ///< The type to sign extend to
4838 const Twine &NameStr = "", ///< A name for the new instruction
4839 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4840 );
4841
4842 /// Constructor with insert-at-end-of-block semantics
4843 SExtInst(
4844 Value *S, ///< The value to be sign extended
4845 Type *Ty, ///< The type to sign extend to
4846 const Twine &NameStr, ///< A name for the new instruction
4847 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4848 );
4849
4850 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4851 static bool classof(const Instruction *I) {
4852 return I->getOpcode() == SExt;
4853 }
  // A Value is a SExtInst iff it is an Instruction with the SExt opcode.
4854 static bool classof(const Value *V) {
4855 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4856 }
4857};
4858
4859//===----------------------------------------------------------------------===//
4860// FPTruncInst Class
4861//===----------------------------------------------------------------------===//
4862
4863/// This class represents a truncation of floating point types.
4864class FPTruncInst : public CastInst {
4865protected:
4866 // Note: Instruction needs to be a friend here to call cloneImpl.
4867 friend class Instruction;
4868
4869 /// Clone an identical FPTruncInst
4870 FPTruncInst *cloneImpl() const;
4871
4872public:
4873 /// Constructor with insert-before-instruction semantics
4874 FPTruncInst(
4875 Value *S, ///< The value to be truncated
4876 Type *Ty, ///< The type to truncate to
4877 const Twine &NameStr = "", ///< A name for the new instruction
4878 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4879 );
4880
  // NOTE(review): the doc comment below says "insert-before" but the
  // parameter is InsertAtEnd — this is insert-at-end-of-block semantics.
4881 /// Constructor with insert-before-instruction semantics
4882 FPTruncInst(
4883 Value *S, ///< The value to be truncated
4884 Type *Ty, ///< The type to truncate to
4885 const Twine &NameStr, ///< A name for the new instruction
4886 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4887 );
4888
4889 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4890 static bool classof(const Instruction *I) {
4891 return I->getOpcode() == FPTrunc;
4892 }
  // A Value is an FPTruncInst iff it is an Instruction with FPTrunc opcode.
4893 static bool classof(const Value *V) {
4894 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4895 }
4896};
4897
4898//===----------------------------------------------------------------------===//
4899// FPExtInst Class
4900//===----------------------------------------------------------------------===//
4901
4902/// This class represents an extension of floating point types.
4903class FPExtInst : public CastInst {
4904protected:
4905 // Note: Instruction needs to be a friend here to call cloneImpl.
4906 friend class Instruction;
4907
4908 /// Clone an identical FPExtInst
4909 FPExtInst *cloneImpl() const;
4910
4911public:
4912 /// Constructor with insert-before-instruction semantics
4913 FPExtInst(
4914 Value *S, ///< The value to be extended
4915 Type *Ty, ///< The type to extend to
4916 const Twine &NameStr = "", ///< A name for the new instruction
4917 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4918 );
4919
4920 /// Constructor with insert-at-end-of-block semantics
4921 FPExtInst(
4922 Value *S, ///< The value to be extended
4923 Type *Ty, ///< The type to extend to
4924 const Twine &NameStr, ///< A name for the new instruction
4925 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4926 );
4927
4928 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4929 static bool classof(const Instruction *I) {
4930 return I->getOpcode() == FPExt;
4931 }
  // A Value is an FPExtInst iff it is an Instruction with the FPExt opcode.
4932 static bool classof(const Value *V) {
4933 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4934 }
4935};
4936
4937//===----------------------------------------------------------------------===//
4938// UIToFPInst Class
4939//===----------------------------------------------------------------------===//
4940
4941/// This class represents a cast unsigned integer to floating point.
4942class UIToFPInst : public CastInst {
4943protected:
4944 // Note: Instruction needs to be a friend here to call cloneImpl.
4945 friend class Instruction;
4946
4947 /// Clone an identical UIToFPInst
4948 UIToFPInst *cloneImpl() const;
4949
4950public:
4951 /// Constructor with insert-before-instruction semantics
4952 UIToFPInst(
4953 Value *S, ///< The value to be converted
4954 Type *Ty, ///< The type to convert to
4955 const Twine &NameStr = "", ///< A name for the new instruction
4956 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4957 );
4958
4959 /// Constructor with insert-at-end-of-block semantics
4960 UIToFPInst(
4961 Value *S, ///< The value to be converted
4962 Type *Ty, ///< The type to convert to
4963 const Twine &NameStr, ///< A name for the new instruction
4964 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4965 );
4966
4967 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4968 static bool classof(const Instruction *I) {
4969 return I->getOpcode() == UIToFP;
4970 }
  // A Value is a UIToFPInst iff it is an Instruction with the UIToFP opcode.
4971 static bool classof(const Value *V) {
4972 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4973 }
4974};
4975
4976//===----------------------------------------------------------------------===//
4977// SIToFPInst Class
4978//===----------------------------------------------------------------------===//
4979
4980/// This class represents a cast from signed integer to floating point.
4981class SIToFPInst : public CastInst {
4982protected:
4983 // Note: Instruction needs to be a friend here to call cloneImpl.
4984 friend class Instruction;
4985
4986 /// Clone an identical SIToFPInst
4987 SIToFPInst *cloneImpl() const;
4988
4989public:
4990 /// Constructor with insert-before-instruction semantics
4991 SIToFPInst(
4992 Value *S, ///< The value to be converted
4993 Type *Ty, ///< The type to convert to
4994 const Twine &NameStr = "", ///< A name for the new instruction
4995 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4996 );
4997
4998 /// Constructor with insert-at-end-of-block semantics
4999 SIToFPInst(
5000 Value *S, ///< The value to be converted
5001 Type *Ty, ///< The type to convert to
5002 const Twine &NameStr, ///< A name for the new instruction
5003 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5004 );
5005
5006 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5007 static bool classof(const Instruction *I) {
5008 return I->getOpcode() == SIToFP;
5009 }
  // A Value is a SIToFPInst iff it is an Instruction with the SIToFP opcode.
5010 static bool classof(const Value *V) {
5011 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5012 }
5013};
5014
5015//===----------------------------------------------------------------------===//
5016// FPToUIInst Class
5017//===----------------------------------------------------------------------===//
5018
5019/// This class represents a cast from floating point to unsigned integer
5020class FPToUIInst : public CastInst {
5021protected:
5022 // Note: Instruction needs to be a friend here to call cloneImpl.
5023 friend class Instruction;
5024
5025 /// Clone an identical FPToUIInst
5026 FPToUIInst *cloneImpl() const;
5027
5028public:
5029 /// Constructor with insert-before-instruction semantics
5030 FPToUIInst(
5031 Value *S, ///< The value to be converted
5032 Type *Ty, ///< The type to convert to
5033 const Twine &NameStr = "", ///< A name for the new instruction
5034 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5035 );
5036
5037 /// Constructor with insert-at-end-of-block semantics
5038 FPToUIInst(
5039 Value *S, ///< The value to be converted
5040 Type *Ty, ///< The type to convert to
5041 const Twine &NameStr, ///< A name for the new instruction
5042 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
5043 );
5044
5045 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5046 static bool classof(const Instruction *I) {
5047 return I->getOpcode() == FPToUI;
5048 }
  // A Value is an FPToUIInst iff it is an Instruction with the FPToUI opcode.
5049 static bool classof(const Value *V) {
5050 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5051 }
5052};
5053
5054//===----------------------------------------------------------------------===//
5055// FPToSIInst Class
5056//===----------------------------------------------------------------------===//
5057
5058/// This class represents a cast from floating point to signed integer.
5059class FPToSIInst : public CastInst {
5060protected:
5061 // Note: Instruction needs to be a friend here to call cloneImpl.
5062 friend class Instruction;
5063
5064 /// Clone an identical FPToSIInst
5065 FPToSIInst *cloneImpl() const;
5066
5067public:
5068 /// Constructor with insert-before-instruction semantics
5069 FPToSIInst(
5070 Value *S, ///< The value to be converted
5071 Type *Ty, ///< The type to convert to
5072 const Twine &NameStr = "", ///< A name for the new instruction
5073 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5074 );
5075
5076 /// Constructor with insert-at-end-of-block semantics
5077 FPToSIInst(
5078 Value *S, ///< The value to be converted
5079 Type *Ty, ///< The type to convert to
5080 const Twine &NameStr, ///< A name for the new instruction
5081 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5082 );
5083
5084 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5085 static bool classof(const Instruction *I) {
5086 return I->getOpcode() == FPToSI;
5087 }
  // A Value is an FPToSIInst iff it is an Instruction with the FPToSI opcode.
5088 static bool classof(const Value *V) {
5089 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5090 }
5091};
5092
5093//===----------------------------------------------------------------------===//
5094// IntToPtrInst Class
5095//===----------------------------------------------------------------------===//
5096
5097/// This class represents a cast from an integer to a pointer.
5098class IntToPtrInst : public CastInst {
5099public:
5100 // Note: Instruction needs to be a friend here to call cloneImpl.
5101 friend class Instruction;
5102
5103 /// Constructor with insert-before-instruction semantics
5104 IntToPtrInst(
5105 Value *S, ///< The value to be converted
5106 Type *Ty, ///< The type to convert to
5107 const Twine &NameStr = "", ///< A name for the new instruction
5108 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5109 );
5110
5111 /// Constructor with insert-at-end-of-block semantics
5112 IntToPtrInst(
5113 Value *S, ///< The value to be converted
5114 Type *Ty, ///< The type to convert to
5115 const Twine &NameStr, ///< A name for the new instruction
5116 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5117 );
5118
5119 /// Clone an identical IntToPtrInst.
5120 IntToPtrInst *cloneImpl() const;
5121
  // The result type is the pointer type, so its address space is the
  // instruction's own type's address space.
5122 /// Returns the address space of this instruction's pointer type.
5123 unsigned getAddressSpace() const {
5124 return getType()->getPointerAddressSpace();
5125 }
5126
5127 // Methods for support type inquiry through isa, cast, and dyn_cast:
5128 static bool classof(const Instruction *I) {
5129 return I->getOpcode() == IntToPtr;
5130 }
5131 static bool classof(const Value *V) {
5132 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5133 }
5134};
5135
5136//===----------------------------------------------------------------------===//
5137// PtrToIntInst Class
5138//===----------------------------------------------------------------------===//
5139
5140/// This class represents a cast from a pointer to an integer.
5141class PtrToIntInst : public CastInst {
5142protected:
5143 // Note: Instruction needs to be a friend here to call cloneImpl.
5144 friend class Instruction;
5145
5146 /// Clone an identical PtrToIntInst.
5147 PtrToIntInst *cloneImpl() const;
5148
5149public:
5150 /// Constructor with insert-before-instruction semantics
5151 PtrToIntInst(
5152 Value *S, ///< The value to be converted
5153 Type *Ty, ///< The type to convert to
5154 const Twine &NameStr = "", ///< A name for the new instruction
5155 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5156 );
5157
5158 /// Constructor with insert-at-end-of-block semantics
5159 PtrToIntInst(
5160 Value *S, ///< The value to be converted
5161 Type *Ty, ///< The type to convert to
5162 const Twine &NameStr, ///< A name for the new instruction
5163 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5164 );
5165
  // The single operand (index 0) is the source pointer being converted.
5166 /// Gets the pointer operand.
5167 Value *getPointerOperand() { return getOperand(0); }
5168 /// Gets the pointer operand.
5169 const Value *getPointerOperand() const { return getOperand(0); }
5170 /// Gets the operand index of the pointer operand.
5171 static unsigned getPointerOperandIndex() { return 0U; }
5172
5173 /// Returns the address space of the pointer operand.
5174 unsigned getPointerAddressSpace() const {
5175 return getPointerOperand()->getType()->getPointerAddressSpace();
5176 }
5177
5178 // Methods for support type inquiry through isa, cast, and dyn_cast:
5179 static bool classof(const Instruction *I) {
5180 return I->getOpcode() == PtrToInt;
5181 }
5182 static bool classof(const Value *V) {
5183 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5184 }
5185};
5186
5187//===----------------------------------------------------------------------===//
5188// BitCastInst Class
5189//===----------------------------------------------------------------------===//
5190
5191/// This class represents a no-op cast from one type to another.
5192class BitCastInst : public CastInst {
5193protected:
5194 // Note: Instruction needs to be a friend here to call cloneImpl.
5195 friend class Instruction;
5196
5197 /// Clone an identical BitCastInst.
5198 BitCastInst *cloneImpl() const;
5199
5200public:
5201 /// Constructor with insert-before-instruction semantics
5202 BitCastInst(
5203 Value *S, ///< The value to be casted
5204 Type *Ty, ///< The type to casted to
5205 const Twine &NameStr = "", ///< A name for the new instruction
5206 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5207 );
5208
5209 /// Constructor with insert-at-end-of-block semantics
5210 BitCastInst(
5211 Value *S, ///< The value to be casted
5212 Type *Ty, ///< The type to casted to
5213 const Twine &NameStr, ///< A name for the new instruction
5214 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5215 );
5216
5217 // Methods for support type inquiry through isa, cast, and dyn_cast:
5218 static bool classof(const Instruction *I) {
5219 return I->getOpcode() == BitCast;
5220 }
  // A Value is a BitCastInst iff it is an Instruction with the BitCast opcode.
5221 static bool classof(const Value *V) {
5222 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5223 }
5224};
5225
5226//===----------------------------------------------------------------------===//
5227// AddrSpaceCastInst Class
5228//===----------------------------------------------------------------------===//
5229
5230/// This class represents a conversion between pointers from one address space
5231/// to another.
5232class AddrSpaceCastInst : public CastInst {
5233protected:
5234 // Note: Instruction needs to be a friend here to call cloneImpl.
5235 friend class Instruction;
5236
5237 /// Clone an identical AddrSpaceCastInst.
5238 AddrSpaceCastInst *cloneImpl() const;
5239
5240public:
5241 /// Constructor with insert-before-instruction semantics
5242 AddrSpaceCastInst(
5243 Value *S, ///< The value to be casted
5244 Type *Ty, ///< The type to casted to
5245 const Twine &NameStr = "", ///< A name for the new instruction
5246 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5247 );
5248
5249 /// Constructor with insert-at-end-of-block semantics
5250 AddrSpaceCastInst(
5251 Value *S, ///< The value to be casted
5252 Type *Ty, ///< The type to casted to
5253 const Twine &NameStr, ///< A name for the new instruction
5254 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5255 );
5256
5257 // Methods for support type inquiry through isa, cast, and dyn_cast:
5258 static bool classof(const Instruction *I) {
5259 return I->getOpcode() == AddrSpaceCast;
5260 }
5261 static bool classof(const Value *V) {
5262 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5263 }
5264
  // The single operand (index 0) is the source pointer being converted.
5265 /// Gets the pointer operand.
5266 Value *getPointerOperand() {
5267 return getOperand(0);
5268 }
5269
5270 /// Gets the pointer operand.
5271 const Value *getPointerOperand() const {
5272 return getOperand(0);
5273 }
5274
5275 /// Gets the operand index of the pointer operand.
5276 static unsigned getPointerOperandIndex() {
5277 return 0U;
5278 }
5279
5280 /// Returns the address space of the pointer operand.
5281 unsigned getSrcAddressSpace() const {
5282 return getPointerOperand()->getType()->getPointerAddressSpace();
5283 }
5284
5285 /// Returns the address space of the result.
5286 unsigned getDestAddressSpace() const {
5287 return getType()->getPointerAddressSpace();
5288 }
5289};
5290
5291/// A helper function that returns the pointer operand of a load or store
5292/// instruction. Returns nullptr if not load or store.
5293inline const Value *getLoadStorePointerOperand(const Value *V) {
5294 if (auto *Load = dyn_cast<LoadInst>(V))
5295 return Load->getPointerOperand();
5296 if (auto *Store = dyn_cast<StoreInst>(V))
5297 return Store->getPointerOperand();
5298 return nullptr;
5299}
5300inline Value *getLoadStorePointerOperand(Value *V) {
5301 return const_cast<Value *>(
5302 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5303}
5304
5305/// A helper function that returns the pointer operand of a load, store
5306/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5307inline const Value *getPointerOperand(const Value *V) {
5308 if (auto *Ptr = getLoadStorePointerOperand(V))
5309 return Ptr;
5310 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5311 return Gep->getPointerOperand();
5312 return nullptr;
5313}
5314inline Value *getPointerOperand(Value *V) {
5315 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5316}
5317
5318/// A helper function that returns the alignment of load or store instruction.
  // Precondition: I must be a LoadInst or StoreInst (asserted in debug
  // builds; the cast<> on the final line enforces it otherwise).
5319inline Align getLoadStoreAlignment(Value *I) {
5320 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast<void> (0))
5321 "Expected Load or Store instruction")(static_cast<void> (0));
5322 if (auto *LI = dyn_cast<LoadInst>(I))
5323 return LI->getAlign();
5324 return cast<StoreInst>(I)->getAlign();
5325}
5326
5327/// A helper function that returns the address space of the pointer operand of
5328/// load or store instruction.
  // Precondition: I must be a LoadInst or StoreInst (asserted in debug
  // builds; the cast<> on the final line enforces it otherwise).
5329inline unsigned getLoadStoreAddressSpace(Value *I) {
5330 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast<void> (0))
5331 "Expected Load or Store instruction")(static_cast<void> (0));
5332 if (auto *LI = dyn_cast<LoadInst>(I))
5333 return LI->getPointerAddressSpace();
5334 return cast<StoreInst>(I)->getPointerAddressSpace();
5335}
5336
5337/// A helper function that returns the type of a load or store instruction.
5338inline Type *getLoadStoreType(Value *I) {
5339 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast<void> (0))
5340 "Expected Load or Store instruction")(static_cast<void> (0));
5341 if (auto *LI = dyn_cast<LoadInst>(I))
5342 return LI->getType();
5343 return cast<StoreInst>(I)->getValueOperand()->getType();
5344}
5345
5346//===----------------------------------------------------------------------===//
5347// FreezeInst Class
5348//===----------------------------------------------------------------------===//
5349
5350/// This class represents a freeze function that returns random concrete
5351/// value if an operand is either a poison value or an undef value
5352class FreezeInst : public UnaryInstruction {
5353protected:
5354 // Note: Instruction needs to be a friend here to call cloneImpl.
5355 friend class Instruction;
5356
5357 /// Clone an identical FreezeInst
5358 FreezeInst *cloneImpl() const;
5359
5360public:
5361 explicit FreezeInst(Value *S,
5362 const Twine &NameStr = "",
5363 Instruction *InsertBefore = nullptr);
5364 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5365
5366 // Methods for support type inquiry through isa, cast, and dyn_cast:
5367 static inline bool classof(const Instruction *I) {
5368 return I->getOpcode() == Freeze;
5369 }
5370 static inline bool classof(const Value *V) {
5371 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5372 }
5373};
5374
5375} // end namespace llvm
5376
5377#endif // LLVM_IR_INSTRUCTIONS_H

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/IR/Type.h

1//===- llvm/Type.h - Classes for handling data types ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the Type class. For more "Type"
10// stuff, look in DerivedTypes.h.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_TYPE_H
15#define LLVM_IR_TYPE_H
16
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/SmallPtrSet.h"
20#include "llvm/Support/CBindingWrapping.h"
21#include "llvm/Support/Casting.h"
22#include "llvm/Support/Compiler.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/TypeSize.h"
25#include <cassert>
26#include <cstdint>
27#include <iterator>
28
29namespace llvm {
30
31class IntegerType;
32class LLVMContext;
33class PointerType;
34class raw_ostream;
35class StringRef;
36
37/// The instances of the Type class are immutable: once they are created,
38/// they are never changed. Also note that only one instance of a particular
39/// type is ever created. Thus seeing if two types are equal is a matter of
40/// doing a trivial pointer comparison. To enforce that no two equal instances
41/// are created, Type instances can only be created via static factory methods
42/// in class Type and in derived classes. Once allocated, Types are never
43/// free'd.
44///
45class Type {
46public:
47 //===--------------------------------------------------------------------===//
48 /// Definitions of all of the base types for the Type system. Based on this
49 /// value, you can cast to a class defined in DerivedTypes.h.
50 /// Note: If you add an element to this, you need to add an element to the
51 /// Type::getPrimitiveType function, or else things will break!
52 /// Also update LLVMTypeKind and LLVMGetTypeKind () in the C binding.
53 ///
54 enum TypeID {
55 // PrimitiveTypes
56 HalfTyID = 0, ///< 16-bit floating point type
57 BFloatTyID, ///< 16-bit floating point type (7-bit significand)
58 FloatTyID, ///< 32-bit floating point type
59 DoubleTyID, ///< 64-bit floating point type
60 X86_FP80TyID, ///< 80-bit floating point type (X87)
61 FP128TyID, ///< 128-bit floating point type (112-bit significand)
62 PPC_FP128TyID, ///< 128-bit floating point type (two 64-bits, PowerPC)
63 VoidTyID, ///< type with no size
64 LabelTyID, ///< Labels
65 MetadataTyID, ///< Metadata
66 X86_MMXTyID, ///< MMX vectors (64 bits, X86 specific)
67 X86_AMXTyID, ///< AMX vectors (8192 bits, X86 specific)
68 TokenTyID, ///< Tokens
69
70 // Derived types... see DerivedTypes.h file.
71 IntegerTyID, ///< Arbitrary bit width integers
72 FunctionTyID, ///< Functions
73 PointerTyID, ///< Pointers
74 StructTyID, ///< Structures
75 ArrayTyID, ///< Arrays
76 FixedVectorTyID, ///< Fixed width SIMD vector type
77 ScalableVectorTyID ///< Scalable SIMD vector type
78 };
79
80private:
81 /// This refers to the LLVMContext in which this type was uniqued.
82 LLVMContext &Context;
83
84 TypeID ID : 8; // The current base type of this type.
85 unsigned SubclassData : 24; // Space for subclasses to store data.
86 // Note that this should be synchronized with
87 // MAX_INT_BITS value in IntegerType class.
88
89protected:
90 friend class LLVMContextImpl;
91
92 explicit Type(LLVMContext &C, TypeID tid)
93 : Context(C), ID(tid), SubclassData(0) {}
94 ~Type() = default;
95
96 unsigned getSubclassData() const { return SubclassData; }
97
98 void setSubclassData(unsigned val) {
99 SubclassData = val;
100 // Ensure we don't have any accidental truncation.
101 assert(getSubclassData() == val && "Subclass data too large for field")(static_cast<void> (0));
102 }
103
104 /// Keeps track of how many Type*'s there are in the ContainedTys list.
105 unsigned NumContainedTys = 0;
106
107 /// A pointer to the array of Types contained by this Type. For example, this
108 /// includes the arguments of a function type, the elements of a structure,
109 /// the pointee of a pointer, the element type of an array, etc. This pointer
110 /// may be 0 for types that don't contain other types (Integer, Double,
111 /// Float).
112 Type * const *ContainedTys = nullptr;
113
114public:
115 /// Print the current type.
116 /// Omit the type details if \p NoDetails == true.
117 /// E.g., let %st = type { i32, i16 }
118 /// When \p NoDetails is true, we only print %st.
119 /// Put differently, \p NoDetails prints the type as if
120 /// inlined with the operands when printing an instruction.
121 void print(raw_ostream &O, bool IsForDebug = false,
122 bool NoDetails = false) const;
123
124 void dump() const;
125
126 /// Return the LLVMContext in which this type was uniqued.
127 LLVMContext &getContext() const { return Context; }
128
129 //===--------------------------------------------------------------------===//
130 // Accessors for working with types.
131 //
132
133 /// Return the type id for the type. This will return one of the TypeID enum
134 /// elements defined above.
135 TypeID getTypeID() const { return ID; }
136
137 /// Return true if this is 'void'.
138 bool isVoidTy() const { return getTypeID() == VoidTyID; }
139
140 /// Return true if this is 'half', a 16-bit IEEE fp type.
141 bool isHalfTy() const { return getTypeID() == HalfTyID; }
142
143 /// Return true if this is 'bfloat', a 16-bit bfloat type.
144 bool isBFloatTy() const { return getTypeID() == BFloatTyID; }
145
146 /// Return true if this is 'float', a 32-bit IEEE fp type.
147 bool isFloatTy() const { return getTypeID() == FloatTyID; }
148
149 /// Return true if this is 'double', a 64-bit IEEE fp type.
150 bool isDoubleTy() const { return getTypeID() == DoubleTyID; }
151
152 /// Return true if this is x86 long double.
153 bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }
154
155 /// Return true if this is 'fp128'.
156 bool isFP128Ty() const { return getTypeID() == FP128TyID; }
157
158 /// Return true if this is powerpc long double.
159 bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
160
161 /// Return true if this is one of the six floating-point types
162 bool isFloatingPointTy() const {
163 return getTypeID() == HalfTyID || getTypeID() == BFloatTyID ||
164 getTypeID() == FloatTyID || getTypeID() == DoubleTyID ||
165 getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
166 getTypeID() == PPC_FP128TyID;
167 }
168
169 const fltSemantics &getFltSemantics() const {
170 switch (getTypeID()) {
171 case HalfTyID: return APFloat::IEEEhalf();
172 case BFloatTyID: return APFloat::BFloat();
173 case FloatTyID: return APFloat::IEEEsingle();
174 case DoubleTyID: return APFloat::IEEEdouble();
175 case X86_FP80TyID: return APFloat::x87DoubleExtended();
176 case FP128TyID: return APFloat::IEEEquad();
177 case PPC_FP128TyID: return APFloat::PPCDoubleDouble();
178 default: llvm_unreachable("Invalid floating type")__builtin_unreachable();
179 }
180 }
181
182 /// Return true if this is X86 MMX.
183 bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }
184
185 /// Return true if this is X86 AMX.
186 bool isX86_AMXTy() const { return getTypeID() == X86_AMXTyID; }
187
188 /// Return true if this is a FP type or a vector of FP.
189 bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
190
191 /// Return true if this is 'label'.
192 bool isLabelTy() const { return getTypeID() == LabelTyID; }
193
194 /// Return true if this is 'metadata'.
195 bool isMetadataTy() const { return getTypeID() == MetadataTyID; }
196
197 /// Return true if this is 'token'.
198 bool isTokenTy() const { return getTypeID() == TokenTyID; }
199
200 /// True if this is an instance of IntegerType.
201 bool isIntegerTy() const { return getTypeID() == IntegerTyID; }
Step 24: the analyzer assumes the condition 'getTypeID() == IntegerTyID' is true.
Step 25: isIntegerTy() therefore returns 1 (true); this value feeds a branch condition later on the bug path.
202
203 /// Return true if this is an IntegerType of the given width.
204 bool isIntegerTy(unsigned Bitwidth) const;
205
206 /// Return true if this is an integer type or a vector of integer types.
207 bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
208
209 /// Return true if this is an integer type or a vector of integer types of
210 /// the given width.
211 bool isIntOrIntVectorTy(unsigned BitWidth) const {
212 return getScalarType()->isIntegerTy(BitWidth);
213 }
214
215 /// Return true if this is an integer type or a pointer type.
216 bool isIntOrPtrTy() const { return isIntegerTy() || isPointerTy(); }
217
218 /// True if this is an instance of FunctionType.
219 bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
220
221 /// True if this is an instance of StructType.
222 bool isStructTy() const { return getTypeID() == StructTyID; }
223
224 /// True if this is an instance of ArrayType.
225 bool isArrayTy() const { return getTypeID() == ArrayTyID; }
226
227 /// True if this is an instance of PointerType.
228 bool isPointerTy() const { return getTypeID() == PointerTyID; }
229
230 /// True if this is an instance of an opaque PointerType.
231 bool isOpaquePointerTy() const;
232
233 /// Return true if this is a pointer type or a vector of pointer types.
234 bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
235
236 /// True if this is an instance of VectorType.
237 inline bool isVectorTy() const {
238 return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID;
239 }
240
241 /// Return true if this type could be converted with a lossless BitCast to
242 /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the
243 /// same size only where no re-interpretation of the bits is done.
244 /// Determine if this type could be losslessly bitcast to Ty
245 bool canLosslesslyBitCastTo(Type *Ty) const;
246
247 /// Return true if this type is empty, that is, it has no elements or all of
248 /// its elements are empty.
249 bool isEmptyTy() const;
250
251 /// Return true if the type is "first class", meaning it is a valid type for a
252 /// Value.
253 bool isFirstClassType() const {
254 return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
255 }
256
257 /// Return true if the type is a valid type for a register in codegen. This
258 /// includes all first-class types except struct and array types.
259 bool isSingleValueType() const {
260 return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
261 isPointerTy() || isVectorTy() || isX86_AMXTy();
262 }
263
264 /// Return true if the type is an aggregate type. This means it is valid as
265 /// the first operand of an insertvalue or extractvalue instruction. This
266 /// includes struct and array types, but does not include vector types.
267 bool isAggregateType() const {
268 return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
269 }
270
271 /// Return true if it makes sense to take the size of this type. To get the
272 /// actual size for a particular target, it is reasonable to use the
273 /// DataLayout subsystem to do this.
274 bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
275 // If it's a primitive, it is always sized.
276 if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
277 getTypeID() == PointerTyID || getTypeID() == X86_MMXTyID ||
278 getTypeID() == X86_AMXTyID)
279 return true;
280 // If it is not something that can have a size (e.g. a function or label),
281 // it doesn't have a size.
282 if (getTypeID() != StructTyID && getTypeID() != ArrayTyID && !isVectorTy())
283 return false;
284 // Otherwise we have to try harder to decide.
285 return isSizedDerivedType(Visited);
286 }
287
288 /// Return the basic size of this type if it is a primitive type. These are
289 /// fixed by LLVM and are not target-dependent.
290 /// This will return zero if the type does not have a size or is not a
291 /// primitive type.
292 ///
293 /// If this is a scalable vector type, the scalable property will be set and
294 /// the runtime size will be a positive integer multiple of the base size.
295 ///
296 /// Note that this may not reflect the size of memory allocated for an
297 /// instance of the type or the number of bytes that are written when an
298 /// instance of the type is stored to memory. The DataLayout class provides
299 /// additional query functions to provide this information.
300 ///
301 TypeSize getPrimitiveSizeInBits() const LLVM_READONLY__attribute__((__pure__));
302
303 /// If this is a vector type, return the getPrimitiveSizeInBits value for the
304 /// element type. Otherwise return the getPrimitiveSizeInBits value for this
305 /// type.
306 unsigned getScalarSizeInBits() const LLVM_READONLY__attribute__((__pure__));
307
308 /// Return the width of the mantissa of this type. This is only valid on
309 /// floating-point types. If the FP type does not have a stable mantissa (e.g.
310 /// ppc long double), this method returns -1.
311 int getFPMantissaWidth() const;
312
313 /// Return whether the type is IEEE compatible, as defined by the eponymous
314 /// method in APFloat.
315 bool isIEEE() const { return APFloat::getZero(getFltSemantics()).isIEEE(); }
316
317 /// If this is a vector type, return the element type, otherwise return
318 /// 'this'.
319 inline Type *getScalarType() const {
320 if (isVectorTy())
321 return getContainedType(0);
322 return const_cast<Type *>(this);
323 }
324
325 //===--------------------------------------------------------------------===//
326 // Type Iteration support.
327 //
328 using subtype_iterator = Type * const *;
329
330 subtype_iterator subtype_begin() const { return ContainedTys; }
331 subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];}
332 ArrayRef<Type*> subtypes() const {
333 return makeArrayRef(subtype_begin(), subtype_end());
334 }
335
336 using subtype_reverse_iterator = std::reverse_iterator<subtype_iterator>;
337
338 subtype_reverse_iterator subtype_rbegin() const {
339 return subtype_reverse_iterator(subtype_end());
340 }
341 subtype_reverse_iterator subtype_rend() const {
342 return subtype_reverse_iterator(subtype_begin());
343 }
344
345 /// This method is used to implement the type iterator (defined at the end of
346 /// the file). For derived types, this returns the types 'contained' in the
347 /// derived type.
348 Type *getContainedType(unsigned i) const {
349 assert(i < NumContainedTys && "Index out of range!")(static_cast<void> (0));
350 return ContainedTys[i];
351 }
352
353 /// Return the number of types in the derived type.
354 unsigned getNumContainedTypes() const { return NumContainedTys; }
355
356 //===--------------------------------------------------------------------===//
357 // Helper methods corresponding to subclass methods. This forces a cast to
358 // the specified subclass and calls its accessor. "getArrayNumElements" (for
359 // example) is shorthand for cast<ArrayType>(Ty)->getNumElements(). This is
360 // only intended to cover the core methods that are frequently used, helper
361 // methods should not be added here.
362
363 inline unsigned getIntegerBitWidth() const;
364
365 inline Type *getFunctionParamType(unsigned i) const;
366 inline unsigned getFunctionNumParams() const;
367 inline bool isFunctionVarArg() const;
368
369 inline StringRef getStructName() const;
370 inline unsigned getStructNumElements() const;
371 inline Type *getStructElementType(unsigned N) const;
372
373 inline uint64_t getArrayNumElements() const;
374
375 Type *getArrayElementType() const {
376 assert(getTypeID() == ArrayTyID)(static_cast<void> (0));
377 return ContainedTys[0];
378 }
379
380 Type *getPointerElementType() const {
381 assert(getTypeID() == PointerTyID)(static_cast<void> (0));
382 return ContainedTys[0];
383 }
384
385 /// Given vector type, change the element type,
386 /// whilst keeping the old number of elements.
387 /// For non-vectors simply returns \p EltTy.
388 inline Type *getWithNewType(Type *EltTy) const;
389
390 /// Given an integer or vector type, change the lane bitwidth to NewBitwidth,
391 /// whilst keeping the old number of lanes.
392 inline Type *getWithNewBitWidth(unsigned NewBitWidth) const;
393
394 /// Given scalar/vector integer type, returns a type with elements twice as
395 /// wide as in the original type. For vectors, preserves element count.
396 inline Type *getExtendedType() const;
397
398 /// Get the address space of this pointer or pointer vector type.
399 inline unsigned getPointerAddressSpace() const;
400
401 //===--------------------------------------------------------------------===//
402 // Static members exported by the Type class itself. Useful for getting
403 // instances of Type.
404 //
405
406 /// Return a type based on an identifier.
407 static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber);
408
409 //===--------------------------------------------------------------------===//
410 // These are the builtin types that are always available.
411 //
412 static Type *getVoidTy(LLVMContext &C);
413 static Type *getLabelTy(LLVMContext &C);
414 static Type *getHalfTy(LLVMContext &C);
415 static Type *getBFloatTy(LLVMContext &C);
416 static Type *getFloatTy(LLVMContext &C);
417 static Type *getDoubleTy(LLVMContext &C);
418 static Type *getMetadataTy(LLVMContext &C);
419 static Type *getX86_FP80Ty(LLVMContext &C);
420 static Type *getFP128Ty(LLVMContext &C);
421 static Type *getPPC_FP128Ty(LLVMContext &C);
422 static Type *getX86_MMXTy(LLVMContext &C);
423 static Type *getX86_AMXTy(LLVMContext &C);
424 static Type *getTokenTy(LLVMContext &C);
425 static IntegerType *getIntNTy(LLVMContext &C, unsigned N);
426 static IntegerType *getInt1Ty(LLVMContext &C);
427 static IntegerType *getInt8Ty(LLVMContext &C);
428 static IntegerType *getInt16Ty(LLVMContext &C);
429 static IntegerType *getInt32Ty(LLVMContext &C);
430 static IntegerType *getInt64Ty(LLVMContext &C);
431 static IntegerType *getInt128Ty(LLVMContext &C);
432 template <typename ScalarTy> static Type *getScalarTy(LLVMContext &C) {
433 int noOfBits = sizeof(ScalarTy) * CHAR_BIT8;
434 if (std::is_integral<ScalarTy>::value) {
435 return (Type*) Type::getIntNTy(C, noOfBits);
436 } else if (std::is_floating_point<ScalarTy>::value) {
437 switch (noOfBits) {
438 case 32:
439 return Type::getFloatTy(C);
440 case 64:
441 return Type::getDoubleTy(C);
442 }
443 }
444 llvm_unreachable("Unsupported type in Type::getScalarTy")__builtin_unreachable();
445 }
446 static Type *getFloatingPointTy(LLVMContext &C, const fltSemantics &S) {
447 Type *Ty;
448 if (&S == &APFloat::IEEEhalf())
449 Ty = Type::getHalfTy(C);
450 else if (&S == &APFloat::BFloat())
451 Ty = Type::getBFloatTy(C);
452 else if (&S == &APFloat::IEEEsingle())
453 Ty = Type::getFloatTy(C);
454 else if (&S == &APFloat::IEEEdouble())
455 Ty = Type::getDoubleTy(C);
456 else if (&S == &APFloat::x87DoubleExtended())
457 Ty = Type::getX86_FP80Ty(C);
458 else if (&S == &APFloat::IEEEquad())
459 Ty = Type::getFP128Ty(C);
460 else {
461 assert(&S == &APFloat::PPCDoubleDouble() && "Unknown FP format")(static_cast<void> (0));
462 Ty = Type::getPPC_FP128Ty(C);
463 }
464 return Ty;
465 }
466
467 //===--------------------------------------------------------------------===//
468 // Convenience methods for getting pointer types with one of the above builtin
469 // types as pointee.
470 //
471 static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
472 static PointerType *getBFloatPtrTy(LLVMContext &C, unsigned AS = 0);
473 static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
474 static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
475 static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
476 static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
477 static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
478 static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
479 static PointerType *getX86_AMXPtrTy(LLVMContext &C, unsigned AS = 0);
480 static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0);
481 static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
482 static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
483 static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
484 static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
485 static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
486
487 /// Return a pointer to the current type. This is equivalent to
488 /// PointerType::get(Foo, AddrSpace).
489 /// TODO: Remove this after opaque pointer transition is complete.
490 PointerType *getPointerTo(unsigned AddrSpace = 0) const;
491
492private:
493 /// Derived types like structures and arrays are sized iff all of the members
494 /// of the type are sized as well. Since asking for their size is relatively
495 /// uncommon, move this operation out-of-line.
496 bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
497};
498
// Printing of types.
/// Stream-insertion for Type: prints \p T to \p OS via Type::print and
/// returns the stream to allow chaining.
inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) {
  T.print(OS);
  return OS;
}
504
// allow isa<PointerType>(x) to work without DerivedTypes.h included.
template <> struct isa_impl<PointerType, Type> {
  // A Type is a PointerType exactly when its TypeID is PointerTyID; checking
  // the ID avoids needing PointerType's full definition here.
  static inline bool doit(const Type &Ty) {
    return Ty.getTypeID() == Type::PointerTyID;
  }
};
511
512// Create wrappers for C Binding types (see CBindingWrapping.h).
513DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef)inline Type *unwrap(LLVMTypeRef P) { return reinterpret_cast<
Type*>(P); } inline LLVMTypeRef wrap(const Type *P) { return
reinterpret_cast<LLVMTypeRef>(const_cast<Type*>(
P)); } template<typename T> inline T *unwrap(LLVMTypeRef
P) { return cast<T>(unwrap(P)); }
514
/* Specialized opaque type conversions.
 */
/// Reinterpret an array of C-API LLVMTypeRef handles as an array of Type*.
inline Type **unwrap(LLVMTypeRef* Tys) {
  return reinterpret_cast<Type**>(Tys);
}
520
/// Reinterpret an array of Type* as an array of C-API LLVMTypeRef handles.
inline LLVMTypeRef *wrap(Type **Tys) {
  // The const_cast is a no-op for a non-const Type**; presumably kept for
  // symmetry with the single-pointer wrap above — TODO confirm.
  return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
}
524
525} // end namespace llvm
526
527#endif // LLVM_IR_TYPE_H