Bug Summary

File: /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include/llvm/IR/Instructions.h
Warning: line 2720, column 5
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name IndirectBrExpandPass.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/CodeGen -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/CodeGen -I include -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-10-03-140002-15933-1 -x c++ /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/CodeGen/IndirectBrExpandPass.cpp

/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/CodeGen/IndirectBrExpandPass.cpp

1//===- IndirectBrExpandPass.cpp - Expand indirectbr to switch -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// Implements an expansion pass to turn `indirectbr` instructions in the IR
11/// into `switch` instructions. This works by enumerating the basic blocks in
12/// a dense range of integers, replacing each `blockaddr` constant with the
13/// corresponding integer constant, and then building a switch that maps from
14/// the integers to the actual blocks. All of the indirectbr instructions in the
15/// function are redirected to this common switch.
16///
17/// While this is generically useful if a target is unable to codegen
18/// `indirectbr` natively, it is primarily useful when there is some desire to
19/// get the builtin non-jump-table lowering of a switch even when the input
20/// source contained an explicit indirect branch construct.
21///
22/// Note that it doesn't make any sense to enable this pass unless a target also
23/// disables jump-table lowering of switches. Doing that is likely to pessimize
24/// the code.
25///
26//===----------------------------------------------------------------------===//
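For orientation, here is a minimal sketch (not part of the analyzed sources; the class name is hypothetical) of how a backend would typically schedule this pass from its TargetPassConfig subclass. The pass only does real work when the subtarget's enableIndirectBrExpand() hook returns true, as checked in runOnFunction below.

// Hypothetical backend pass config -- illustrative sketch only.
void MyTargetPassConfig::addIRPasses() {
  // Expand indirectbr into a switch before the generic IR passes run.
  addPass(createIndirectBrExpandPass());
  TargetPassConfig::addIRPasses();
}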
27
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/Sequence.h"
30#include "llvm/ADT/SmallVector.h"
31#include "llvm/Analysis/DomTreeUpdater.h"
32#include "llvm/CodeGen/TargetPassConfig.h"
33#include "llvm/CodeGen/TargetSubtargetInfo.h"
34#include "llvm/IR/BasicBlock.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/InitializePasses.h"
40#include "llvm/Pass.h"
41#include "llvm/Support/ErrorHandling.h"
42#include "llvm/Target/TargetMachine.h"
43
44using namespace llvm;
45
46#define DEBUG_TYPE "indirectbr-expand"
47
48namespace {
49
50class IndirectBrExpandPass : public FunctionPass {
51 const TargetLowering *TLI = nullptr;
52
53public:
54 static char ID; // Pass identification, replacement for typeid
55
56 IndirectBrExpandPass() : FunctionPass(ID) {
57 initializeIndirectBrExpandPassPass(*PassRegistry::getPassRegistry());
58 }
59
60 void getAnalysisUsage(AnalysisUsage &AU) const override {
61 AU.addPreserved<DominatorTreeWrapperPass>();
62 }
63
64 bool runOnFunction(Function &F) override;
65};
66
67} // end anonymous namespace
68
69char IndirectBrExpandPass::ID = 0;
70
71INITIALIZE_PASS_BEGIN(IndirectBrExpandPass, DEBUG_TYPE,
72 "Expand indirectbr instructions", false, false)
73INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
74INITIALIZE_PASS_END(IndirectBrExpandPass, DEBUG_TYPE,
75 "Expand indirectbr instructions", false, false)
76
77FunctionPass *llvm::createIndirectBrExpandPass() {
78 return new IndirectBrExpandPass();
79}
80
81bool IndirectBrExpandPass::runOnFunction(Function &F) {
82 auto &DL = F.getParent()->getDataLayout();
83 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
1. Calling 'Pass::getAnalysisIfAvailable'
7. Returning from 'Pass::getAnalysisIfAvailable'
84 if (!TPC)
8. Assuming 'TPC' is non-null
9. Taking false branch
85 return false;
86
87 auto &TM = TPC->getTM<TargetMachine>();
88 auto &STI = *TM.getSubtargetImpl(F);
89 if (!STI.enableIndirectBrExpand())
10. Assuming the condition is false
11. Taking false branch
90 return false;
91 TLI = STI.getTargetLowering();
92
93 Optional<DomTreeUpdater> DTU;
94 if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
11.1. 'DTWP' is null
12. Taking false branch
95 DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
96
97 SmallVector<IndirectBrInst *, 1> IndirectBrs;
98
99 // Set of all potential successors for indirectbr instructions.
100 SmallPtrSet<BasicBlock *, 4> IndirectBrSuccs;
101
102 // Build a list of indirectbrs that we want to rewrite.
103 for (BasicBlock &BB : F)
104 if (auto *IBr = dyn_cast<IndirectBrInst>(BB.getTerminator())) {
105 // Handle the degenerate case of no successors by replacing the indirectbr
106 // with unreachable as there is no successor available.
107 if (IBr->getNumSuccessors() == 0) {
108 (void)new UnreachableInst(F.getContext(), IBr);
109 IBr->eraseFromParent();
110 continue;
111 }
112
113 IndirectBrs.push_back(IBr);
114 for (BasicBlock *SuccBB : IBr->successors())
115 IndirectBrSuccs.insert(SuccBB);
116 }
117
118 if (IndirectBrs.empty())
13. Taking false branch
119 return false;
120
121 // If we need to replace any indirectbrs we need to establish integer
122 // constants that will correspond to each of the basic blocks in the function
123 // whose address escapes. We do that here and rewrite all the blockaddress
124 // constants to just be those integer constants cast to a pointer type.
125 SmallVector<BasicBlock *, 4> BBs;
126
127 for (BasicBlock &BB : F) {
128 // Skip blocks that aren't successors to an indirectbr we're going to
129 // rewrite.
130 if (!IndirectBrSuccs.count(&BB))
131 continue;
132
133 auto IsBlockAddressUse = [&](const Use &U) {
134 return isa<BlockAddress>(U.getUser());
135 };
136 auto BlockAddressUseIt = llvm::find_if(BB.uses(), IsBlockAddressUse);
137 if (BlockAddressUseIt == BB.use_end())
138 continue;
139
140 assert(std::find_if(std::next(BlockAddressUseIt), BB.use_end(),
141 IsBlockAddressUse) == BB.use_end() &&
142 "There should only ever be a single blockaddress use because it is "
143 "a constant and should be uniqued.");
144
145 auto *BA = cast<BlockAddress>(BlockAddressUseIt->getUser());
146
147 // Skip if the constant was formed but ended up not being used (due to DCE
148 // or whatever).
149 if (!BA->isConstantUsed())
150 continue;
151
152 // Compute the index we want to use for this basic block. We can't use zero
153 // because null can be compared with block addresses.
154 int BBIndex = BBs.size() + 1;
155 BBs.push_back(&BB);
156
157 auto *ITy = cast<IntegerType>(DL.getIntPtrType(BA->getType()));
158 ConstantInt *BBIndexC = ConstantInt::get(ITy, BBIndex);
159
160 // Now rewrite the blockaddress to an integer constant based on the index.
161 // FIXME: This part doesn't properly recognize other uses of blockaddress
162 // expressions, for instance, where they are used to pass labels to
163 // asm-goto. This part of the pass needs a rework.
164 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(BBIndexC, BA->getType()));
165 }
166
167 if (BBs.empty()) {
14. Taking false branch
168 // There are no blocks whose address is taken, so any indirectbr instruction
169 // cannot get a valid input and we can replace all of them with unreachable.
170 SmallVector<DominatorTree::UpdateType, 8> Updates;
171 if (DTU)
172 Updates.reserve(IndirectBrSuccs.size());
173 for (auto *IBr : IndirectBrs) {
174 if (DTU) {
175 for (BasicBlock *SuccBB : IBr->successors())
176 Updates.push_back({DominatorTree::Delete, IBr->getParent(), SuccBB});
177 }
178 (void)new UnreachableInst(F.getContext(), IBr);
179 IBr->eraseFromParent();
180 }
181 if (DTU) {
182 assert(Updates.size() == IndirectBrSuccs.size() &&
183 "Got unexpected update count.");
184 DTU->applyUpdates(Updates);
185 }
186 return true;
187 }
188
189 BasicBlock *SwitchBB;
190 Value *SwitchValue;
191
192 // Compute a common integer type across all the indirectbr instructions.
193 IntegerType *CommonITy = nullptr;
15. 'CommonITy' initialized to a null pointer value
194 for (auto *IBr : IndirectBrs) {
16. Assuming '__begin1' is equal to '__end1'
195 auto *ITy =
196 cast<IntegerType>(DL.getIntPtrType(IBr->getAddress()->getType()));
197 if (!CommonITy || ITy->getBitWidth() > CommonITy->getBitWidth())
198 CommonITy = ITy;
199 }
200
201 auto GetSwitchValue = [DL, CommonITy](IndirectBrInst *IBr) {
202 return CastInst::CreatePointerCast(
203 IBr->getAddress(), CommonITy,
204 Twine(IBr->getAddress()->getName()) + ".switch_cast", IBr);
205 };
206
207 SmallVector<DominatorTree::UpdateType, 8> Updates;
208
209 if (IndirectBrs.size() == 1) {
17. Assuming the condition is false
18. Taking false branch
210 // If we only have one indirectbr, we can just directly replace it within
211 // its block.
212 IndirectBrInst *IBr = IndirectBrs[0];
213 SwitchBB = IBr->getParent();
214 SwitchValue = GetSwitchValue(IBr);
215 if (DTU) {
216 Updates.reserve(IndirectBrSuccs.size());
217 for (BasicBlock *SuccBB : IBr->successors())
218 Updates.push_back({DominatorTree::Delete, IBr->getParent(), SuccBB});
219 assert(Updates.size() == IndirectBrSuccs.size() &&
220 "Got unexpected update count.");
221 }
222 IBr->eraseFromParent();
223 } else {
224 // Otherwise we need to create a new block to hold the switch across BBs,
225 // jump to that block instead of each indirectbr, and phi together the
226 // values for the switch.
227 SwitchBB = BasicBlock::Create(F.getContext(), "switch_bb", &F);
228 auto *SwitchPN = PHINode::Create(CommonITy, IndirectBrs.size(),
19. Passing null pointer value via 1st parameter 'Ty'
20. Calling 'PHINode::Create'
229 "switch_value_phi", SwitchBB);
230 SwitchValue = SwitchPN;
231
232 // Now replace the indirectbr instructions with direct branches to the
233 // switch block and fill out the PHI operands.
234 if (DTU)
235 Updates.reserve(IndirectBrs.size() + 2 * IndirectBrSuccs.size());
236 for (auto *IBr : IndirectBrs) {
237 SwitchPN->addIncoming(GetSwitchValue(IBr), IBr->getParent());
238 BranchInst::Create(SwitchBB, IBr);
239 if (DTU) {
240 Updates.push_back({DominatorTree::Insert, IBr->getParent(), SwitchBB});
241 for (BasicBlock *SuccBB : IBr->successors())
242 Updates.push_back({DominatorTree::Delete, IBr->getParent(), SuccBB});
243 }
244 IBr->eraseFromParent();
245 }
246 }
247
248 // Now build the switch in the block. The block will have no terminator
249 // already.
250 auto *SI = SwitchInst::Create(SwitchValue, BBs[0], BBs.size(), SwitchBB);
251
252 // Add a case for each block.
253 for (int i : llvm::seq<int>(1, BBs.size()))
254 SI->addCase(ConstantInt::get(CommonITy, i + 1), BBs[i]);
255
256 if (DTU) {
257 // If there were multiple indirectbr's, they may have common successors,
258 // but in the dominator tree, we only track unique edges.
259 SmallPtrSet<BasicBlock *, 8> UniqueSuccessors;
260 Updates.reserve(Updates.size() + BBs.size());
261 for (BasicBlock *BB : BBs) {
262 if (UniqueSuccessors.insert(BB).second)
263 Updates.push_back({DominatorTree::Insert, SwitchBB, BB});
264 }
265 DTU->applyUpdates(Updates);
266 }
267
268 return true;
269}
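The path reported above (steps 15-20) hinges on the loop over IndirectBrs at line 194 executing zero times, which leaves CommonITy null when it is handed to PHINode::Create at line 228. Since the early return at line 118 guarantees IndirectBrs is non-empty at that point, the report looks like a false positive. A hedged sketch of how the invariant could be stated explicitly for the analyzer (not a change present in this tree):

// Hypothetical addition after the loop at lines 194-199 -- sketch only.
assert(CommonITy &&
       "IndirectBrs is non-empty (checked at line 118), so a common type exists");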

/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include/llvm/PassAnalysisSupport.h

1//===- llvm/PassAnalysisSupport.h - Analysis Pass Support code --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines stuff that is used to define and "use" Analysis Passes.
10// This file is automatically #included by Pass.h, so:
11//
12// NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
13//
14// Instead, #include Pass.h
15//
16//===----------------------------------------------------------------------===//
17
18#if !defined(LLVM_PASS_H) || defined(LLVM_PASSANALYSISSUPPORT_H)
19#error "Do not include <PassAnalysisSupport.h>; include <Pass.h> instead"
20#endif
21
22#ifndef LLVM_PASSANALYSISSUPPORT_H
23#define LLVM_PASSANALYSISSUPPORT_H
24
25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/SmallVector.h"
27#include <cassert>
28#include <tuple>
29#include <utility>
30#include <vector>
31
32namespace llvm {
33
34class Function;
35class Pass;
36class PMDataManager;
37class StringRef;
38
39//===----------------------------------------------------------------------===//
40/// Represent the analysis usage information of a pass. This tracks analyses
41/// that the pass REQUIRES (must be available when the pass runs), REQUIRES
42/// TRANSITIVE (must be available throughout the lifetime of the pass), and
43/// analyses that the pass PRESERVES (the pass does not invalidate the results
44/// of these analyses). This information is provided by a pass to the Pass
45/// infrastructure through the getAnalysisUsage virtual function.
46///
47class AnalysisUsage {
48public:
49 using VectorType = SmallVectorImpl<AnalysisID>;
50
51private:
52 /// Sets of analyses required and preserved by a pass
53 // TODO: It's not clear that SmallVector is an appropriate data structure for
54 // this usecase. The sizes were picked to minimize wasted space, but are
55 // otherwise fairly meaningless.
56 SmallVector<AnalysisID, 8> Required;
57 SmallVector<AnalysisID, 2> RequiredTransitive;
58 SmallVector<AnalysisID, 2> Preserved;
59 SmallVector<AnalysisID, 0> Used;
60 bool PreservesAll = false;
61
62 void pushUnique(VectorType &Set, AnalysisID ID) {
63 if (!llvm::is_contained(Set, ID))
64 Set.push_back(ID);
65 }
66
67public:
68 AnalysisUsage() = default;
69
70 ///@{
71 /// Add the specified ID to the required set of the usage info for a pass.
72 AnalysisUsage &addRequiredID(const void *ID);
73 AnalysisUsage &addRequiredID(char &ID);
74 template<class PassClass>
75 AnalysisUsage &addRequired() {
76 return addRequiredID(PassClass::ID);
77 }
78
79 AnalysisUsage &addRequiredTransitiveID(char &ID);
80 template<class PassClass>
81 AnalysisUsage &addRequiredTransitive() {
82 return addRequiredTransitiveID(PassClass::ID);
83 }
84 ///@}
85
86 ///@{
87 /// Add the specified ID to the set of analyses preserved by this pass.
88 AnalysisUsage &addPreservedID(const void *ID) {
89 pushUnique(Preserved, ID);
90 return *this;
91 }
92 AnalysisUsage &addPreservedID(char &ID) {
93 pushUnique(Preserved, &ID);
94 return *this;
95 }
96 /// Add the specified Pass class to the set of analyses preserved by this pass.
97 template<class PassClass>
98 AnalysisUsage &addPreserved() {
99 pushUnique(Preserved, &PassClass::ID);
100 return *this;
101 }
102 ///@}
103
104 ///@{
105 /// Add the specified ID to the set of analyses used by this pass if they are
106 /// available..
107 AnalysisUsage &addUsedIfAvailableID(const void *ID) {
108 pushUnique(Used, ID);
109 return *this;
110 }
111 AnalysisUsage &addUsedIfAvailableID(char &ID) {
112 pushUnique(Used, &ID);
113 return *this;
114 }
115 /// Add the specified Pass class to the set of analyses used by this pass.
116 template<class PassClass>
117 AnalysisUsage &addUsedIfAvailable() {
118 pushUnique(Used, &PassClass::ID);
119 return *this;
120 }
121 ///@}
122
123 /// Add the Pass with the specified argument string to the set of analyses
124 /// preserved by this pass. If no such Pass exists, do nothing. This can be
125 /// useful when a pass is trivially preserved, but may not be linked in. Be
126 /// careful about spelling!
127 AnalysisUsage &addPreserved(StringRef Arg);
128
129 /// Set by analyses that do not transform their input at all
130 void setPreservesAll() { PreservesAll = true; }
131
132 /// Determine whether a pass said it does not transform its input at all
133 bool getPreservesAll() const { return PreservesAll; }
134
135 /// This function should be called by the pass, iff they do not:
136 ///
137 /// 1. Add or remove basic blocks from the function
138 /// 2. Modify terminator instructions in any way.
139 ///
140 /// This function annotates the AnalysisUsage info object to say that analyses
141 /// that only depend on the CFG are preserved by this pass.
142 void setPreservesCFG();
143
144 const VectorType &getRequiredSet() const { return Required; }
145 const VectorType &getRequiredTransitiveSet() const {
146 return RequiredTransitive;
147 }
148 const VectorType &getPreservedSet() const { return Preserved; }
149 const VectorType &getUsedSet() const { return Used; }
150};
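As a short usage sketch (class name hypothetical), a legacy pass typically fills in an AnalysisUsage like this from its getAnalysisUsage override:

// Illustrative only -- declares required and preserved analyses for a pass.
void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<DominatorTreeWrapperPass>();   // must be available before running
  AU.addPreserved<DominatorTreeWrapperPass>();  // still valid after this pass
  AU.setPreservesCFG();                         // no blocks or terminators change
}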
151
152//===----------------------------------------------------------------------===//
153/// AnalysisResolver - Simple interface used by Pass objects to pull all
154/// analysis information out of pass manager that is responsible to manage
155/// the pass.
156///
157class AnalysisResolver {
158public:
159 AnalysisResolver() = delete;
160 explicit AnalysisResolver(PMDataManager &P) : PM(P) {}
161
162 PMDataManager &getPMDataManager() { return PM; }
163
164 /// Find pass that is implementing PI.
165 Pass *findImplPass(AnalysisID PI) {
166 Pass *ResultPass = nullptr;
167 for (const auto &AnalysisImpl : AnalysisImpls) {
168 if (AnalysisImpl.first == PI) {
169 ResultPass = AnalysisImpl.second;
170 break;
171 }
172 }
173 return ResultPass;
174 }
175
176 /// Find pass that is implementing PI. Initialize pass for Function F.
177 std::tuple<Pass *, bool> findImplPass(Pass *P, AnalysisID PI, Function &F);
178
179 void addAnalysisImplsPair(AnalysisID PI, Pass *P) {
180 if (findImplPass(PI) == P)
181 return;
182 std::pair<AnalysisID, Pass*> pir = std::make_pair(PI,P);
183 AnalysisImpls.push_back(pir);
184 }
185
186 /// Clear cache that is used to connect a pass to the analysis (PassInfo).
187 void clearAnalysisImpls() {
188 AnalysisImpls.clear();
189 }
190
191 /// Return analysis result or null if it doesn't exist.
192 Pass *getAnalysisIfAvailable(AnalysisID ID) const;
193
194private:
195 /// This keeps track of which passes implements the interfaces that are
196 /// required by the current pass (to implement getAnalysis()).
197 std::vector<std::pair<AnalysisID, Pass *>> AnalysisImpls;
198
199 /// PassManager that is used to resolve analysis info
200 PMDataManager &PM;
201};
202
203/// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
204/// get analysis information that might be around, for example to update it.
205/// This is different than getAnalysis in that it can fail (if the analysis
206/// results haven't been computed), so should only be used if you can handle
207/// the case when the analysis is not available. This method is often used by
208/// transformation APIs to update analysis results for a pass automatically as
209/// the transform is performed.
210template<typename AnalysisType>
211AnalysisType *Pass::getAnalysisIfAvailable() const {
212 assert(Resolver && "Pass not resident in a PassManager object!");
2. Assuming field 'Resolver' is non-null
3. '?' condition is true
213
214 const void *PI = &AnalysisType::ID;
215
216 Pass *ResultPass = Resolver->getAnalysisIfAvailable(PI);
217 if (!ResultPass) return nullptr;
4. Assuming 'ResultPass' is non-null
5. Taking false branch
218
219 // Because the AnalysisType may not be a subclass of pass (for
220 // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
221 // adjust the return pointer (because the class may multiply inherit, once
222 // from pass, once from AnalysisType).
223 return (AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
6. Returning pointer, which participates in a condition later
224}
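A minimal usage sketch (assuming it runs inside a pass member function): the essential contract is that the returned pointer may be null and must be checked, exactly as IndirectBrExpandPass does at line 94 above.

// Illustrative only -- the analysis may not have been computed.
if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
  DTWP->getDomTree().verify();  // only reached when the result is non-null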
225
226/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
227/// to the analysis information that they claim to use by overriding the
228/// getAnalysisUsage function.
229template<typename AnalysisType>
230AnalysisType &Pass::getAnalysis() const {
231 assert(Resolver && "Pass has not been inserted into a PassManager object!");
232 return getAnalysisID<AnalysisType>(&AnalysisType::ID);
233}
234
235template<typename AnalysisType>
236AnalysisType &Pass::getAnalysisID(AnalysisID PI) const {
237 assert(PI && "getAnalysis for unregistered pass!");
238 assert(Resolver && "Pass has not been inserted into a PassManager object!");
239 // PI *must* appear in AnalysisImpls. Because the number of passes used
240 // should be a small number, we just do a linear search over a (dense)
241 // vector.
242 Pass *ResultPass = Resolver->findImplPass(PI);
243 assert(ResultPass &&
244 "getAnalysis*() called on an analysis that was not "
245 "'required' by pass!");
246
247 // Because the AnalysisType may not be a subclass of pass (for
248 // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
249 // adjust the return pointer (because the class may multiply inherit, once
250 // from pass, once from AnalysisType).
251 return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
252}
253
254/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
255/// to the analysis information that they claim to use by overriding the
256/// getAnalysisUsage function. If as part of the dependencies, an IR
257/// transformation is triggered (e.g. because the analysis requires
258/// BreakCriticalEdges), and Changed is non null, *Changed is updated.
259template <typename AnalysisType>
260AnalysisType &Pass::getAnalysis(Function &F, bool *Changed) {
261 assert(Resolver && "Pass has not been inserted into a PassManager object!");
262
263 return getAnalysisID<AnalysisType>(&AnalysisType::ID, F, Changed);
264}
265
266template <typename AnalysisType>
267AnalysisType &Pass::getAnalysisID(AnalysisID PI, Function &F, bool *Changed) {
268 assert(PI && "getAnalysis for unregistered pass!");
269 assert(Resolver && "Pass has not been inserted into a PassManager object!");
270 // PI *must* appear in AnalysisImpls. Because the number of passes used
271 // should be a small number, we just do a linear search over a (dense)
272 // vector.
273 Pass *ResultPass;
274 bool LocalChanged;
275 std::tie(ResultPass, LocalChanged) = Resolver->findImplPass(this, PI, F);
276
277 assert(ResultPass && "Unable to find requested analysis info");
278 if (Changed)
279 *Changed |= LocalChanged;
280 else
281 assert(!LocalChanged &&
282 "A pass trigged a code update but the update status is lost");
283
284 // Because the AnalysisType may not be a subclass of pass (for
285 // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
286 // adjust the return pointer (because the class may multiply inherit, once
287 // from pass, once from AnalysisType).
288 return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
289}
290
291} // end namespace llvm
292
293#endif // LLVM_PASSANALYSISSUPPORT_H

/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/CFG.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/InstrTypes.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/OperandTraits.h"
33#include "llvm/IR/Use.h"
34#include "llvm/IR/User.h"
35#include "llvm/Support/AtomicOrdering.h"
36#include "llvm/Support/ErrorHandling.h"
37#include <cassert>
38#include <cstddef>
39#include <cstdint>
40#include <iterator>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class BlockAddress;
48class ConstantInt;
49class DataLayout;
50class StringRef;
51class Type;
52class Value;
53
54//===----------------------------------------------------------------------===//
55// AllocaInst Class
56//===----------------------------------------------------------------------===//
57
58/// an instruction to allocate memory on the stack
59class AllocaInst : public UnaryInstruction {
60 Type *AllocatedType;
61
62 using AlignmentField = AlignmentBitfieldElementT<0>;
63 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
64 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
65 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
66 SwiftErrorField>(),
67 "Bitfields must be contiguous");
68
69protected:
70 // Note: Instruction needs to be a friend here to call cloneImpl.
71 friend class Instruction;
72
73 AllocaInst *cloneImpl() const;
74
75public:
76 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
77 const Twine &Name, Instruction *InsertBefore);
78 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, BasicBlock *InsertAtEnd);
80
81 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
82 Instruction *InsertBefore);
83 AllocaInst(Type *Ty, unsigned AddrSpace,
84 const Twine &Name, BasicBlock *InsertAtEnd);
85
86 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
87 const Twine &Name = "", Instruction *InsertBefore = nullptr);
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name, BasicBlock *InsertAtEnd);
90
91 /// Return true if there is an allocation size parameter to the allocation
92 /// instruction that is not 1.
93 bool isArrayAllocation() const;
94
95 /// Get the number of elements allocated. For a simple allocation of a single
96 /// element, this will return a constant 1 value.
97 const Value *getArraySize() const { return getOperand(0); }
98 Value *getArraySize() { return getOperand(0); }
99
100 /// Overload to return most specific pointer type.
101 PointerType *getType() const {
102 return cast<PointerType>(Instruction::getType());
103 }
104
105 /// Return the address space for the allocation.
106 unsigned getAddressSpace() const {
107 return getType()->getAddressSpace();
108 }
109
110 /// Get allocation size in bits. Returns None if size can't be determined,
111 /// e.g. in case of a VLA.
112 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
113
114 /// Return the type that is being allocated by the instruction.
115 Type *getAllocatedType() const { return AllocatedType; }
116 /// for use only in special circumstances that need to generically
117 /// transform a whole instruction (eg: IR linking and vectorization).
118 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
119
120 /// Return the alignment of the memory that is being allocated by the
121 /// instruction.
122 Align getAlign() const {
123 return Align(1ULL << getSubclassData<AlignmentField>());
124 }
125
126 void setAlignment(Align Align) {
127 setSubclassData<AlignmentField>(Log2(Align));
128 }
129
130 /// Return true if this alloca is in the entry block of the function and is a
131 /// constant size. If so, the code generator will fold it into the
132 /// prolog/epilog code, so it is basically free.
133 bool isStaticAlloca() const;
134
135 /// Return true if this alloca is used as an inalloca argument to a call. Such
136 /// allocas are never considered static even if they are in the entry block.
137 bool isUsedWithInAlloca() const {
138 return getSubclassData<UsedWithInAllocaField>();
139 }
140
141 /// Specify whether this alloca is used to represent the arguments to a call.
142 void setUsedWithInAlloca(bool V) {
143 setSubclassData<UsedWithInAllocaField>(V);
144 }
145
146 /// Return true if this alloca is used as a swifterror argument to a call.
147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148 /// Specify whether this alloca is used to represent a swifterror.
149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 template <typename Bitfield>
163 void setSubclassData(typename Bitfield::Type Value) {
164 Instruction::setSubclassData<Bitfield>(Value);
165 }
166};
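A hedged example of the API above (assumes an llvm::Function &F and an IRBuilder are available; not taken from the analyzed sources):

// Fixed-size, 4-byte-aligned stack slot created in the entry block.
IRBuilder<> B(&F.getEntryBlock(), F.getEntryBlock().begin());
AllocaInst *Slot = B.CreateAlloca(B.getInt32Ty(), /*ArraySize=*/nullptr, "slot");
Slot->setAlignment(Align(4));
assert(Slot->isStaticAlloca() && "constant-size alloca in the entry block");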
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
172/// An instruction for reading from memory. This uses the SubclassData field in
173/// Value to store whether or not the load is volatile.
174class LoadInst : public UnaryInstruction {
175 using VolatileField = BoolBitfieldElementT<0>;
176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
178 static_assert(
179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
180 "Bitfields must be contiguous");
181
182 void AssertOK();
183
184protected:
185 // Note: Instruction needs to be a friend here to call cloneImpl.
186 friend class Instruction;
187
188 LoadInst *cloneImpl() const;
189
190public:
191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
192 Instruction *InsertBefore);
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 BasicBlock *InsertAtEnd);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 Align Align, Instruction *InsertBefore = nullptr);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, BasicBlock *InsertAtEnd);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, AtomicOrdering Order,
204 SyncScope::ID SSID = SyncScope::System,
205 Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
208 BasicBlock *InsertAtEnd);
209
210 /// Return true if this is a load from a volatile memory location.
211 bool isVolatile() const { return getSubclassData<VolatileField>(); }
212
213 /// Specify whether this is a volatile load or not.
214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
215
216 /// Return the alignment of the access that is being performed.
217 Align getAlign() const {
218 return Align(1ULL << (getSubclassData<AlignmentField>()));
219 }
220
221 void setAlignment(Align Align) {
222 setSubclassData<AlignmentField>(Log2(Align));
223 }
224
225 /// Returns the ordering constraint of this load instruction.
226 AtomicOrdering getOrdering() const {
227 return getSubclassData<OrderingField>();
228 }
229 /// Sets the ordering constraint of this load instruction. May not be Release
230 /// or AcquireRelease.
231 void setOrdering(AtomicOrdering Ordering) {
232 setSubclassData<OrderingField>(Ordering);
233 }
234
235 /// Returns the synchronization scope ID of this load instruction.
236 SyncScope::ID getSyncScopeID() const {
237 return SSID;
238 }
239
240 /// Sets the synchronization scope ID of this load instruction.
241 void setSyncScopeID(SyncScope::ID SSID) {
242 this->SSID = SSID;
243 }
244
245 /// Sets the ordering constraint and the synchronization scope ID of this load
246 /// instruction.
247 void setAtomic(AtomicOrdering Ordering,
248 SyncScope::ID SSID = SyncScope::System) {
249 setOrdering(Ordering);
250 setSyncScopeID(SSID);
251 }
252
253 bool isSimple() const { return !isAtomic() && !isVolatile(); }
254
255 bool isUnordered() const {
256 return (getOrdering() == AtomicOrdering::NotAtomic ||
257 getOrdering() == AtomicOrdering::Unordered) &&
258 !isVolatile();
259 }
260
261 Value *getPointerOperand() { return getOperand(0); }
262 const Value *getPointerOperand() const { return getOperand(0); }
263 static unsigned getPointerOperandIndex() { return 0U; }
264 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
265
266 /// Returns the address space of the pointer operand.
267 unsigned getPointerAddressSpace() const {
268 return getPointerOperandType()->getPointerAddressSpace();
269 }
270
271 // Methods for support type inquiry through isa, cast, and dyn_cast:
272 static bool classof(const Instruction *I) {
273 return I->getOpcode() == Instruction::Load;
274 }
275 static bool classof(const Value *V) {
276 return isa<Instruction>(V) && classof(cast<Instruction>(V));
277 }
278
279private:
280 // Shadow Instruction::setInstructionSubclassData with a private forwarding
281 // method so that subclasses cannot accidentally use it.
282 template <typename Bitfield>
283 void setSubclassData(typename Bitfield::Type Value) {
284 Instruction::setSubclassData<Bitfield>(Value);
285 }
286
287 /// The synchronization scope ID of this load instruction. Not quite enough
288 /// room in SubClassData for everything, so synchronization scope ID gets its
289 /// own field.
290 SyncScope::ID SSID;
291};
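A hedged sketch of the accessors above (B is an IRBuilder<> and Ptr an i32 pointer, both assumed):

// Aligned load, then upgraded to an acquire atomic access.
LoadInst *LI = B.CreateAlignedLoad(B.getInt32Ty(), Ptr, Align(4), "val");
LI->setAtomic(AtomicOrdering::Acquire);  // may not be Release or AcquireRelease
assert(!LI->isSimple() && LI->getAlign() == Align(4));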
292
293//===----------------------------------------------------------------------===//
294// StoreInst Class
295//===----------------------------------------------------------------------===//
296
297/// An instruction for storing to memory.
298class StoreInst : public Instruction {
299 using VolatileField = BoolBitfieldElementT<0>;
300 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
301 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
302 static_assert(
303 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
304 "Bitfields must be contiguous");
305
306 void AssertOK();
307
308protected:
309 // Note: Instruction needs to be a friend here to call cloneImpl.
310 friend class Instruction;
311
312 StoreInst *cloneImpl() const;
313
314public:
315 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
316 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
317 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
318 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
319 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
320 Instruction *InsertBefore = nullptr);
321 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
322 BasicBlock *InsertAtEnd);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
324 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
325 Instruction *InsertBefore = nullptr);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
328
329 // allocate space for exactly two operands
330 void *operator new(size_t S) { return User::operator new(S, 2); }
331 void operator delete(void *Ptr) { User::operator delete(Ptr); }
332
333 /// Return true if this is a store to a volatile memory location.
334 bool isVolatile() const { return getSubclassData<VolatileField>(); }
335
336 /// Specify whether this is a volatile store or not.
337 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
338
339 /// Transparently provide more efficient getOperand methods.
340 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
341
342 Align getAlign() const {
343 return Align(1ULL << (getSubclassData<AlignmentField>()));
344 }
345
346 void setAlignment(Align Align) {
347 setSubclassData<AlignmentField>(Log2(Align));
348 }
349
350 /// Returns the ordering constraint of this store instruction.
351 AtomicOrdering getOrdering() const {
352 return getSubclassData<OrderingField>();
353 }
354
355 /// Sets the ordering constraint of this store instruction. May not be
356 /// Acquire or AcquireRelease.
357 void setOrdering(AtomicOrdering Ordering) {
358 setSubclassData<OrderingField>(Ordering);
359 }
360
361 /// Returns the synchronization scope ID of this store instruction.
362 SyncScope::ID getSyncScopeID() const {
363 return SSID;
364 }
365
366 /// Sets the synchronization scope ID of this store instruction.
367 void setSyncScopeID(SyncScope::ID SSID) {
368 this->SSID = SSID;
369 }
370
371 /// Sets the ordering constraint and the synchronization scope ID of this
372 /// store instruction.
373 void setAtomic(AtomicOrdering Ordering,
374 SyncScope::ID SSID = SyncScope::System) {
375 setOrdering(Ordering);
376 setSyncScopeID(SSID);
377 }
378
379 bool isSimple() const { return !isAtomic() && !isVolatile(); }
380
381 bool isUnordered() const {
382 return (getOrdering() == AtomicOrdering::NotAtomic ||
383 getOrdering() == AtomicOrdering::Unordered) &&
384 !isVolatile();
385 }
386
387 Value *getValueOperand() { return getOperand(0); }
388 const Value *getValueOperand() const { return getOperand(0); }
389
390 Value *getPointerOperand() { return getOperand(1); }
391 const Value *getPointerOperand() const { return getOperand(1); }
392 static unsigned getPointerOperandIndex() { return 1U; }
393 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
394
395 /// Returns the address space of the pointer operand.
396 unsigned getPointerAddressSpace() const {
397 return getPointerOperandType()->getPointerAddressSpace();
398 }
399
400 // Methods for support type inquiry through isa, cast, and dyn_cast:
401 static bool classof(const Instruction *I) {
402 return I->getOpcode() == Instruction::Store;
403 }
404 static bool classof(const Value *V) {
405 return isa<Instruction>(V) && classof(cast<Instruction>(V));
406 }
407
408private:
409 // Shadow Instruction::setInstructionSubclassData with a private forwarding
410 // method so that subclasses cannot accidentally use it.
411 template <typename Bitfield>
412 void setSubclassData(typename Bitfield::Type Value) {
413 Instruction::setSubclassData<Bitfield>(Value);
414 }
415
416 /// The synchronization scope ID of this store instruction. Not quite enough
417 /// room in SubClassData for everything, so synchronization scope ID gets its
418 /// own field.
419 SyncScope::ID SSID;
420};
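A matching store sketch (B, Val, and Ptr assumed in scope); note that the pointer is operand 1, consistent with getPointerOperandIndex() above:

// Release-ordered, 4-byte-aligned store.
StoreInst *SI = B.CreateAlignedStore(Val, Ptr, Align(4));
SI->setAtomic(AtomicOrdering::Release);  // may not be Acquire or AcquireRelease
assert(SI->getValueOperand() == Val && SI->getPointerOperand() == Ptr);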
421
422template <>
423struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
424};
425
426 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
427
428//===----------------------------------------------------------------------===//
429// FenceInst Class
430//===----------------------------------------------------------------------===//
431
432/// An instruction for ordering other memory operations.
433class FenceInst : public Instruction {
434 using OrderingField = AtomicOrderingBitfieldElementT<0>;
435
436 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
437
438protected:
439 // Note: Instruction needs to be a friend here to call cloneImpl.
440 friend class Instruction;
441
442 FenceInst *cloneImpl() const;
443
444public:
445 // Ordering may only be Acquire, Release, AcquireRelease, or
446 // SequentiallyConsistent.
447 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
448 SyncScope::ID SSID = SyncScope::System,
449 Instruction *InsertBefore = nullptr);
450 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
451 BasicBlock *InsertAtEnd);
452
453 // allocate space for exactly zero operands
454 void *operator new(size_t S) { return User::operator new(S, 0); }
455 void operator delete(void *Ptr) { User::operator delete(Ptr); }
456
457 /// Returns the ordering constraint of this fence instruction.
458 AtomicOrdering getOrdering() const {
459 return getSubclassData<OrderingField>();
460 }
461
462 /// Sets the ordering constraint of this fence instruction. May only be
463 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
464 void setOrdering(AtomicOrdering Ordering) {
465 setSubclassData<OrderingField>(Ordering);
466 }
467
468 /// Returns the synchronization scope ID of this fence instruction.
469 SyncScope::ID getSyncScopeID() const {
470 return SSID;
471 }
472
473 /// Sets the synchronization scope ID of this fence instruction.
474 void setSyncScopeID(SyncScope::ID SSID) {
475 this->SSID = SSID;
476 }
477
478 // Methods for support type inquiry through isa, cast, and dyn_cast:
479 static bool classof(const Instruction *I) {
480 return I->getOpcode() == Instruction::Fence;
481 }
482 static bool classof(const Value *V) {
483 return isa<Instruction>(V) && classof(cast<Instruction>(V));
484 }
485
486private:
487 // Shadow Instruction::setInstructionSubclassData with a private forwarding
488 // method so that subclasses cannot accidentally use it.
489 template <typename Bitfield>
490 void setSubclassData(typename Bitfield::Type Value) {
491 Instruction::setSubclassData<Bitfield>(Value);
492 }
493
494 /// The synchronization scope ID of this fence instruction. Not quite enough
495 /// room in SubClassData for everything, so synchronization scope ID gets its
496 /// own field.
497 SyncScope::ID SSID;
498};
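A one-line sketch (B assumed): FenceInst only accepts Acquire, Release, AcquireRelease, or SequentiallyConsistent orderings.

// Full barrier in the default (System) synchronization scope.
FenceInst *Fence = B.CreateFence(AtomicOrdering::SequentiallyConsistent);
(void)Fence;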
499
500//===----------------------------------------------------------------------===//
501// AtomicCmpXchgInst Class
502//===----------------------------------------------------------------------===//
503
504/// An instruction that atomically checks whether a
505/// specified value is in a memory location, and, if it is, stores a new value
506/// there. The value returned by this instruction is a pair containing the
507/// original value as first element, and an i1 indicating success (true) or
508/// failure (false) as second element.
509///
510class AtomicCmpXchgInst : public Instruction {
511 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
512 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
513 SyncScope::ID SSID);
514
515 template <unsigned Offset>
516 using AtomicOrderingBitfieldElement =
517 typename Bitfield::Element<AtomicOrdering, Offset, 3,
518 AtomicOrdering::LAST>;
519
520protected:
521 // Note: Instruction needs to be a friend here to call cloneImpl.
522 friend class Instruction;
523
524 AtomicCmpXchgInst *cloneImpl() const;
525
526public:
527 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
528 AtomicOrdering SuccessOrdering,
529 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
530 Instruction *InsertBefore = nullptr);
531 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
532 AtomicOrdering SuccessOrdering,
533 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
534 BasicBlock *InsertAtEnd);
535
536 // allocate space for exactly three operands
537 void *operator new(size_t S) { return User::operator new(S, 3); }
538 void operator delete(void *Ptr) { User::operator delete(Ptr); }
539
540 using VolatileField = BoolBitfieldElementT<0>;
541 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
542 using SuccessOrderingField =
543 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
544 using FailureOrderingField =
545 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
546 using AlignmentField =
547 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
548 static_assert(
549 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
550 FailureOrderingField, AlignmentField>(),
551 "Bitfields must be contiguous");
552
553 /// Return the alignment of the memory that is being accessed by the
554 /// instruction.
555 Align getAlign() const {
556 return Align(1ULL << getSubclassData<AlignmentField>());
557 }
558
559 void setAlignment(Align Align) {
560 setSubclassData<AlignmentField>(Log2(Align));
561 }
562
563 /// Return true if this is a cmpxchg from a volatile memory
564 /// location.
565 ///
566 bool isVolatile() const { return getSubclassData<VolatileField>(); }
567
568 /// Specify whether this is a volatile cmpxchg.
569 ///
570 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
571
572 /// Return true if this cmpxchg may spuriously fail.
573 bool isWeak() const { return getSubclassData<WeakField>(); }
574
575 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
576
577 /// Transparently provide more efficient getOperand methods.
578 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
579
580 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
581 return Ordering != AtomicOrdering::NotAtomic &&
582 Ordering != AtomicOrdering::Unordered;
583 }
584
585 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
586 return Ordering != AtomicOrdering::NotAtomic &&
587 Ordering != AtomicOrdering::Unordered &&
588 Ordering != AtomicOrdering::AcquireRelease &&
589 Ordering != AtomicOrdering::Release;
590 }
591
592 /// Returns the success ordering constraint of this cmpxchg instruction.
593 AtomicOrdering getSuccessOrdering() const {
594 return getSubclassData<SuccessOrderingField>();
595 }
596
597 /// Sets the success ordering constraint of this cmpxchg instruction.
598 void setSuccessOrdering(AtomicOrdering Ordering) {
599 assert(isValidSuccessOrdering(Ordering) &&
600 "invalid CmpXchg success ordering");
601 setSubclassData<SuccessOrderingField>(Ordering);
602 }
603
604 /// Returns the failure ordering constraint of this cmpxchg instruction.
605 AtomicOrdering getFailureOrdering() const {
606 return getSubclassData<FailureOrderingField>();
607 }
608
609 /// Sets the failure ordering constraint of this cmpxchg instruction.
610 void setFailureOrdering(AtomicOrdering Ordering) {
611 assert(isValidFailureOrdering(Ordering) &&
612 "invalid CmpXchg failure ordering");
613 setSubclassData<FailureOrderingField>(Ordering);
614 }
615
616 /// Returns a single ordering which is at least as strong as both the
617 /// success and failure orderings for this cmpxchg.
618 AtomicOrdering getMergedOrdering() const {
619 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
620 return AtomicOrdering::SequentiallyConsistent;
621 if (getFailureOrdering() == AtomicOrdering::Acquire) {
622 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
623 return AtomicOrdering::Acquire;
624 if (getSuccessOrdering() == AtomicOrdering::Release)
625 return AtomicOrdering::AcquireRelease;
626 }
627 return getSuccessOrdering();
628 }
629
630 /// Returns the synchronization scope ID of this cmpxchg instruction.
631 SyncScope::ID getSyncScopeID() const {
632 return SSID;
633 }
634
635 /// Sets the synchronization scope ID of this cmpxchg instruction.
636 void setSyncScopeID(SyncScope::ID SSID) {
637 this->SSID = SSID;
638 }
639
640 Value *getPointerOperand() { return getOperand(0); }
641 const Value *getPointerOperand() const { return getOperand(0); }
642 static unsigned getPointerOperandIndex() { return 0U; }
643
644 Value *getCompareOperand() { return getOperand(1); }
645 const Value *getCompareOperand() const { return getOperand(1); }
646
647 Value *getNewValOperand() { return getOperand(2); }
648 const Value *getNewValOperand() const { return getOperand(2); }
649
650 /// Returns the address space of the pointer operand.
651 unsigned getPointerAddressSpace() const {
652 return getPointerOperand()->getType()->getPointerAddressSpace();
653 }
654
655 /// Returns the strongest permitted ordering on failure, given the
656 /// desired ordering on success.
657 ///
658 /// If the comparison in a cmpxchg operation fails, there is no atomic store
659 /// so release semantics cannot be provided. So this function drops explicit
660 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
661 /// operation would remain SequentiallyConsistent.
662 static AtomicOrdering
663 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
664 switch (SuccessOrdering) {
665 default:
666 llvm_unreachable("invalid cmpxchg success ordering");
667 case AtomicOrdering::Release:
668 case AtomicOrdering::Monotonic:
669 return AtomicOrdering::Monotonic;
670 case AtomicOrdering::AcquireRelease:
671 case AtomicOrdering::Acquire:
672 return AtomicOrdering::Acquire;
673 case AtomicOrdering::SequentiallyConsistent:
674 return AtomicOrdering::SequentiallyConsistent;
675 }
676 }
677
678 // Methods for support type inquiry through isa, cast, and dyn_cast:
679 static bool classof(const Instruction *I) {
680 return I->getOpcode() == Instruction::AtomicCmpXchg;
681 }
682 static bool classof(const Value *V) {
683 return isa<Instruction>(V) && classof(cast<Instruction>(V));
684 }
685
686private:
687 // Shadow Instruction::setInstructionSubclassData with a private forwarding
688 // method so that subclasses cannot accidentally use it.
689 template <typename Bitfield>
690 void setSubclassData(typename Bitfield::Type Value) {
691 Instruction::setSubclassData<Bitfield>(Value);
692 }
693
694 /// The synchronization scope ID of this cmpxchg instruction. Not quite
695 /// enough room in SubClassData for everything, so synchronization scope ID
696 /// gets its own field.
697 SyncScope::ID SSID;
698};
699
700template <>
701struct OperandTraits<AtomicCmpXchgInst> :
702 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
703};
704
705DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
706
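As a quick illustration of the cmpxchg API above (a sketch, not part of Instructions.h): a strong cmpxchg whose failure ordering is derived from the success ordering with getStrongestFailureOrdering. Ptr, Expected, Desired, and the insertion point I are assumed to be existing Value*/Instruction* in the caller:

  // Emit "cmpxchg ptr %Ptr, i32 %Expected, i32 %Desired acquire acquire" before I.
  AtomicOrdering Success = AtomicOrdering::Acquire;
  auto *CXI = new AtomicCmpXchgInst(
      Ptr, Expected, Desired, Align(4), Success,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Success), // Acquire for an Acquire success
      SyncScope::System, /*InsertBefore=*/I);
  CXI->setWeak(false); // strong cmpxchg: it may not fail spuriously
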
707//===----------------------------------------------------------------------===//
708// AtomicRMWInst Class
709//===----------------------------------------------------------------------===//
710
711/// an instruction that atomically reads a memory location,
712/// combines it with another value, and then stores the result back. Returns
713/// the old value.
714///
715class AtomicRMWInst : public Instruction {
716protected:
717 // Note: Instruction needs to be a friend here to call cloneImpl.
718 friend class Instruction;
719
720 AtomicRMWInst *cloneImpl() const;
721
722public:
723 /// This enumeration lists the possible modifications atomicrmw can make. In
724 /// the descriptions, 'p' is the pointer to the instruction's memory location,
725 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
726 /// instruction. These instructions always return 'old'.
727 enum BinOp : unsigned {
728 /// *p = v
729 Xchg,
730 /// *p = old + v
731 Add,
732 /// *p = old - v
733 Sub,
734 /// *p = old & v
735 And,
736 /// *p = ~(old & v)
737 Nand,
738 /// *p = old | v
739 Or,
740 /// *p = old ^ v
741 Xor,
742 /// *p = old >signed v ? old : v
743 Max,
744 /// *p = old <signed v ? old : v
745 Min,
746 /// *p = old >unsigned v ? old : v
747 UMax,
748 /// *p = old <unsigned v ? old : v
749 UMin,
750
751 /// *p = old + v
752 FAdd,
753
754 /// *p = old - v
755 FSub,
756
757 /// *p = maxnum(old, v)
758 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
759 FMax,
760
761 /// *p = minnum(old, v)
762 /// \p minnum matches the behavior of \p llvm.minnum.*.
763 FMin,
764
765 FIRST_BINOP = Xchg,
766 LAST_BINOP = FMin,
767 BAD_BINOP
768 };
769
770private:
771 template <unsigned Offset>
772 using AtomicOrderingBitfieldElement =
773 typename Bitfield::Element<AtomicOrdering, Offset, 3,
774 AtomicOrdering::LAST>;
775
776 template <unsigned Offset>
777 using BinOpBitfieldElement =
778 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
779
780public:
781 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
782 AtomicOrdering Ordering, SyncScope::ID SSID,
783 Instruction *InsertBefore = nullptr);
784 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
785 AtomicOrdering Ordering, SyncScope::ID SSID,
786 BasicBlock *InsertAtEnd);
787
788 // allocate space for exactly two operands
789 void *operator new(size_t S) { return User::operator new(S, 2); }
790 void operator delete(void *Ptr) { User::operator delete(Ptr); }
791
792 using VolatileField = BoolBitfieldElementT<0>;
793 using AtomicOrderingField =
794 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
795 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
796 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
797 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
798 OperationField, AlignmentField>(),
799 "Bitfields must be contiguous");
800
801 BinOp getOperation() const { return getSubclassData<OperationField>(); }
802
803 static StringRef getOperationName(BinOp Op);
804
805 static bool isFPOperation(BinOp Op) {
806 switch (Op) {
807 case AtomicRMWInst::FAdd:
808 case AtomicRMWInst::FSub:
809 case AtomicRMWInst::FMax:
810 case AtomicRMWInst::FMin:
811 return true;
812 default:
813 return false;
814 }
815 }
816
817 void setOperation(BinOp Operation) {
818 setSubclassData<OperationField>(Operation);
819 }
820
821 /// Return the alignment of the memory that is being accessed by the
822 /// instruction.
823 Align getAlign() const {
824 return Align(1ULL << getSubclassData<AlignmentField>());
825 }
826
827 void setAlignment(Align Align) {
828 setSubclassData<AlignmentField>(Log2(Align));
829 }
830
831 /// Return true if this is a RMW on a volatile memory location.
832 ///
833 bool isVolatile() const { return getSubclassData<VolatileField>(); }
834
835 /// Specify whether this is a volatile RMW or not.
836 ///
837 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
838
839 /// Transparently provide more efficient getOperand methods.
840 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
841
842 /// Returns the ordering constraint of this rmw instruction.
843 AtomicOrdering getOrdering() const {
844 return getSubclassData<AtomicOrderingField>();
845 }
846
847 /// Sets the ordering constraint of this rmw instruction.
848 void setOrdering(AtomicOrdering Ordering) {
849 assert(Ordering != AtomicOrdering::NotAtomic &&
850 "atomicrmw instructions can only be atomic.");
851 assert(Ordering != AtomicOrdering::Unordered &&
852 "atomicrmw instructions cannot be unordered.");
853 setSubclassData<AtomicOrderingField>(Ordering);
854 }
855
856 /// Returns the synchronization scope ID of this rmw instruction.
857 SyncScope::ID getSyncScopeID() const {
858 return SSID;
859 }
860
861 /// Sets the synchronization scope ID of this rmw instruction.
862 void setSyncScopeID(SyncScope::ID SSID) {
863 this->SSID = SSID;
864 }
865
866 Value *getPointerOperand() { return getOperand(0); }
867 const Value *getPointerOperand() const { return getOperand(0); }
868 static unsigned getPointerOperandIndex() { return 0U; }
869
870 Value *getValOperand() { return getOperand(1); }
871 const Value *getValOperand() const { return getOperand(1); }
872
873 /// Returns the address space of the pointer operand.
874 unsigned getPointerAddressSpace() const {
875 return getPointerOperand()->getType()->getPointerAddressSpace();
876 }
877
878 bool isFloatingPointOperation() const {
879 return isFPOperation(getOperation());
880 }
881
882 // Methods for support type inquiry through isa, cast, and dyn_cast:
883 static bool classof(const Instruction *I) {
884 return I->getOpcode() == Instruction::AtomicRMW;
885 }
886 static bool classof(const Value *V) {
887 return isa<Instruction>(V) && classof(cast<Instruction>(V));
888 }
889
890private:
891 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
892 AtomicOrdering Ordering, SyncScope::ID SSID);
893
894 // Shadow Instruction::setInstructionSubclassData with a private forwarding
895 // method so that subclasses cannot accidentally use it.
896 template <typename Bitfield>
897 void setSubclassData(typename Bitfield::Type Value) {
898 Instruction::setSubclassData<Bitfield>(Value);
899 }
900
901 /// The synchronization scope ID of this rmw instruction. Not quite enough
902 /// room in SubClassData for everything, so synchronization scope ID gets its
903 /// own field.
904 SyncScope::ID SSID;
905};
906
907template <>
908struct OperandTraits<AtomicRMWInst>
909 : public FixedNumOperandTraits<AtomicRMWInst,2> {
910};
911
912DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
913
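A short usage sketch for AtomicRMWInst (illustrative, not part of Instructions.h); Ptr, Val, and the insertion point I are assumed to exist in the caller:

  // Emit "atomicrmw add ptr %Ptr, i32 %Val seq_cst" before I; the result is the old value.
  auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val, Align(4),
                                AtomicOrdering::SequentiallyConsistent,
                                SyncScope::System, /*InsertBefore=*/I);
  assert(!RMW->isFloatingPointOperation()); // Add is an integer binop
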
914//===----------------------------------------------------------------------===//
915// GetElementPtrInst Class
916//===----------------------------------------------------------------------===//
917
918// checkGEPType - Simple wrapper function to give a better assertion failure
919// message on bad indexes for a gep instruction.
920//
921inline Type *checkGEPType(Type *Ty) {
922 assert(Ty && "Invalid GetElementPtrInst indices for type!");
923 return Ty;
924}
925
926/// an instruction for type-safe pointer arithmetic to
927/// access elements of arrays and structs
928///
929class GetElementPtrInst : public Instruction {
930 Type *SourceElementType;
931 Type *ResultElementType;
932
933 GetElementPtrInst(const GetElementPtrInst &GEPI);
934
935 /// Constructors - Create a getelementptr instruction with a base pointer and
936 /// a list of indices. The first ctor can optionally insert before an existing
937 /// instruction, the second appends the new instruction to the specified
938 /// BasicBlock.
939 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
940 ArrayRef<Value *> IdxList, unsigned Values,
941 const Twine &NameStr, Instruction *InsertBefore);
942 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
943 ArrayRef<Value *> IdxList, unsigned Values,
944 const Twine &NameStr, BasicBlock *InsertAtEnd);
945
946 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
947
948protected:
949 // Note: Instruction needs to be a friend here to call cloneImpl.
950 friend class Instruction;
951
952 GetElementPtrInst *cloneImpl() const;
953
954public:
955 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
956 ArrayRef<Value *> IdxList,
957 const Twine &NameStr = "",
958 Instruction *InsertBefore = nullptr) {
959 unsigned Values = 1 + unsigned(IdxList.size());
960 assert(PointeeType && "Must specify element type");
961 assert(cast<PointerType>(Ptr->getType()->getScalarType())
962 ->isOpaqueOrPointeeTypeMatches(PointeeType));
963 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
964 NameStr, InsertBefore);
965 }
966
967 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
968 ArrayRef<Value *> IdxList,
969 const Twine &NameStr,
970 BasicBlock *InsertAtEnd) {
971 unsigned Values = 1 + unsigned(IdxList.size());
972 assert(PointeeType && "Must specify element type");
973 assert(cast<PointerType>(Ptr->getType()->getScalarType())
974 ->isOpaqueOrPointeeTypeMatches(PointeeType));
975 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
976 NameStr, InsertAtEnd);
977 }
978
979 /// Create an "inbounds" getelementptr. See the documentation for the
980 /// "inbounds" flag in LangRef.html for details.
981 static GetElementPtrInst *
982 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
983 const Twine &NameStr = "",
984 Instruction *InsertBefore = nullptr) {
985 GetElementPtrInst *GEP =
986 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
987 GEP->setIsInBounds(true);
988 return GEP;
989 }
990
991 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
992 ArrayRef<Value *> IdxList,
993 const Twine &NameStr,
994 BasicBlock *InsertAtEnd) {
995 GetElementPtrInst *GEP =
996 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
997 GEP->setIsInBounds(true);
998 return GEP;
999 }
1000
1001 /// Transparently provide more efficient getOperand methods.
1002 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
1003
1004 Type *getSourceElementType() const { return SourceElementType; }
1005
1006 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1007 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1008
1009 Type *getResultElementType() const {
1010 assert(cast<PointerType>(getType()->getScalarType())
1011 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1012 return ResultElementType;
1013 }
1014
1015 /// Returns the address space of this instruction's pointer type.
1016 unsigned getAddressSpace() const {
1017 // Note that this is always the same as the pointer operand's address space
1018 // and that is cheaper to compute, so cheat here.
1019 return getPointerAddressSpace();
1020 }
1021
1022 /// Returns the result type of a getelementptr with the given source
1023 /// element type and indexes.
1024 ///
1025 /// Null is returned if the indices are invalid for the specified
1026 /// source element type.
1027 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1028 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1029 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1030
1031 /// Return the type of the element at the given index of an indexable
1032 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1033 ///
1034 /// Returns null if the type can't be indexed, or the given index is not
1035 /// legal for the given type.
1036 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1037 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1038
1039 inline op_iterator idx_begin() { return op_begin()+1; }
1040 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1041 inline op_iterator idx_end() { return op_end(); }
1042 inline const_op_iterator idx_end() const { return op_end(); }
1043
1044 inline iterator_range<op_iterator> indices() {
1045 return make_range(idx_begin(), idx_end());
1046 }
1047
1048 inline iterator_range<const_op_iterator> indices() const {
1049 return make_range(idx_begin(), idx_end());
1050 }
1051
1052 Value *getPointerOperand() {
1053 return getOperand(0);
1054 }
1055 const Value *getPointerOperand() const {
1056 return getOperand(0);
1057 }
1058 static unsigned getPointerOperandIndex() {
1059 return 0U; // get index for modifying correct operand.
1060 }
1061
1062 /// Method to return the pointer operand as a
1063 /// PointerType.
1064 Type *getPointerOperandType() const {
1065 return getPointerOperand()->getType();
1066 }
1067
1068 /// Returns the address space of the pointer operand.
1069 unsigned getPointerAddressSpace() const {
1070 return getPointerOperandType()->getPointerAddressSpace();
1071 }
1072
1073 /// Returns the pointer type returned by the GEP
1074 /// instruction, which may be a vector of pointers.
1075 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1076 ArrayRef<Value *> IdxList) {
1077 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1078 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1079 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1080 Type *PtrTy = OrigPtrTy->isOpaque()
1081 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1082 : PointerType::get(ResultElemTy, AddrSpace);
1083 // Vector GEP
1084 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1085 ElementCount EltCount = PtrVTy->getElementCount();
1086 return VectorType::get(PtrTy, EltCount);
1087 }
1088 for (Value *Index : IdxList)
1089 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1090 ElementCount EltCount = IndexVTy->getElementCount();
1091 return VectorType::get(PtrTy, EltCount);
1092 }
1093 // Scalar GEP
1094 return PtrTy;
1095 }
1096
1097 unsigned getNumIndices() const { // Note: always non-negative
1098 return getNumOperands() - 1;
1099 }
1100
1101 bool hasIndices() const {
1102 return getNumOperands() > 1;
1103 }
1104
1105 /// Return true if all of the indices of this GEP are
1106 /// zeros. If so, the result pointer and the first operand have the same
1107 /// value, just potentially different types.
1108 bool hasAllZeroIndices() const;
1109
1110 /// Return true if all of the indices of this GEP are
1111 /// constant integers. If so, the result pointer and the first operand have
1112 /// a constant offset between them.
1113 bool hasAllConstantIndices() const;
1114
1115 /// Set or clear the inbounds flag on this GEP instruction.
1116 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1117 void setIsInBounds(bool b = true);
1118
1119 /// Determine whether the GEP has the inbounds flag.
1120 bool isInBounds() const;
1121
1122 /// Accumulate the constant address offset of this GEP if possible.
1123 ///
1124 /// This routine accepts an APInt into which it will accumulate the constant
1125 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1126 /// all-constant, it returns false and the value of the offset APInt is
1127 /// undefined (it is *not* preserved!). The APInt passed into this routine
1128 /// must be at least as wide as the IntPtr type for the address space of
1129 /// the base GEP pointer.
1130 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1131 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1132 MapVector<Value *, APInt> &VariableOffsets,
1133 APInt &ConstantOffset) const;
1134 // Methods for support type inquiry through isa, cast, and dyn_cast:
1135 static bool classof(const Instruction *I) {
1136 return (I->getOpcode() == Instruction::GetElementPtr);
1137 }
1138 static bool classof(const Value *V) {
1139 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1140 }
1141};
1142
1143template <>
1144struct OperandTraits<GetElementPtrInst> :
1145 public VariadicOperandTraits<GetElementPtrInst, 1> {
1146};
1147
1148GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1149 ArrayRef<Value *> IdxList, unsigned Values,
1150 const Twine &NameStr,
1151 Instruction *InsertBefore)
1152 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1153 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1154 Values, InsertBefore),
1155 SourceElementType(PointeeType),
1156 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1157 assert(cast<PointerType>(getType()->getScalarType())
1158 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1159 init(Ptr, IdxList, NameStr);
1160}
1161
1162GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1163 ArrayRef<Value *> IdxList, unsigned Values,
1164 const Twine &NameStr,
1165 BasicBlock *InsertAtEnd)
1166 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1167 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1168 Values, InsertAtEnd),
1169 SourceElementType(PointeeType),
1170 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1171 assert(cast<PointerType>(getType()->getScalarType())
1172 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1173 init(Ptr, IdxList, NameStr);
1174}
1175
1176DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1177
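A usage sketch for GetElementPtrInst (illustrative, not part of Instructions.h); STy (a struct type), BasePtr, Ctx, DL (a DataLayout), and the insertion point I are assumed to exist in the caller:

  // &Base[0].field1: an inbounds GEP with the constant indices 0, 1.
  Value *Idxs[] = {ConstantInt::get(Type::getInt32Ty(Ctx), 0),
                   ConstantInt::get(Type::getInt32Ty(Ctx), 1)};
  GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
      STy, BasePtr, Idxs, "field1.addr", /*InsertBefore=*/I);
  assert(GEP->getNumIndices() == 2 && GEP->hasAllConstantIndices());
  // Because every index is constant, the byte offset can be accumulated:
  APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
  bool IsConstant = GEP->accumulateConstantOffset(DL, Offset);
  assert(IsConstant);
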
1178//===----------------------------------------------------------------------===//
1179// ICmpInst Class
1180//===----------------------------------------------------------------------===//
1181
1182/// This instruction compares its operands according to the predicate given
1183/// to the constructor. It only operates on integers or pointers. The operands
1184/// must be identical types.
1185/// Represent an integer comparison operator.
1186class ICmpInst: public CmpInst {
1187 void AssertOK() {
1188 assert(isIntPredicate() &&
1189 "Invalid ICmp predicate value");
1190 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1191 "Both operands to ICmp instruction are not of the same type!");
1192 // Check that the operands are the right type
1193 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1194 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1195 "Invalid operand types for ICmp instruction");
1196 }
1197
1198protected:
1199 // Note: Instruction needs to be a friend here to call cloneImpl.
1200 friend class Instruction;
1201
1202 /// Clone an identical ICmpInst
1203 ICmpInst *cloneImpl() const;
1204
1205public:
1206 /// Constructor with insert-before-instruction semantics.
1207 ICmpInst(
1208 Instruction *InsertBefore, ///< Where to insert
1209 Predicate pred, ///< The predicate to use for the comparison
1210 Value *LHS, ///< The left-hand-side of the expression
1211 Value *RHS, ///< The right-hand-side of the expression
1212 const Twine &NameStr = "" ///< Name of the instruction
1213 ) : CmpInst(makeCmpResultType(LHS->getType()),
1214 Instruction::ICmp, pred, LHS, RHS, NameStr,
1215 InsertBefore) {
1216#ifndef NDEBUG
1217 AssertOK();
1218#endif
1219 }
1220
1221 /// Constructor with insert-at-end semantics.
1222 ICmpInst(
1223 BasicBlock &InsertAtEnd, ///< Block to insert into.
1224 Predicate pred, ///< The predicate to use for the comparison
1225 Value *LHS, ///< The left-hand-side of the expression
1226 Value *RHS, ///< The right-hand-side of the expression
1227 const Twine &NameStr = "" ///< Name of the instruction
1228 ) : CmpInst(makeCmpResultType(LHS->getType()),
1229 Instruction::ICmp, pred, LHS, RHS, NameStr,
1230 &InsertAtEnd) {
1231#ifndef NDEBUG
1232 AssertOK();
1233#endif
1234 }
1235
1236 /// Constructor with no-insertion semantics
1237 ICmpInst(
1238 Predicate pred, ///< The predicate to use for the comparison
1239 Value *LHS, ///< The left-hand-side of the expression
1240 Value *RHS, ///< The right-hand-side of the expression
1241 const Twine &NameStr = "" ///< Name of the instruction
1242 ) : CmpInst(makeCmpResultType(LHS->getType()),
1243 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1244#ifndef NDEBUG
1245 AssertOK();
1246#endif
1247 }
1248
1249 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1250 /// @returns the predicate that would be the result if the operand were
1251 /// regarded as signed.
1252 /// Return the signed version of the predicate
1253 Predicate getSignedPredicate() const {
1254 return getSignedPredicate(getPredicate());
1255 }
1256
1257 /// This is a static version that you can use without an instruction.
1258 /// Return the signed version of the predicate.
1259 static Predicate getSignedPredicate(Predicate pred);
1260
1261 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1262 /// @returns the predicate that would be the result if the operand were
1263 /// regarded as unsigned.
1264 /// Return the unsigned version of the predicate
1265 Predicate getUnsignedPredicate() const {
1266 return getUnsignedPredicate(getPredicate());
1267 }
1268
1269 /// This is a static version that you can use without an instruction.
1270 /// Return the unsigned version of the predicate.
1271 static Predicate getUnsignedPredicate(Predicate pred);
1272
1273 /// Return true if this predicate is either EQ or NE. This also
1274 /// tests for commutativity.
1275 static bool isEquality(Predicate P) {
1276 return P == ICMP_EQ || P == ICMP_NE;
1277 }
1278
1279 /// Return true if this predicate is either EQ or NE. This also
1280 /// tests for commutativity.
1281 bool isEquality() const {
1282 return isEquality(getPredicate());
1283 }
1284
1285 /// @returns true if the predicate of this ICmpInst is commutative
1286 /// Determine if this relation is commutative.
1287 bool isCommutative() const { return isEquality(); }
1288
1289 /// Return true if the predicate is relational (not EQ or NE).
1290 ///
1291 bool isRelational() const {
1292 return !isEquality();
1293 }
1294
1295 /// Return true if the predicate is relational (not EQ or NE).
1296 ///
1297 static bool isRelational(Predicate P) {
1298 return !isEquality(P);
1299 }
1300
1301 /// Return true if the predicate is SGT or UGT.
1302 ///
1303 static bool isGT(Predicate P) {
1304 return P == ICMP_SGT || P == ICMP_UGT;
1305 }
1306
1307 /// Return true if the predicate is SLT or ULT.
1308 ///
1309 static bool isLT(Predicate P) {
1310 return P == ICMP_SLT || P == ICMP_ULT;
1311 }
1312
1313 /// Return true if the predicate is SGE or UGE.
1314 ///
1315 static bool isGE(Predicate P) {
1316 return P == ICMP_SGE || P == ICMP_UGE;
1317 }
1318
1319 /// Return true if the predicate is SLE or ULE.
1320 ///
1321 static bool isLE(Predicate P) {
1322 return P == ICMP_SLE || P == ICMP_ULE;
1323 }
1324
1325 /// Returns the sequence of all ICmp predicates.
1326 ///
1327 static auto predicates() { return ICmpPredicates(); }
1328
1329 /// Exchange the two operands to this instruction in such a way that it does
1330 /// not modify the semantics of the instruction. The predicate value may be
1331 /// changed to retain the same result if the predicate is order dependent
1332 /// (e.g. ult).
1333 /// Swap operands and adjust predicate.
1334 void swapOperands() {
1335 setPredicate(getSwappedPredicate());
1336 Op<0>().swap(Op<1>());
1337 }
1338
1339 /// Return result of `LHS Pred RHS` comparison.
1340 static bool compare(const APInt &LHS, const APInt &RHS,
1341 ICmpInst::Predicate Pred);
1342
1343 // Methods for support type inquiry through isa, cast, and dyn_cast:
1344 static bool classof(const Instruction *I) {
1345 return I->getOpcode() == Instruction::ICmp;
1346 }
1347 static bool classof(const Value *V) {
1348 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1349 }
1350};
1351
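The static compare helper above can be exercised without creating an instruction at all; a small worked example (illustrative, not part of Instructions.h):

  // The same 8-bit pattern compares differently under signed vs. unsigned predicates.
  APInt A(/*numBits=*/8, 200), B(/*numBits=*/8, 100); // A is -56 when read as signed
  assert(!ICmpInst::compare(A, B, ICmpInst::ICMP_ULT)); // 200 u< 100 is false
  assert(ICmpInst::compare(A, B, ICmpInst::ICMP_SLT)); // -56 s< 100 is true
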
1352//===----------------------------------------------------------------------===//
1353// FCmpInst Class
1354//===----------------------------------------------------------------------===//
1355
1356/// This instruction compares its operands according to the predicate given
1357/// to the constructor. It only operates on floating point values or packed
1358/// vectors of floating point values. The operands must be identical types.
1359/// Represents a floating point comparison operator.
1360class FCmpInst: public CmpInst {
1361 void AssertOK() {
1362 assert(isFPPredicate() && "Invalid FCmp predicate value");
1363 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1364 "Both operands to FCmp instruction are not of the same type!");
1365 // Check that the operands are the right type
1366 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1367 "Invalid operand types for FCmp instruction");
1368 }
1369
1370protected:
1371 // Note: Instruction needs to be a friend here to call cloneImpl.
1372 friend class Instruction;
1373
1374 /// Clone an identical FCmpInst
1375 FCmpInst *cloneImpl() const;
1376
1377public:
1378 /// Constructor with insert-before-instruction semantics.
1379 FCmpInst(
1380 Instruction *InsertBefore, ///< Where to insert
1381 Predicate pred, ///< The predicate to use for the comparison
1382 Value *LHS, ///< The left-hand-side of the expression
1383 Value *RHS, ///< The right-hand-side of the expression
1384 const Twine &NameStr = "" ///< Name of the instruction
1385 ) : CmpInst(makeCmpResultType(LHS->getType()),
1386 Instruction::FCmp, pred, LHS, RHS, NameStr,
1387 InsertBefore) {
1388 AssertOK();
1389 }
1390
1391 /// Constructor with insert-at-end semantics.
1392 FCmpInst(
1393 BasicBlock &InsertAtEnd, ///< Block to insert into.
1394 Predicate pred, ///< The predicate to use for the comparison
1395 Value *LHS, ///< The left-hand-side of the expression
1396 Value *RHS, ///< The right-hand-side of the expression
1397 const Twine &NameStr = "" ///< Name of the instruction
1398 ) : CmpInst(makeCmpResultType(LHS->getType()),
1399 Instruction::FCmp, pred, LHS, RHS, NameStr,
1400 &InsertAtEnd) {
1401 AssertOK();
1402 }
1403
1404 /// Constructor with no-insertion semantics
1405 FCmpInst(
1406 Predicate Pred, ///< The predicate to use for the comparison
1407 Value *LHS, ///< The left-hand-side of the expression
1408 Value *RHS, ///< The right-hand-side of the expression
1409 const Twine &NameStr = "", ///< Name of the instruction
1410 Instruction *FlagsSource = nullptr
1411 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1412 RHS, NameStr, nullptr, FlagsSource) {
1413 AssertOK();
1414 }
1415
1416 /// @returns true if the predicate of this instruction is EQ or NE.
1417 /// Determine if this is an equality predicate.
1418 static bool isEquality(Predicate Pred) {
1419 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1420 Pred == FCMP_UNE;
1421 }
1422
1423 /// @returns true if the predicate of this instruction is EQ or NE.
1424 /// Determine if this is an equality predicate.
1425 bool isEquality() const { return isEquality(getPredicate()); }
1426
1427 /// @returns true if the predicate of this instruction is commutative.
1428 /// Determine if this is a commutative predicate.
1429 bool isCommutative() const {
1430 return isEquality() ||
1431 getPredicate() == FCMP_FALSE ||
1432 getPredicate() == FCMP_TRUE ||
1433 getPredicate() == FCMP_ORD ||
1434 getPredicate() == FCMP_UNO;
1435 }
1436
1437 /// @returns true if the predicate is relational (not EQ or NE).
1438 /// Determine if this is a relational predicate.
1439 bool isRelational() const { return !isEquality(); }
1440
1441 /// Exchange the two operands to this instruction in such a way that it does
1442 /// not modify the semantics of the instruction. The predicate value may be
1443 /// changed to retain the same result if the predicate is order dependent
1444 /// (e.g. ult).
1445 /// Swap operands and adjust predicate.
1446 void swapOperands() {
1447 setPredicate(getSwappedPredicate());
1448 Op<0>().swap(Op<1>());
1449 }
1450
1451 /// Returns the sequence of all FCmp predicates.
1452 ///
1453 static auto predicates() { return FCmpPredicates(); }
1454
1455 /// Return result of `LHS Pred RHS` comparison.
1456 static bool compare(const APFloat &LHS, const APFloat &RHS,
1457 FCmpInst::Predicate Pred);
1458
1459 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1460 static bool classof(const Instruction *I) {
1461 return I->getOpcode() == Instruction::FCmp;
1462 }
1463 static bool classof(const Value *V) {
1464 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1465 }
1466};
1467
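Analogously to ICmpInst, the static compare helper can be used standalone; a small worked example (illustrative, not part of Instructions.h) showing the ordered/unordered split on a NaN operand:

  APFloat NaN = APFloat::getNaN(APFloat::IEEEdouble());
  APFloat One(1.0);
  assert(!FCmpInst::compare(NaN, One, FCmpInst::FCMP_OLT)); // ordered: false if either is NaN
  assert(FCmpInst::compare(NaN, One, FCmpInst::FCMP_ULT)); // unordered: true if either is NaN
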
1468//===----------------------------------------------------------------------===//
1469/// This class represents a function call, abstracting a target
1470/// machine's calling convention. This class uses the low bit of the SubClassData
1471/// field to indicate whether or not this is a tail call. The rest of the bits
1472/// hold the calling convention of the call.
1473///
1474class CallInst : public CallBase {
1475 CallInst(const CallInst &CI);
1476
1477 /// Construct a CallInst given a range of arguments.
1478 /// Construct a CallInst from a range of arguments
1479 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1480 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1481 Instruction *InsertBefore);
1482
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 const Twine &NameStr, Instruction *InsertBefore)
1485 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1486
1487 /// Construct a CallInst given a range of arguments.
1488 /// Construct a CallInst from a range of arguments
1489 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1490 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1491 BasicBlock *InsertAtEnd);
1492
1493 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1494 Instruction *InsertBefore);
1495
1496 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1497 BasicBlock *InsertAtEnd);
1498
1499 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1500 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1501 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1502
1503 /// Compute the number of operands to allocate.
1504 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1505 // We need one operand for the called function, plus the input operand
1506 // counts provided.
1507 return 1 + NumArgs + NumBundleInputs;
1508 }
1509
1510protected:
1511 // Note: Instruction needs to be a friend here to call cloneImpl.
1512 friend class Instruction;
1513
1514 CallInst *cloneImpl() const;
1515
1516public:
1517 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1518 Instruction *InsertBefore = nullptr) {
1519 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1520 }
1521
1522 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1523 const Twine &NameStr,
1524 Instruction *InsertBefore = nullptr) {
1525 return new (ComputeNumOperands(Args.size()))
1526 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1527 }
1528
1529 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1530 ArrayRef<OperandBundleDef> Bundles = None,
1531 const Twine &NameStr = "",
1532 Instruction *InsertBefore = nullptr) {
1533 const int NumOperands =
1534 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1535 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1536
1537 return new (NumOperands, DescriptorBytes)
1538 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1539 }
1540
1541 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1542 BasicBlock *InsertAtEnd) {
1543 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1544 }
1545
1546 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1547 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1548 return new (ComputeNumOperands(Args.size()))
1549 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1550 }
1551
1552 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1553 ArrayRef<OperandBundleDef> Bundles,
1554 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1555 const int NumOperands =
1556 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1557 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1558
1559 return new (NumOperands, DescriptorBytes)
1560 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1561 }
1562
1563 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1564 Instruction *InsertBefore = nullptr) {
1565 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1566 InsertBefore);
1567 }
1568
1569 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1570 ArrayRef<OperandBundleDef> Bundles = None,
1571 const Twine &NameStr = "",
1572 Instruction *InsertBefore = nullptr) {
1573 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1574 NameStr, InsertBefore);
1575 }
1576
1577 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1578 const Twine &NameStr,
1579 Instruction *InsertBefore = nullptr) {
1580 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1581 InsertBefore);
1582 }
1583
1584 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1585 BasicBlock *InsertAtEnd) {
1586 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1587 InsertAtEnd);
1588 }
1589
1590 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1591 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1592 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1593 InsertAtEnd);
1594 }
1595
1596 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1597 ArrayRef<OperandBundleDef> Bundles,
1598 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1599 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1600 NameStr, InsertAtEnd);
1601 }
1602
1603 /// Create a clone of \p CI with a different set of operand bundles and
1604 /// insert it before \p InsertPt.
1605 ///
1606 /// The returned call instruction is identical to \p CI in every way except that
1607 /// the operand bundles for the new instruction are set to the operand bundles
1608 /// in \p Bundles.
1609 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1610 Instruction *InsertPt = nullptr);
1611
1612 /// Generate the IR for a call to malloc:
1613 /// 1. Compute the malloc call's argument as the specified type's size,
1614 /// possibly multiplied by the array size if the array size is not
1615 /// constant 1.
1616 /// 2. Call malloc with that argument.
1617 /// 3. Bitcast the result of the malloc call to the specified type.
1618 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1619 Type *AllocTy, Value *AllocSize,
1620 Value *ArraySize = nullptr,
1621 Function *MallocF = nullptr,
1622 const Twine &Name = "");
1623 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1624 Type *AllocTy, Value *AllocSize,
1625 Value *ArraySize = nullptr,
1626 Function *MallocF = nullptr,
1627 const Twine &Name = "");
1628 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1629 Type *AllocTy, Value *AllocSize,
1630 Value *ArraySize = nullptr,
1631 ArrayRef<OperandBundleDef> Bundles = None,
1632 Function *MallocF = nullptr,
1633 const Twine &Name = "");
1634 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1635 Type *AllocTy, Value *AllocSize,
1636 Value *ArraySize = nullptr,
1637 ArrayRef<OperandBundleDef> Bundles = None,
1638 Function *MallocF = nullptr,
1639 const Twine &Name = "");
1640 /// Generate the IR for a call to the builtin free function.
1641 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1642 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1643 static Instruction *CreateFree(Value *Source,
1644 ArrayRef<OperandBundleDef> Bundles,
1645 Instruction *InsertBefore);
1646 static Instruction *CreateFree(Value *Source,
1647 ArrayRef<OperandBundleDef> Bundles,
1648 BasicBlock *InsertAtEnd);
1649
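
A minimal sketch of the CreateMalloc/CreateFree helpers declared just above, under the assumption that the caller already has a Module `M` and an insertion point `InsertPt`; the function name and variable names are illustrative only, not part of the header.

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  // Emits malloc(sizeof(i32) * 16) bitcast to i32*, then a matching free.
  void emitMallocFree(Module &M, Instruction *InsertPt) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    Type *AllocTy = Type::getInt32Ty(Ctx);
    Type *IntPtrTy = DL.getIntPtrType(Ctx);
    Value *AllocSize = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(AllocTy));
    Value *ArraySize = ConstantInt::get(IntPtrTy, 16);
    // Computes the size argument, calls malloc, and bitcasts the result.
    Instruction *Buf = CallInst::CreateMalloc(InsertPt, IntPtrTy, AllocTy,
                                              AllocSize, ArraySize,
                                              /*MallocF=*/nullptr, "buf");
    // Pairs the allocation with a call to the builtin free.
    CallInst::CreateFree(Buf, InsertPt);
  }
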
1650 // Note that 'musttail' implies 'tail'.
1651 enum TailCallKind : unsigned {
1652 TCK_None = 0,
1653 TCK_Tail = 1,
1654 TCK_MustTail = 2,
1655 TCK_NoTail = 3,
1656 TCK_LAST = TCK_NoTail
1657 };
1658
1659 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1660 static_assert(
1661 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1662 "Bitfields must be contiguous");
1663
1664 TailCallKind getTailCallKind() const {
1665 return getSubclassData<TailCallKindField>();
1666 }
1667
1668 bool isTailCall() const {
1669 TailCallKind Kind = getTailCallKind();
1670 return Kind == TCK_Tail || Kind == TCK_MustTail;
1671 }
1672
1673 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1674
1675 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1676
1677 void setTailCallKind(TailCallKind TCK) {
1678 setSubclassData<TailCallKindField>(TCK);
1679 }
1680
1681 void setTailCall(bool IsTc = true) {
1682 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1683 }
1684
1685 /// Return true if the call can return twice
1686 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1687 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1688
1689 // Methods for support type inquiry through isa, cast, and dyn_cast:
1690 static bool classof(const Instruction *I) {
1691 return I->getOpcode() == Instruction::Call;
1692 }
1693 static bool classof(const Value *V) {
1694 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1695 }
1696
1697 /// Updates profile metadata by scaling it by \p S / \p T.
1698 void updateProfWeight(uint64_t S, uint64_t T);
1699
1700private:
1701 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1702 // method so that subclasses cannot accidentally use it.
1703 template <typename Bitfield>
1704 void setSubclassData(typename Bitfield::Type Value) {
1705 Instruction::setSubclassData<Bitfield>(Value);
1706 }
1707};
1708
1709CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1710 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1711 BasicBlock *InsertAtEnd)
1712 : CallBase(Ty->getReturnType(), Instruction::Call,
1713 OperandTraits<CallBase>::op_end(this) -
1714 (Args.size() + CountBundleInputs(Bundles) + 1),
1715 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1716 InsertAtEnd) {
1717 init(Ty, Func, Args, Bundles, NameStr);
1718}
1719
1720CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1721 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1722 Instruction *InsertBefore)
1723 : CallBase(Ty->getReturnType(), Instruction::Call,
1724 OperandTraits<CallBase>::op_end(this) -
1725 (Args.size() + CountBundleInputs(Bundles) + 1),
1726 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1727 InsertBefore) {
1728 init(Ty, Func, Args, Bundles, NameStr);
1729}
1730
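
A minimal sketch tying the FunctionCallee-based Create factories and the tail-call kinds above together, assuming an existing Module `M`, an i8* string value `Str`, and an insertion point `InsertPt`; `emitPutsCall` and the other names are illustrative, not part of the header.

  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  CallInst *emitPutsCall(Module &M, Value *Str, Instruction *InsertPt) {
    LLVMContext &Ctx = M.getContext();
    FunctionType *PutsTy = FunctionType::get(Type::getInt32Ty(Ctx),
                                             {Type::getInt8PtrTy(Ctx)},
                                             /*isVarArg=*/false);
    // getOrInsertFunction returns a FunctionCallee, which carries both the
    // callee value and its FunctionType, matching the Create overloads above.
    FunctionCallee Puts = M.getOrInsertFunction("puts", PutsTy);
    CallInst *CI = CallInst::Create(Puts, {Str}, "calltmp", InsertPt);
    CI->setTailCallKind(CallInst::TCK_Tail); // 'tail', but not 'musttail'
    return CI;
  }
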
1731//===----------------------------------------------------------------------===//
1732// SelectInst Class
1733//===----------------------------------------------------------------------===//
1734
1735/// This class represents the LLVM 'select' instruction.
1736///
1737class SelectInst : public Instruction {
1738 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1739 Instruction *InsertBefore)
1740 : Instruction(S1->getType(), Instruction::Select,
1741 &Op<0>(), 3, InsertBefore) {
1742 init(C, S1, S2);
1743 setName(NameStr);
1744 }
1745
1746 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1747 BasicBlock *InsertAtEnd)
1748 : Instruction(S1->getType(), Instruction::Select,
1749 &Op<0>(), 3, InsertAtEnd) {
1750 init(C, S1, S2);
1751 setName(NameStr);
1752 }
1753
1754 void init(Value *C, Value *S1, Value *S2) {
1755    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1756 Op<0>() = C;
1757 Op<1>() = S1;
1758 Op<2>() = S2;
1759 }
1760
1761protected:
1762 // Note: Instruction needs to be a friend here to call cloneImpl.
1763 friend class Instruction;
1764
1765 SelectInst *cloneImpl() const;
1766
1767public:
1768 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1769 const Twine &NameStr = "",
1770 Instruction *InsertBefore = nullptr,
1771 Instruction *MDFrom = nullptr) {
1772 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1773 if (MDFrom)
1774 Sel->copyMetadata(*MDFrom);
1775 return Sel;
1776 }
1777
1778 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1779 const Twine &NameStr,
1780 BasicBlock *InsertAtEnd) {
1781 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1782 }
1783
1784 const Value *getCondition() const { return Op<0>(); }
1785 const Value *getTrueValue() const { return Op<1>(); }
1786 const Value *getFalseValue() const { return Op<2>(); }
1787 Value *getCondition() { return Op<0>(); }
1788 Value *getTrueValue() { return Op<1>(); }
1789 Value *getFalseValue() { return Op<2>(); }
1790
1791 void setCondition(Value *V) { Op<0>() = V; }
1792 void setTrueValue(Value *V) { Op<1>() = V; }
1793 void setFalseValue(Value *V) { Op<2>() = V; }
1794
1795 /// Swap the true and false values of the select instruction.
1796 /// This doesn't swap prof metadata.
1797 void swapValues() { Op<1>().swap(Op<2>()); }
1798
1799 /// Return a string if the specified operands are invalid
1800 /// for a select operation, otherwise return null.
1801 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1802
1803 /// Transparently provide more efficient getOperand methods.
1804  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1805
1806 OtherOps getOpcode() const {
1807 return static_cast<OtherOps>(Instruction::getOpcode());
1808 }
1809
1810 // Methods for support type inquiry through isa, cast, and dyn_cast:
1811 static bool classof(const Instruction *I) {
1812 return I->getOpcode() == Instruction::Select;
1813 }
1814 static bool classof(const Value *V) {
1815 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1816 }
1817};
1818
1819template <>
1820struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1821};
1822
1823 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1824
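
A minimal sketch of the SelectInst factory above: validate the operands, build a select, then flip its arms with swapValues(). `Cond`, `A`, `B`, `InsertPt`, and `buildSelect` are assumptions supplied for illustration.

  #include "llvm/IR/Instructions.h"
  #include <cassert>
  using namespace llvm;

  SelectInst *buildSelect(Value *Cond, Value *A, Value *B,
                          Instruction *InsertPt) {
    // areInvalidOperands returns a diagnostic string on failure, null on success.
    assert(!SelectInst::areInvalidOperands(Cond, A, B) && "bad select operands");
    SelectInst *Sel = SelectInst::Create(Cond, A, B, "sel", InsertPt);
    Sel->swapValues(); // now selects B when Cond is true (prof metadata untouched)
    return Sel;
  }
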
1825//===----------------------------------------------------------------------===//
1826// VAArgInst Class
1827//===----------------------------------------------------------------------===//
1828
1829/// This class represents the va_arg llvm instruction, which returns
1830/// an argument of the specified type given a va_list and increments that list
1831///
1832class VAArgInst : public UnaryInstruction {
1833protected:
1834 // Note: Instruction needs to be a friend here to call cloneImpl.
1835 friend class Instruction;
1836
1837 VAArgInst *cloneImpl() const;
1838
1839public:
1840 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1841 Instruction *InsertBefore = nullptr)
1842 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1843 setName(NameStr);
1844 }
1845
1846 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1847 BasicBlock *InsertAtEnd)
1848 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1849 setName(NameStr);
1850 }
1851
1852 Value *getPointerOperand() { return getOperand(0); }
1853 const Value *getPointerOperand() const { return getOperand(0); }
1854 static unsigned getPointerOperandIndex() { return 0U; }
1855
1856 // Methods for support type inquiry through isa, cast, and dyn_cast:
1857 static bool classof(const Instruction *I) {
1858 return I->getOpcode() == VAArg;
1859 }
1860 static bool classof(const Value *V) {
1861 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1862 }
1863};
1864
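
A small sketch for VAArgInst: unlike most instructions here it exposes public constructors rather than Create factories, so it is allocated with `new` directly. `VAListPtr` and `InsertPt` are assumed to exist in the caller.

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Reads the next i32 from an existing va_list pointer, advancing the list.
  Value *readVAArgI32(Value *VAListPtr, Instruction *InsertPt) {
    Type *I32 = Type::getInt32Ty(InsertPt->getContext());
    return new VAArgInst(VAListPtr, I32, "va.int", InsertPt);
  }
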
1865//===----------------------------------------------------------------------===//
1866// ExtractElementInst Class
1867//===----------------------------------------------------------------------===//
1868
1869/// This instruction extracts a single (scalar)
1870/// element from a VectorType value
1871///
1872class ExtractElementInst : public Instruction {
1873 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1874 Instruction *InsertBefore = nullptr);
1875 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1876 BasicBlock *InsertAtEnd);
1877
1878protected:
1879 // Note: Instruction needs to be a friend here to call cloneImpl.
1880 friend class Instruction;
1881
1882 ExtractElementInst *cloneImpl() const;
1883
1884public:
1885 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1886 const Twine &NameStr = "",
1887 Instruction *InsertBefore = nullptr) {
1888 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1889 }
1890
1891 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1892 const Twine &NameStr,
1893 BasicBlock *InsertAtEnd) {
1894 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1895 }
1896
1897 /// Return true if an extractelement instruction can be
1898 /// formed with the specified operands.
1899 static bool isValidOperands(const Value *Vec, const Value *Idx);
1900
1901 Value *getVectorOperand() { return Op<0>(); }
1902 Value *getIndexOperand() { return Op<1>(); }
1903 const Value *getVectorOperand() const { return Op<0>(); }
1904 const Value *getIndexOperand() const { return Op<1>(); }
1905
1906 VectorType *getVectorOperandType() const {
1907 return cast<VectorType>(getVectorOperand()->getType());
1908 }
1909
1910 /// Transparently provide more efficient getOperand methods.
1911  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1912
1913 // Methods for support type inquiry through isa, cast, and dyn_cast:
1914 static bool classof(const Instruction *I) {
1915 return I->getOpcode() == Instruction::ExtractElement;
1916 }
1917 static bool classof(const Value *V) {
1918 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1919 }
1920};
1921
1922template <>
1923struct OperandTraits<ExtractElementInst> :
1924 public FixedNumOperandTraits<ExtractElementInst, 2> {
1925};
1926
1927 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1928
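
A minimal sketch of the ExtractElementInst API above: extract lane 0 from an existing vector value, guarding with isValidOperands as documented. `Vec`, `InsertPt`, and `extractLane0` are illustrative names.

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  Value *extractLane0(Value *Vec, Instruction *InsertPt) {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Vec->getContext()), 0);
    if (!ExtractElementInst::isValidOperands(Vec, Idx))
      return nullptr;
    return ExtractElementInst::Create(Vec, Idx, "lane0", InsertPt);
  }
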
1929//===----------------------------------------------------------------------===//
1930// InsertElementInst Class
1931//===----------------------------------------------------------------------===//
1932
1933/// This instruction inserts a single (scalar)
1934/// element into a VectorType value
1935///
1936class InsertElementInst : public Instruction {
1937 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1938 const Twine &NameStr = "",
1939 Instruction *InsertBefore = nullptr);
1940 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1941 BasicBlock *InsertAtEnd);
1942
1943protected:
1944 // Note: Instruction needs to be a friend here to call cloneImpl.
1945 friend class Instruction;
1946
1947 InsertElementInst *cloneImpl() const;
1948
1949public:
1950 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1951 const Twine &NameStr = "",
1952 Instruction *InsertBefore = nullptr) {
1953 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1954 }
1955
1956 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1957 const Twine &NameStr,
1958 BasicBlock *InsertAtEnd) {
1959 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1960 }
1961
1962 /// Return true if an insertelement instruction can be
1963 /// formed with the specified operands.
1964 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1965 const Value *Idx);
1966
1967 /// Overload to return most specific vector type.
1968 ///
1969 VectorType *getType() const {
1970 return cast<VectorType>(Instruction::getType());
1971 }
1972
1973 /// Transparently provide more efficient getOperand methods.
1974  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1975
1976 // Methods for support type inquiry through isa, cast, and dyn_cast:
1977 static bool classof(const Instruction *I) {
1978 return I->getOpcode() == Instruction::InsertElement;
1979 }
1980 static bool classof(const Value *V) {
1981 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1982 }
1983};
1984
1985template <>
1986struct OperandTraits<InsertElementInst> :
1987 public FixedNumOperandTraits<InsertElementInst, 3> {
1988};
1989
1990 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1991
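
The insertelement counterpart of the sketch above: write a scalar into lane 2 of an existing vector. `Elt` is assumed to have the vector's element type; all names are illustrative.

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  Value *setLane2(Value *Vec, Value *Elt, Instruction *InsertPt) {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Vec->getContext()), 2);
    if (!InsertElementInst::isValidOperands(Vec, Elt, Idx))
      return nullptr;
    return InsertElementInst::Create(Vec, Elt, Idx, "vec.upd", InsertPt);
  }
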
1992//===----------------------------------------------------------------------===//
1993// ShuffleVectorInst Class
1994//===----------------------------------------------------------------------===//
1995
1996constexpr int UndefMaskElem = -1;
1997
1998/// This instruction constructs a fixed permutation of two
1999/// input vectors.
2000///
2001/// For each element of the result vector, the shuffle mask selects an element
2002/// from one of the input vectors to copy to the result. Non-negative elements
2003/// in the mask represent an index into the concatenated pair of input vectors.
2004/// UndefMaskElem (-1) specifies that the result element is undefined.
2005///
2006/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2007/// requirement may be relaxed in the future.
2008class ShuffleVectorInst : public Instruction {
2009 SmallVector<int, 4> ShuffleMask;
2010 Constant *ShuffleMaskForBitcode;
2011
2012protected:
2013 // Note: Instruction needs to be a friend here to call cloneImpl.
2014 friend class Instruction;
2015
2016 ShuffleVectorInst *cloneImpl() const;
2017
2018public:
2019 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2020 Instruction *InsertBefore = nullptr);
2021 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2022 BasicBlock *InsertAtEnd);
2023 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2024 Instruction *InsertBefore = nullptr);
2025 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2026 BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2028 const Twine &NameStr = "",
2029                    Instruction *InsertBefore = nullptr);
2030 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2031 const Twine &NameStr, BasicBlock *InsertAtEnd);
2032 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2033 const Twine &NameStr = "",
2034                    Instruction *InsertBefore = nullptr);
2035 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2036 const Twine &NameStr, BasicBlock *InsertAtEnd);
2037
2038 void *operator new(size_t S) { return User::operator new(S, 2); }
2039 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2040
2041 /// Swap the operands and adjust the mask to preserve the semantics
2042 /// of the instruction.
2043 void commute();
2044
2045 /// Return true if a shufflevector instruction can be
2046 /// formed with the specified operands.
2047 static bool isValidOperands(const Value *V1, const Value *V2,
2048 const Value *Mask);
2049 static bool isValidOperands(const Value *V1, const Value *V2,
2050 ArrayRef<int> Mask);
2051
2052 /// Overload to return most specific vector type.
2053 ///
2054 VectorType *getType() const {
2055 return cast<VectorType>(Instruction::getType());
2056 }
2057
2058 /// Transparently provide more efficient getOperand methods.
2059  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2060
2061 /// Return the shuffle mask value of this instruction for the given element
2062 /// index. Return UndefMaskElem if the element is undef.
2063 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2064
2065 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 static void getShuffleMask(const Constant *Mask,
2068 SmallVectorImpl<int> &Result);
2069
2070 /// Return the mask for this instruction as a vector of integers. Undefined
2071 /// elements of the mask are returned as UndefMaskElem.
2072 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2073 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2074 }
2075
2076 /// Return the mask for this instruction, for use in bitcode.
2077 ///
2078 /// TODO: This is temporary until we decide a new bitcode encoding for
2079 /// shufflevector.
2080 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2081
2082 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2083 Type *ResultTy);
2084
2085 void setShuffleMask(ArrayRef<int> Mask);
2086
2087 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2088
2089 /// Return true if this shuffle returns a vector with a different number of
2090 /// elements than its source vectors.
2091 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2092 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2093 bool changesLength() const {
2094 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2095 ->getElementCount()
2096 .getKnownMinValue();
2097 unsigned NumMaskElts = ShuffleMask.size();
2098 return NumSourceElts != NumMaskElts;
2099 }
2100
2101 /// Return true if this shuffle returns a vector with a greater number of
2102 /// elements than its source vectors.
2103 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2104 bool increasesLength() const {
2105 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2106 ->getElementCount()
2107 .getKnownMinValue();
2108 unsigned NumMaskElts = ShuffleMask.size();
2109 return NumSourceElts < NumMaskElts;
2110 }
2111
2112 /// Return true if this shuffle mask chooses elements from exactly one source
2113 /// vector.
2114 /// Example: <7,5,undef,7>
2115 /// This assumes that vector operands are the same length as the mask.
2116 static bool isSingleSourceMask(ArrayRef<int> Mask);
2117 static bool isSingleSourceMask(const Constant *Mask) {
2118    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2119 SmallVector<int, 16> MaskAsInts;
2120 getShuffleMask(Mask, MaskAsInts);
2121 return isSingleSourceMask(MaskAsInts);
2122 }
2123
2124 /// Return true if this shuffle chooses elements from exactly one source
2125 /// vector without changing the length of that vector.
2126 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2127 /// TODO: Optionally allow length-changing shuffles.
2128 bool isSingleSource() const {
2129 return !changesLength() && isSingleSourceMask(ShuffleMask);
2130 }
2131
2132 /// Return true if this shuffle mask chooses elements from exactly one source
2133 /// vector without lane crossings. A shuffle using this mask is not
2134 /// necessarily a no-op because it may change the number of elements from its
2135 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2136 /// Example: <undef,undef,2,3>
2137 static bool isIdentityMask(ArrayRef<int> Mask);
2138 static bool isIdentityMask(const Constant *Mask) {
2139    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2140
2141 // Not possible to express a shuffle mask for a scalable vector for this
2142 // case.
2143 if (isa<ScalableVectorType>(Mask->getType()))
2144 return false;
2145
2146 SmallVector<int, 16> MaskAsInts;
2147 getShuffleMask(Mask, MaskAsInts);
2148 return isIdentityMask(MaskAsInts);
2149 }
2150
2151 /// Return true if this shuffle chooses elements from exactly one source
2152 /// vector without lane crossings and does not change the number of elements
2153 /// from its input vectors.
2154 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2155 bool isIdentity() const {
2156 // Not possible to express a shuffle mask for a scalable vector for this
2157 // case.
2158 if (isa<ScalableVectorType>(getType()))
2159 return false;
2160
2161 return !changesLength() && isIdentityMask(ShuffleMask);
2162 }
2163
2164 /// Return true if this shuffle lengthens exactly one source vector with
2165 /// undefs in the high elements.
2166 bool isIdentityWithPadding() const;
2167
2168 /// Return true if this shuffle extracts the first N elements of exactly one
2169 /// source vector.
2170 bool isIdentityWithExtract() const;
2171
2172 /// Return true if this shuffle concatenates its 2 source vectors. This
2173 /// returns false if either input is undefined. In that case, the shuffle is
2174 /// better classified as an identity with padding operation.
2175 bool isConcat() const;
2176
2177 /// Return true if this shuffle mask chooses elements from its source vectors
2178 /// without lane crossings. A shuffle using this mask would be
2179 /// equivalent to a vector select with a constant condition operand.
2180 /// Example: <4,1,6,undef>
2181 /// This returns false if the mask does not choose from both input vectors.
2182 /// In that case, the shuffle is better classified as an identity shuffle.
2183 /// This assumes that vector operands are the same length as the mask
2184 /// (a length-changing shuffle can never be equivalent to a vector select).
2185 static bool isSelectMask(ArrayRef<int> Mask);
2186 static bool isSelectMask(const Constant *Mask) {
2187    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2188 SmallVector<int, 16> MaskAsInts;
2189 getShuffleMask(Mask, MaskAsInts);
2190 return isSelectMask(MaskAsInts);
2191 }
2192
2193 /// Return true if this shuffle chooses elements from its source vectors
2194 /// without lane crossings and all operands have the same number of elements.
2195 /// In other words, this shuffle is equivalent to a vector select with a
2196 /// constant condition operand.
2197 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2198 /// This returns false if the mask does not choose from both input vectors.
2199 /// In that case, the shuffle is better classified as an identity shuffle.
2200 /// TODO: Optionally allow length-changing shuffles.
2201 bool isSelect() const {
2202 return !changesLength() && isSelectMask(ShuffleMask);
2203 }
2204
2205 /// Return true if this shuffle mask swaps the order of elements from exactly
2206 /// one source vector.
2207 /// Example: <7,6,undef,4>
2208 /// This assumes that vector operands are the same length as the mask.
2209 static bool isReverseMask(ArrayRef<int> Mask);
2210 static bool isReverseMask(const Constant *Mask) {
2211    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2212 SmallVector<int, 16> MaskAsInts;
2213 getShuffleMask(Mask, MaskAsInts);
2214 return isReverseMask(MaskAsInts);
2215 }
2216
2217 /// Return true if this shuffle swaps the order of elements from exactly
2218 /// one source vector.
2219 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2220 /// TODO: Optionally allow length-changing shuffles.
2221 bool isReverse() const {
2222 return !changesLength() && isReverseMask(ShuffleMask);
2223 }
2224
2225 /// Return true if this shuffle mask chooses all elements with the same value
2226 /// as the first element of exactly one source vector.
2227 /// Example: <4,undef,undef,4>
2228 /// This assumes that vector operands are the same length as the mask.
2229 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2230 static bool isZeroEltSplatMask(const Constant *Mask) {
2231    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2232 SmallVector<int, 16> MaskAsInts;
2233 getShuffleMask(Mask, MaskAsInts);
2234 return isZeroEltSplatMask(MaskAsInts);
2235 }
2236
2237 /// Return true if all elements of this shuffle are the same value as the
2238 /// first element of exactly one source vector without changing the length
2239 /// of that vector.
2240 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2241 /// TODO: Optionally allow length-changing shuffles.
2242 /// TODO: Optionally allow splats from other elements.
2243 bool isZeroEltSplat() const {
2244 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2245 }
2246
2247 /// Return true if this shuffle mask is a transpose mask.
2248 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2249 /// even- or odd-numbered vector elements from two n-dimensional source
2250 /// vectors and write each result into consecutive elements of an
2251 /// n-dimensional destination vector. Two shuffles are necessary to complete
2252 /// the transpose, one for the even elements and another for the odd elements.
2253 /// This description closely follows how the TRN1 and TRN2 AArch64
2254 /// instructions operate.
2255 ///
2256 /// For example, a simple 2x2 matrix can be transposed with:
2257 ///
2258 /// ; Original matrix
2259 /// m0 = < a, b >
2260 /// m1 = < c, d >
2261 ///
2262 /// ; Transposed matrix
2263 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2264 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2265 ///
2266 /// For matrices having greater than n columns, the resulting nx2 transposed
2267 /// matrix is stored in two result vectors such that one vector contains
2268 /// interleaved elements from all the even-numbered rows and the other vector
2269 /// contains interleaved elements from all the odd-numbered rows. For example,
2270 /// a 2x4 matrix can be transposed with:
2271 ///
2272 /// ; Original matrix
2273 /// m0 = < a, b, c, d >
2274 /// m1 = < e, f, g, h >
2275 ///
2276 /// ; Transposed matrix
2277 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2278 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2279 static bool isTransposeMask(ArrayRef<int> Mask);
2280 static bool isTransposeMask(const Constant *Mask) {
2281    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2282 SmallVector<int, 16> MaskAsInts;
2283 getShuffleMask(Mask, MaskAsInts);
2284 return isTransposeMask(MaskAsInts);
2285 }
2286
2287 /// Return true if this shuffle transposes the elements of its inputs without
2288 /// changing the length of the vectors. This operation may also be known as a
2289 /// merge or interleave. See the description for isTransposeMask() for the
2290 /// exact specification.
2291 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2292 bool isTranspose() const {
2293 return !changesLength() && isTransposeMask(ShuffleMask);
2294 }
2295
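
A quick illustration of the TRN-style masks documented above, checked with the static helper rather than by building IR. The concrete masks are the <0,4,2,6> / <1,5,3,7> pair from the 2x4 example in the comment; the function name is illustrative.

  #include "llvm/IR/Instructions.h"
  #include <cassert>
  using namespace llvm;

  void transposeMaskExamples() {
    int Even[] = {0, 4, 2, 6}; // gathers even-numbered columns of m0/m1
    int Odd[]  = {1, 5, 3, 7}; // gathers odd-numbered columns of m0/m1
    assert(ShuffleVectorInst::isTransposeMask(Even));
    assert(ShuffleVectorInst::isTransposeMask(Odd));
    int NotTranspose[] = {0, 1, 2, 3}; // an identity mask, not a transpose
    assert(!ShuffleVectorInst::isTransposeMask(NotTranspose));
  }
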
2296 /// Return true if this shuffle mask is a splice mask, concatenating the two
2297 /// inputs together and then extracting an original width vector starting from
2298 /// the splice index.
2299 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2300 static bool isSpliceMask(ArrayRef<int> Mask, int &Index);
2301 static bool isSpliceMask(const Constant *Mask, int &Index) {
2302    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2303 SmallVector<int, 16> MaskAsInts;
2304 getShuffleMask(Mask, MaskAsInts);
2305 return isSpliceMask(MaskAsInts, Index);
2306 }
2307
2308 /// Return true if this shuffle splices two inputs without changing the length
2309 /// of the vectors. This operation concatenates the two inputs together and
2310 /// then extracts an original width vector starting from the splice index.
2311 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2312 bool isSplice(int &Index) const {
2313 return !changesLength() && isSpliceMask(ShuffleMask, Index);
2314 }
2315
2316 /// Return true if this shuffle mask is an extract subvector mask.
2317 /// A valid extract subvector mask returns a smaller vector from a single
2318 /// source operand. The base extraction index is returned as well.
2319 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2320 int &Index);
2321 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2322 int &Index) {
2323    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2324 // Not possible to express a shuffle mask for a scalable vector for this
2325 // case.
2326 if (isa<ScalableVectorType>(Mask->getType()))
2327 return false;
2328 SmallVector<int, 16> MaskAsInts;
2329 getShuffleMask(Mask, MaskAsInts);
2330 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2331 }
2332
2333 /// Return true if this shuffle mask is an extract subvector mask.
2334 bool isExtractSubvectorMask(int &Index) const {
2335 // Not possible to express a shuffle mask for a scalable vector for this
2336 // case.
2337 if (isa<ScalableVectorType>(getType()))
2338 return false;
2339
2340 int NumSrcElts =
2341 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2342 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2343 }
2344
2345 /// Return true if this shuffle mask is an insert subvector mask.
2346 /// A valid insert subvector mask inserts the lowest elements of a second
2347 /// source operand into an in-place first source operand.
2348 /// Both the subvector width and the insertion index are returned.
2349 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2350 int &NumSubElts, int &Index);
2351 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2352 int &NumSubElts, int &Index) {
2353    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2354 // Not possible to express a shuffle mask for a scalable vector for this
2355 // case.
2356 if (isa<ScalableVectorType>(Mask->getType()))
2357 return false;
2358 SmallVector<int, 16> MaskAsInts;
2359 getShuffleMask(Mask, MaskAsInts);
2360 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2361 }
2362
2363 /// Return true if this shuffle mask is an insert subvector mask.
2364 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2365 // Not possible to express a shuffle mask for a scalable vector for this
2366 // case.
2367 if (isa<ScalableVectorType>(getType()))
2368 return false;
2369
2370 int NumSrcElts =
2371 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2372 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2373 }
2374
2375 /// Return true if this shuffle mask replicates each of the \p VF elements
2376 /// in a vector \p ReplicationFactor times.
2377 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2378 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2379 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2380 int &VF);
2381 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2382 int &VF) {
2383    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2384 // Not possible to express a shuffle mask for a scalable vector for this
2385 // case.
2386 if (isa<ScalableVectorType>(Mask->getType()))
2387 return false;
2388 SmallVector<int, 16> MaskAsInts;
2389 getShuffleMask(Mask, MaskAsInts);
2390 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2391 }
2392
2393 /// Return true if this shuffle mask is a replication mask.
2394 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2395
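
A quick check of the documented replication-mask shape: ReplicationFactor = 3 and VF = 4 give <0,0,0,1,1,1,2,2,2,3,3,3>, and the static helper recovers both values. The function name is illustrative.

  #include "llvm/IR/Instructions.h"
  #include <cassert>
  using namespace llvm;

  void replicationMaskExample() {
    int Mask[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3};
    int ReplicationFactor = 0, VF = 0;
    bool Ok = ShuffleVectorInst::isReplicationMask(Mask, ReplicationFactor, VF);
    assert(Ok && ReplicationFactor == 3 && VF == 4);
    (void)Ok;
  }
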
2396 /// Return true if this shuffle mask represents a "clustered" mask of size VF,
2397 /// i.e. each index between [0..VF) is used exactly once in each submask of
2398 /// size VF.
2399 /// For example, the mask for \p VF=4 is:
2400 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2401 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2402 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2403 /// element 3 is used twice in the second submask
2404 /// (3,3,1,0) and index 2 is not used at all.
2405 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2406
2407 /// Return true if this shuffle mask is a one-use-single-source ("clustered")
2408 /// mask.
2409 bool isOneUseSingleSourceMask(int VF) const;
2410
2411 /// Change values in a shuffle permute mask assuming the two vector operands
2412 /// of length InVecNumElts have swapped position.
2413 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2414 unsigned InVecNumElts) {
2415 for (int &Idx : Mask) {
2416 if (Idx == -1)
2417 continue;
2418 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2419      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2420             "shufflevector mask index out of range");
2421 }
2422 }
2423
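
A worked example for commuteShuffleMask above: with two 4-element operands, swapping the operands turns an index i < 4 into i + 4 and vice versa, while -1 (UndefMaskElem) entries are left alone. The function name is illustrative.

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  void commuteMaskExample() {
    SmallVector<int, 8> Mask = {0, 5, -1, 7};
    ShuffleVectorInst::commuteShuffleMask(Mask, /*InVecNumElts=*/4);
    // Mask is now {4, 1, -1, 3}: the same lanes, read from the swapped operands.
  }
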
2424 // Methods for support type inquiry through isa, cast, and dyn_cast:
2425 static bool classof(const Instruction *I) {
2426 return I->getOpcode() == Instruction::ShuffleVector;
2427 }
2428 static bool classof(const Value *V) {
2429 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2430 }
2431};
2432
2433template <>
2434struct OperandTraits<ShuffleVectorInst>
2435 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2436
2437 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2438
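
A minimal sketch tying the ShuffleVectorInst constructors and mask queries together, assuming `A` and `B` are existing <4 x i32> values and `InsertPt` an insertion point. The class overrides operator new, so a plain `new` expression is used; the names are illustrative.

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  ShuffleVectorInst *concatLowHalves(Value *A, Value *B, Instruction *InsertPt) {
    // Take the low two lanes of A followed by the low two lanes of B.
    int Mask[] = {0, 1, 4, 5};
    auto *Shuf = new ShuffleVectorInst(A, B, Mask, "halves", InsertPt);
    // Same number of result and source lanes, and both inputs are used, so
    // Shuf->changesLength() and Shuf->isSingleSource() are both false.
    return Shuf;
  }
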
2439//===----------------------------------------------------------------------===//
2440// ExtractValueInst Class
2441//===----------------------------------------------------------------------===//
2442
2443/// This instruction extracts a struct member or array
2444/// element value from an aggregate value.
2445///
2446class ExtractValueInst : public UnaryInstruction {
2447 SmallVector<unsigned, 4> Indices;
2448
2449 ExtractValueInst(const ExtractValueInst &EVI);
2450
2451 /// Constructors - Create an extractvalue instruction with a base aggregate
2452 /// value and a list of indices. The first ctor can optionally insert before
2453 /// an existing instruction, the second appends the new instruction to the
2454 /// specified BasicBlock.
2455 inline ExtractValueInst(Value *Agg,
2456 ArrayRef<unsigned> Idxs,
2457 const Twine &NameStr,
2458 Instruction *InsertBefore);
2459 inline ExtractValueInst(Value *Agg,
2460 ArrayRef<unsigned> Idxs,
2461 const Twine &NameStr, BasicBlock *InsertAtEnd);
2462
2463 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2464
2465protected:
2466 // Note: Instruction needs to be a friend here to call cloneImpl.
2467 friend class Instruction;
2468
2469 ExtractValueInst *cloneImpl() const;
2470
2471public:
2472 static ExtractValueInst *Create(Value *Agg,
2473 ArrayRef<unsigned> Idxs,
2474 const Twine &NameStr = "",
2475 Instruction *InsertBefore = nullptr) {
2476 return new
2477 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2478 }
2479
2480 static ExtractValueInst *Create(Value *Agg,
2481 ArrayRef<unsigned> Idxs,
2482 const Twine &NameStr,
2483 BasicBlock *InsertAtEnd) {
2484 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2485 }
2486
2487 /// Returns the type of the element that would be extracted
2488 /// with an extractvalue instruction with the specified parameters.
2489 ///
2490 /// Null is returned if the indices are invalid for the specified type.
2491 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2492
2493 using idx_iterator = const unsigned*;
2494
2495 inline idx_iterator idx_begin() const { return Indices.begin(); }
2496 inline idx_iterator idx_end() const { return Indices.end(); }
2497 inline iterator_range<idx_iterator> indices() const {
2498 return make_range(idx_begin(), idx_end());
2499 }
2500
2501 Value *getAggregateOperand() {
2502 return getOperand(0);
2503 }
2504 const Value *getAggregateOperand() const {
2505 return getOperand(0);
2506 }
2507 static unsigned getAggregateOperandIndex() {
2508 return 0U; // get index for modifying correct operand
2509 }
2510
2511 ArrayRef<unsigned> getIndices() const {
2512 return Indices;
2513 }
2514
2515 unsigned getNumIndices() const {
2516 return (unsigned)Indices.size();
2517 }
2518
2519 bool hasIndices() const {
2520 return true;
2521 }
2522
2523 // Methods for support type inquiry through isa, cast, and dyn_cast:
2524 static bool classof(const Instruction *I) {
2525 return I->getOpcode() == Instruction::ExtractValue;
2526 }
2527 static bool classof(const Value *V) {
2528 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2529 }
2530};
2531
2532ExtractValueInst::ExtractValueInst(Value *Agg,
2533 ArrayRef<unsigned> Idxs,
2534 const Twine &NameStr,
2535 Instruction *InsertBefore)
2536 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2537 ExtractValue, Agg, InsertBefore) {
2538 init(Idxs, NameStr);
2539}
2540
2541ExtractValueInst::ExtractValueInst(Value *Agg,
2542 ArrayRef<unsigned> Idxs,
2543 const Twine &NameStr,
2544 BasicBlock *InsertAtEnd)
2545 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2546 ExtractValue, Agg, InsertAtEnd) {
2547 init(Idxs, NameStr);
2548}
2549
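
A minimal sketch of the ExtractValueInst API: given an aggregate of type {i32, [2 x float]}, extract the float at field 1, index 0, i.e. indices {1, 0}. `Agg`, `InsertPt`, and the function name are assumptions; getIndexedType is used as the documented validity check.

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  Value *extractNestedFloat(Value *Agg, Instruction *InsertPt) {
    unsigned Idxs[] = {1, 0};
    // Returns null if the index list is not valid for this aggregate type.
    if (!ExtractValueInst::getIndexedType(Agg->getType(), Idxs))
      return nullptr;
    return ExtractValueInst::Create(Agg, Idxs, "field", InsertPt);
  }
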
2550//===----------------------------------------------------------------------===//
2551// InsertValueInst Class
2552//===----------------------------------------------------------------------===//
2553
2554 /// This instruction inserts a struct field or array element
2555/// value into an aggregate value.
2556///
2557class InsertValueInst : public Instruction {
2558 SmallVector<unsigned, 4> Indices;
2559
2560 InsertValueInst(const InsertValueInst &IVI);
2561
2562 /// Constructors - Create an insertvalue instruction with a base aggregate
2563 /// value, a value to insert, and a list of indices. The first ctor can
2564 /// optionally insert before an existing instruction, the second appends
2565 /// the new instruction to the specified BasicBlock.
2566 inline InsertValueInst(Value *Agg, Value *Val,
2567 ArrayRef<unsigned> Idxs,
2568 const Twine &NameStr,
2569 Instruction *InsertBefore);
2570 inline InsertValueInst(Value *Agg, Value *Val,
2571 ArrayRef<unsigned> Idxs,
2572 const Twine &NameStr, BasicBlock *InsertAtEnd);
2573
2574 /// Constructors - These two constructors are convenience methods because one
2575 /// and two index insertvalue instructions are so common.
2576 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2577 const Twine &NameStr = "",
2578 Instruction *InsertBefore = nullptr);
2579 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2580 BasicBlock *InsertAtEnd);
2581
2582 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2583 const Twine &NameStr);
2584
2585protected:
2586 // Note: Instruction needs to be a friend here to call cloneImpl.
2587 friend class Instruction;
2588
2589 InsertValueInst *cloneImpl() const;
2590
2591public:
2592 // allocate space for exactly two operands
2593 void *operator new(size_t S) { return User::operator new(S, 2); }
2594 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2595
2596 static InsertValueInst *Create(Value *Agg, Value *Val,
2597 ArrayRef<unsigned> Idxs,
2598 const Twine &NameStr = "",
2599 Instruction *InsertBefore = nullptr) {
2600 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2601 }
2602
2603 static InsertValueInst *Create(Value *Agg, Value *Val,
2604 ArrayRef<unsigned> Idxs,
2605 const Twine &NameStr,
2606 BasicBlock *InsertAtEnd) {
2607 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2608 }
2609
2610 /// Transparently provide more efficient getOperand methods.
2611  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2612
2613 using idx_iterator = const unsigned*;
2614
2615 inline idx_iterator idx_begin() const { return Indices.begin(); }
2616 inline idx_iterator idx_end() const { return Indices.end(); }
2617 inline iterator_range<idx_iterator> indices() const {
2618 return make_range(idx_begin(), idx_end());
2619 }
2620
2621 Value *getAggregateOperand() {
2622 return getOperand(0);
2623 }
2624 const Value *getAggregateOperand() const {
2625 return getOperand(0);
2626 }
2627 static unsigned getAggregateOperandIndex() {
2628 return 0U; // get index for modifying correct operand
2629 }
2630
2631 Value *getInsertedValueOperand() {
2632 return getOperand(1);
2633 }
2634 const Value *getInsertedValueOperand() const {
2635 return getOperand(1);
2636 }
2637 static unsigned getInsertedValueOperandIndex() {
2638 return 1U; // get index for modifying correct operand
2639 }
2640
2641 ArrayRef<unsigned> getIndices() const {
2642 return Indices;
2643 }
2644
2645 unsigned getNumIndices() const {
2646 return (unsigned)Indices.size();
2647 }
2648
2649 bool hasIndices() const {
2650 return true;
2651 }
2652
2653 // Methods for support type inquiry through isa, cast, and dyn_cast:
2654 static bool classof(const Instruction *I) {
2655 return I->getOpcode() == Instruction::InsertValue;
2656 }
2657 static bool classof(const Value *V) {
2658 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2659 }
2660};
2661
2662template <>
2663struct OperandTraits<InsertValueInst> :
2664 public FixedNumOperandTraits<InsertValueInst, 2> {
2665};
2666
2667InsertValueInst::InsertValueInst(Value *Agg,
2668 Value *Val,
2669 ArrayRef<unsigned> Idxs,
2670 const Twine &NameStr,
2671 Instruction *InsertBefore)
2672 : Instruction(Agg->getType(), InsertValue,
2673 OperandTraits<InsertValueInst>::op_begin(this),
2674 2, InsertBefore) {
2675 init(Agg, Val, Idxs, NameStr);
2676}
2677
2678InsertValueInst::InsertValueInst(Value *Agg,
2679 Value *Val,
2680 ArrayRef<unsigned> Idxs,
2681 const Twine &NameStr,
2682 BasicBlock *InsertAtEnd)
2683 : Instruction(Agg->getType(), InsertValue,
2684 OperandTraits<InsertValueInst>::op_begin(this),
2685 2, InsertAtEnd) {
2686 init(Agg, Val, Idxs, NameStr);
2687}
2688
2689 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
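As the index accessors above suggest, an insertvalue instruction pairs an aggregate, a value, and a constant index path. A minimal sketch of the Create() factory follows (not part of the analyzed source; Agg, Val, and InsertPt are assumed to come from surrounding IR construction):

#include "llvm/IR/Instructions.h"

// Sketch: write Val into member 1 of the aggregate Agg, inserting the new
// instruction before InsertPt. The index path {1} selects the second field.
static llvm::Value *setSecondField(llvm::Value *Agg, llvm::Value *Val,
                                   llvm::Instruction *InsertPt) {
  return llvm::InsertValueInst::Create(Agg, Val, {1}, "agg.upd", InsertPt);
}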
2690
2691//===----------------------------------------------------------------------===//
2692// PHINode Class
2693//===----------------------------------------------------------------------===//
2694
2695// PHINode - The PHINode class is used to represent the magical mystical PHI
2696// node, that can not exist in nature, but can be synthesized in a computer
2697// scientist's overactive imagination.
2698//
2699class PHINode : public Instruction {
2700 /// The number of operands actually allocated. NumOperands is
2701 /// the number actually in use.
2702 unsigned ReservedSpace;
2703
2704 PHINode(const PHINode &PN);
2705
2706 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2707 const Twine &NameStr = "",
2708 Instruction *InsertBefore = nullptr)
2709 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2710 ReservedSpace(NumReservedValues) {
2711     assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2712 setName(NameStr);
2713 allocHungoffUses(ReservedSpace);
2714 }
2715
2716 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2717 BasicBlock *InsertAtEnd)
2718 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2719 ReservedSpace(NumReservedValues) {
2720     assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
23. Called C++ object pointer is null
2721 setName(NameStr);
2722 allocHungoffUses(ReservedSpace);
2723 }
2724
2725protected:
2726 // Note: Instruction needs to be a friend here to call cloneImpl.
2727 friend class Instruction;
2728
2729 PHINode *cloneImpl() const;
2730
2731 // allocHungoffUses - this is more complicated than the generic
2732 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2733 // values and pointers to the incoming blocks, all in one allocation.
2734 void allocHungoffUses(unsigned N) {
2735 User::allocHungoffUses(N, /* IsPhi */ true);
2736 }
2737
2738public:
2739 /// Constructors - NumReservedValues is a hint for the number of incoming
2740 /// edges that this phi node will have (use 0 if you really have no idea).
2741 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2742 const Twine &NameStr = "",
2743 Instruction *InsertBefore = nullptr) {
2744 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2745 }
2746
2747 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2748 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2749 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
21. Passing null pointer value via 1st parameter 'Ty'
22. Calling constructor for 'PHINode'
2750 }
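The analyzer path above reaches this Create() with a null Ty, which the constructor then dereferences in its isTokenTy() assertion at line 2720. A hedged caller-side sketch of guarding the type before constructing the PHI follows (not the pass's actual code; the helper name and parameters are hypothetical):

#include "llvm/IR/Instructions.h"

// Sketch: only build the PHI when a non-null type is available, mirroring the
// precondition the constructor's assertion relies on.
static llvm::PHINode *createPhiIfTyped(llvm::Type *Ty, unsigned NumPreds,
                                       llvm::BasicBlock *InsertAtEnd) {
  if (!Ty)          // the path the analyzer flags: Ty == nullptr
    return nullptr; // bail out rather than dereference a null Type*
  return llvm::PHINode::Create(Ty, NumPreds, "phi", InsertAtEnd);
}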
2751
2752 /// Provide fast operand accessors
2753   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2754
2755 // Block iterator interface. This provides access to the list of incoming
2756 // basic blocks, which parallels the list of incoming values.
2757
2758 using block_iterator = BasicBlock **;
2759 using const_block_iterator = BasicBlock * const *;
2760
2761 block_iterator block_begin() {
2762 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2763 }
2764
2765 const_block_iterator block_begin() const {
2766 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2767 }
2768
2769 block_iterator block_end() {
2770 return block_begin() + getNumOperands();
2771 }
2772
2773 const_block_iterator block_end() const {
2774 return block_begin() + getNumOperands();
2775 }
2776
2777 iterator_range<block_iterator> blocks() {
2778 return make_range(block_begin(), block_end());
2779 }
2780
2781 iterator_range<const_block_iterator> blocks() const {
2782 return make_range(block_begin(), block_end());
2783 }
2784
2785 op_range incoming_values() { return operands(); }
2786
2787 const_op_range incoming_values() const { return operands(); }
2788
2789 /// Return the number of incoming edges
2790 ///
2791 unsigned getNumIncomingValues() const { return getNumOperands(); }
2792
2793 /// Return incoming value number x
2794 ///
2795 Value *getIncomingValue(unsigned i) const {
2796 return getOperand(i);
2797 }
2798 void setIncomingValue(unsigned i, Value *V) {
2799     assert(V && "PHI node got a null value!");
2800     assert(getType() == V->getType() &&
2801            "All operands to PHI node must be the same type as the PHI node!");
2802 setOperand(i, V);
2803 }
2804
2805 static unsigned getOperandNumForIncomingValue(unsigned i) {
2806 return i;
2807 }
2808
2809 static unsigned getIncomingValueNumForOperand(unsigned i) {
2810 return i;
2811 }
2812
2813 /// Return incoming basic block number @p i.
2814 ///
2815 BasicBlock *getIncomingBlock(unsigned i) const {
2816 return block_begin()[i];
2817 }
2818
2819 /// Return incoming basic block corresponding
2820 /// to an operand of the PHI.
2821 ///
2822 BasicBlock *getIncomingBlock(const Use &U) const {
2823     assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2824 return getIncomingBlock(unsigned(&U - op_begin()));
2825 }
2826
2827 /// Return incoming basic block corresponding
2828 /// to value use iterator.
2829 ///
2830 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2831 return getIncomingBlock(I.getUse());
2832 }
2833
2834 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2835     assert(BB && "PHI node got a null basic block!");
2836 block_begin()[i] = BB;
2837 }
2838
2839 /// Replace every incoming basic block \p Old to basic block \p New.
2840 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2841     assert(New && Old && "PHI node got a null basic block!");
2842 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2843 if (getIncomingBlock(Op) == Old)
2844 setIncomingBlock(Op, New);
2845 }
2846
2847 /// Add an incoming value to the end of the PHI list
2848 ///
2849 void addIncoming(Value *V, BasicBlock *BB) {
2850 if (getNumOperands() == ReservedSpace)
2851 growOperands(); // Get more space!
2852 // Initialize some new operands.
2853 setNumHungOffUseOperands(getNumOperands() + 1);
2854 setIncomingValue(getNumOperands() - 1, V);
2855 setIncomingBlock(getNumOperands() - 1, BB);
2856 }
2857
2858 /// Remove an incoming value. This is useful if a
2859 /// predecessor basic block is deleted. The value removed is returned.
2860 ///
2861 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2862 /// is true), the PHI node is destroyed and any uses of it are replaced with
2863 /// dummy values. The only time there should be zero incoming values to a PHI
2864 /// node is when the block is dead, so this strategy is sound.
2865 ///
2866 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2867
2868 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2869 int Idx = getBasicBlockIndex(BB);
2870     assert(Idx >= 0 && "Invalid basic block argument to remove!");
2871 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2872 }
2873
2874 /// Return the first index of the specified basic
2875 /// block in the value list for this PHI. Returns -1 if no instance.
2876 ///
2877 int getBasicBlockIndex(const BasicBlock *BB) const {
2878 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2879 if (block_begin()[i] == BB)
2880 return i;
2881 return -1;
2882 }
2883
2884 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2885 int Idx = getBasicBlockIndex(BB);
2886     assert(Idx >= 0 && "Invalid basic block argument!");
2887 return getIncomingValue(Idx);
2888 }
2889
2890 /// Set every incoming value(s) for block \p BB to \p V.
2891 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2892     assert(BB && "PHI node got a null basic block!");
2893 bool Found = false;
2894 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2895 if (getIncomingBlock(Op) == BB) {
2896 Found = true;
2897 setIncomingValue(Op, V);
2898 }
2899 (void)Found;
2900     assert(Found && "Invalid basic block argument to set!");
2901 }
2902
2903 /// If the specified PHI node always merges together the
2904 /// same value, return the value, otherwise return null.
2905 Value *hasConstantValue() const;
2906
2907 /// Whether the specified PHI node always merges
2908 /// together the same value, assuming undefs are equal to a unique
2909 /// non-undef value.
2910 bool hasConstantOrUndefValue() const;
2911
2912 /// If the PHI node is complete which means all of its parent's predecessors
2913 /// have incoming value in this PHI, return true, otherwise return false.
2914 bool isComplete() const {
2915 return llvm::all_of(predecessors(getParent()),
2916 [this](const BasicBlock *Pred) {
2917 return getBasicBlockIndex(Pred) >= 0;
2918 });
2919 }
2920
2921 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2922 static bool classof(const Instruction *I) {
2923 return I->getOpcode() == Instruction::PHI;
2924 }
2925 static bool classof(const Value *V) {
2926 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2927 }
2928
2929private:
2930 void growOperands();
2931};
2932
2933template <>
2934struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2935};
2936
2937 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
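A minimal sketch of the PHINode API above (not from the analyzed source): reserve the expected number of incoming edges, then pair each value with its predecessor block. Start, Next, Preheader, and Body are assumed to come from the caller, and Body is assumed non-empty:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

// Sketch: a two-input PHI at the top of a loop body.
static llvm::PHINode *makeLoopPhi(llvm::Value *Start, llvm::Value *Next,
                                  llvm::BasicBlock *Preheader,
                                  llvm::BasicBlock *Body) {
  llvm::PHINode *PN =
      llvm::PHINode::Create(Start->getType(), /*NumReservedValues=*/2, "iv",
                            /*InsertBefore=*/&Body->front());
  PN->addIncoming(Start, Preheader); // value arriving from the preheader
  PN->addIncoming(Next, Body);       // value arriving along the back edge
  return PN;
}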
2938
2939//===----------------------------------------------------------------------===//
2940// LandingPadInst Class
2941//===----------------------------------------------------------------------===//
2942
2943//===---------------------------------------------------------------------------
2944/// The landingpad instruction holds all of the information
2945/// necessary to generate correct exception handling. The landingpad instruction
2946/// cannot be moved from the top of a landing pad block, which itself is
2947/// accessible only from the 'unwind' edge of an invoke. This uses the
2948/// SubclassData field in Value to store whether or not the landingpad is a
2949/// cleanup.
2950///
2951class LandingPadInst : public Instruction {
2952 using CleanupField = BoolBitfieldElementT<0>;
2953
2954 /// The number of operands actually allocated. NumOperands is
2955 /// the number actually in use.
2956 unsigned ReservedSpace;
2957
2958 LandingPadInst(const LandingPadInst &LP);
2959
2960public:
2961 enum ClauseType { Catch, Filter };
2962
2963private:
2964 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2965 const Twine &NameStr, Instruction *InsertBefore);
2966 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2967 const Twine &NameStr, BasicBlock *InsertAtEnd);
2968
2969 // Allocate space for exactly zero operands.
2970 void *operator new(size_t S) { return User::operator new(S); }
2971
2972 void growOperands(unsigned Size);
2973 void init(unsigned NumReservedValues, const Twine &NameStr);
2974
2975protected:
2976 // Note: Instruction needs to be a friend here to call cloneImpl.
2977 friend class Instruction;
2978
2979 LandingPadInst *cloneImpl() const;
2980
2981public:
2982 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2983
2984 /// Constructors - NumReservedClauses is a hint for the number of incoming
2985 /// clauses that this landingpad will have (use 0 if you really have no idea).
2986 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2987 const Twine &NameStr = "",
2988 Instruction *InsertBefore = nullptr);
2989 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2990 const Twine &NameStr, BasicBlock *InsertAtEnd);
2991
2992 /// Provide fast operand accessors
2993   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2994
2995 /// Return 'true' if this landingpad instruction is a
2996 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2997 /// doesn't catch the exception.
2998 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2999
3000 /// Indicate that this landingpad instruction is a cleanup.
3001 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3002
3003 /// Add a catch or filter clause to the landing pad.
3004 void addClause(Constant *ClauseVal);
3005
3006 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3007 /// determine what type of clause this is.
3008 Constant *getClause(unsigned Idx) const {
3009 return cast<Constant>(getOperandList()[Idx]);
3010 }
3011
3012 /// Return 'true' if the clause and index Idx is a catch clause.
3013 bool isCatch(unsigned Idx) const {
3014 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3015 }
3016
3017 /// Return 'true' if the clause and index Idx is a filter clause.
3018 bool isFilter(unsigned Idx) const {
3019 return isa<ArrayType>(getOperandList()[Idx]->getType());
3020 }
3021
3022 /// Get the number of clauses for this landing pad.
3023 unsigned getNumClauses() const { return getNumOperands(); }
3024
3025 /// Grow the size of the operand list to accommodate the new
3026 /// number of clauses.
3027 void reserveClauses(unsigned Size) { growOperands(Size); }
3028
3029 // Methods for support type inquiry through isa, cast, and dyn_cast:
3030 static bool classof(const Instruction *I) {
3031 return I->getOpcode() == Instruction::LandingPad;
3032 }
3033 static bool classof(const Value *V) {
3034 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3035 }
3036};
3037
3038template <>
3039struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
3040};
3041
3042 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
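A minimal sketch of the clause API above (not from the analyzed source): one catch clause plus the cleanup bit. LPadTy and TypeInfo are assumed to be prepared by the caller, and the enclosing function needs a personality function for the result to verify:

#include "llvm/IR/Instructions.h"

// Sketch: a landingpad with one catch clause that also runs cleanups.
static llvm::LandingPadInst *makeLandingPad(llvm::Type *LPadTy,
                                            llvm::Constant *TypeInfo,
                                            llvm::BasicBlock *LPadBB) {
  llvm::LandingPadInst *LP = llvm::LandingPadInst::Create(
      LPadTy, /*NumReservedClauses=*/1, "lpad", LPadBB);
  LP->addClause(TypeInfo); // non-array clause type => catch (see isCatch above)
  LP->setCleanup(true);    // also run when unwinding past this frame
  return LP;
}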
3043
3044//===----------------------------------------------------------------------===//
3045// ReturnInst Class
3046//===----------------------------------------------------------------------===//
3047
3048//===---------------------------------------------------------------------------
3049/// Return a value (possibly void), from a function. Execution
3050/// does not continue in this function any longer.
3051///
3052class ReturnInst : public Instruction {
3053 ReturnInst(const ReturnInst &RI);
3054
3055private:
3056 // ReturnInst constructors:
3057 // ReturnInst() - 'ret void' instruction
3058 // ReturnInst( null) - 'ret void' instruction
3059 // ReturnInst(Value* X) - 'ret X' instruction
3060 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3061 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3062 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3063 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3064 //
3065 // NOTE: If the Value* passed is of type void then the constructor behaves as
3066 // if it was passed NULL.
3067 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3068 Instruction *InsertBefore = nullptr);
3069 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3070 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3071
3072protected:
3073 // Note: Instruction needs to be a friend here to call cloneImpl.
3074 friend class Instruction;
3075
3076 ReturnInst *cloneImpl() const;
3077
3078public:
3079 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3080 Instruction *InsertBefore = nullptr) {
3081 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3082 }
3083
3084 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3085 BasicBlock *InsertAtEnd) {
3086 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3087 }
3088
3089 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3090 return new(0) ReturnInst(C, InsertAtEnd);
3091 }
3092
3093 /// Provide fast operand accessors
3094   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3095
3096 /// Convenience accessor. Returns null if there is no return value.
3097 Value *getReturnValue() const {
3098 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3099 }
3100
3101 unsigned getNumSuccessors() const { return 0; }
3102
3103 // Methods for support type inquiry through isa, cast, and dyn_cast:
3104 static bool classof(const Instruction *I) {
3105 return (I->getOpcode() == Instruction::Ret);
3106 }
3107 static bool classof(const Value *V) {
3108 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3109 }
3110
3111private:
3112 BasicBlock *getSuccessor(unsigned idx) const {
3113     llvm_unreachable("ReturnInst has no successors!");
3114 }
3115
3116 void setSuccessor(unsigned idx, BasicBlock *B) {
3117     llvm_unreachable("ReturnInst has no successors!");
3118 }
3119};
3120
3121template <>
3122struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3123};
3124
3125 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
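A minimal sketch of the two factory forms above (not from the analyzed source), one 'ret <value>' and one 'ret void'; Ctx, RetVal, and the blocks are assumed to exist:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"

// Sketch: terminate ValueBB with 'ret RetVal' and VoidBB with 'ret void'.
static void emitReturns(llvm::LLVMContext &Ctx, llvm::Value *RetVal,
                        llvm::BasicBlock *ValueBB, llvm::BasicBlock *VoidBB) {
  llvm::ReturnInst::Create(Ctx, RetVal, ValueBB);
  llvm::ReturnInst::Create(Ctx, VoidBB);
}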
3126
3127//===----------------------------------------------------------------------===//
3128// BranchInst Class
3129//===----------------------------------------------------------------------===//
3130
3131//===---------------------------------------------------------------------------
3132/// Conditional or Unconditional Branch instruction.
3133///
3134class BranchInst : public Instruction {
3135 /// Ops list - Branches are strange. The operands are ordered:
3136 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3137 /// they don't have to check for cond/uncond branchness. These are mostly
3138 /// accessed relative from op_end().
3139 BranchInst(const BranchInst &BI);
3140 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3141 // BranchInst(BB *B) - 'br B'
3142 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3143 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3144 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3145 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3146 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3147 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3148 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3149 Instruction *InsertBefore = nullptr);
3150 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3151 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3152 BasicBlock *InsertAtEnd);
3153
3154 void AssertOK();
3155
3156protected:
3157 // Note: Instruction needs to be a friend here to call cloneImpl.
3158 friend class Instruction;
3159
3160 BranchInst *cloneImpl() const;
3161
3162public:
3163 /// Iterator type that casts an operand to a basic block.
3164 ///
3165 /// This only makes sense because the successors are stored as adjacent
3166 /// operands for branch instructions.
3167 struct succ_op_iterator
3168 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3169 std::random_access_iterator_tag, BasicBlock *,
3170 ptrdiff_t, BasicBlock *, BasicBlock *> {
3171 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3172
3173 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3174 BasicBlock *operator->() const { return operator*(); }
3175 };
3176
3177 /// The const version of `succ_op_iterator`.
3178 struct const_succ_op_iterator
3179 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3180 std::random_access_iterator_tag,
3181 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3182 const BasicBlock *> {
3183 explicit const_succ_op_iterator(const_value_op_iterator I)
3184 : iterator_adaptor_base(I) {}
3185
3186 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3187 const BasicBlock *operator->() const { return operator*(); }
3188 };
3189
3190 static BranchInst *Create(BasicBlock *IfTrue,
3191 Instruction *InsertBefore = nullptr) {
3192 return new(1) BranchInst(IfTrue, InsertBefore);
3193 }
3194
3195 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3196 Value *Cond, Instruction *InsertBefore = nullptr) {
3197 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3198 }
3199
3200 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3201 return new(1) BranchInst(IfTrue, InsertAtEnd);
3202 }
3203
3204 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3205 Value *Cond, BasicBlock *InsertAtEnd) {
3206 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3207 }
3208
3209 /// Transparently provide more efficient getOperand methods.
3210   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3211
3212 bool isUnconditional() const { return getNumOperands() == 1; }
3213 bool isConditional() const { return getNumOperands() == 3; }
3214
3215 Value *getCondition() const {
3216     assert(isConditional() && "Cannot get condition of an uncond branch!");
3217 return Op<-3>();
3218 }
3219
3220 void setCondition(Value *V) {
3221     assert(isConditional() && "Cannot set condition of unconditional branch!");
3222 Op<-3>() = V;
3223 }
3224
3225 unsigned getNumSuccessors() const { return 1+isConditional(); }
3226
3227 BasicBlock *getSuccessor(unsigned i) const {
3228     assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3229 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3230 }
3231
3232 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3233     assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3234 *(&Op<-1>() - idx) = NewSucc;
3235 }
3236
3237 /// Swap the successors of this branch instruction.
3238 ///
3239 /// Swaps the successors of the branch instruction. This also swaps any
3240 /// branch weight metadata associated with the instruction so that it
3241 /// continues to map correctly to each operand.
3242 void swapSuccessors();
3243
3244 iterator_range<succ_op_iterator> successors() {
3245 return make_range(
3246 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3247 succ_op_iterator(value_op_end()));
3248 }
3249
3250 iterator_range<const_succ_op_iterator> successors() const {
3251 return make_range(const_succ_op_iterator(
3252 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3253 const_succ_op_iterator(value_op_end()));
3254 }
3255
3256 // Methods for support type inquiry through isa, cast, and dyn_cast:
3257 static bool classof(const Instruction *I) {
3258 return (I->getOpcode() == Instruction::Br);
3259 }
3260 static bool classof(const Value *V) {
3261 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3262 }
3263};
3264
3265template <>
3266struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3267};
3268
3269 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
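A minimal sketch of the factories above (not from the analyzed source): one unconditional and one conditional branch. Cond is assumed to be an i1 value, and the blocks are assumed to come from the caller:

#include "llvm/IR/Instructions.h"

// Sketch: 'br IfTrue' at the end of UncondBB, and 'br Cond, IfTrue, IfFalse'
// at the end of CondBB.
static void emitBranches(llvm::BasicBlock *UncondBB, llvm::BasicBlock *CondBB,
                         llvm::BasicBlock *IfTrue, llvm::BasicBlock *IfFalse,
                         llvm::Value *Cond) {
  llvm::BranchInst::Create(IfTrue, UncondBB);
  llvm::BranchInst::Create(IfTrue, IfFalse, Cond, CondBB);
}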
3270
3271//===----------------------------------------------------------------------===//
3272// SwitchInst Class
3273//===----------------------------------------------------------------------===//
3274
3275//===---------------------------------------------------------------------------
3276/// Multiway switch
3277///
3278class SwitchInst : public Instruction {
3279 unsigned ReservedSpace;
3280
3281 // Operand[0] = Value to switch on
3282 // Operand[1] = Default basic block destination
3283 // Operand[2n ] = Value to match
3284 // Operand[2n+1] = BasicBlock to go to on match
3285 SwitchInst(const SwitchInst &SI);
3286
3287 /// Create a new switch instruction, specifying a value to switch on and a
3288 /// default destination. The number of additional cases can be specified here
3289 /// to make memory allocation more efficient. This constructor can also
3290 /// auto-insert before another instruction.
3291 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3292 Instruction *InsertBefore);
3293
3294 /// Create a new switch instruction, specifying a value to switch on and a
3295 /// default destination. The number of additional cases can be specified here
3296 /// to make memory allocation more efficient. This constructor also
3297 /// auto-inserts at the end of the specified BasicBlock.
3298 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3299 BasicBlock *InsertAtEnd);
3300
3301 // allocate space for exactly zero operands
3302 void *operator new(size_t S) { return User::operator new(S); }
3303
3304 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3305 void growOperands();
3306
3307protected:
3308 // Note: Instruction needs to be a friend here to call cloneImpl.
3309 friend class Instruction;
3310
3311 SwitchInst *cloneImpl() const;
3312
3313public:
3314 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3315
3316 // -2
3317 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3318
3319 template <typename CaseHandleT> class CaseIteratorImpl;
3320
3321 /// A handle to a particular switch case. It exposes a convenient interface
3322 /// to both the case value and the successor block.
3323 ///
3324 /// We define this as a template and instantiate it to form both a const and
3325 /// non-const handle.
3326 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3327 class CaseHandleImpl {
3328 // Directly befriend both const and non-const iterators.
3329 friend class SwitchInst::CaseIteratorImpl<
3330 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3331
3332 protected:
3333 // Expose the switch type we're parameterized with to the iterator.
3334 using SwitchInstType = SwitchInstT;
3335
3336 SwitchInstT *SI;
3337 ptrdiff_t Index;
3338
3339 CaseHandleImpl() = default;
3340 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3341
3342 public:
3343 /// Resolves case value for current case.
3344 ConstantIntT *getCaseValue() const {
3345       assert((unsigned)Index < SI->getNumCases() &&
3346              "Index out the number of cases.");
3347 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3348 }
3349
3350 /// Resolves successor for current case.
3351 BasicBlockT *getCaseSuccessor() const {
3352       assert(((unsigned)Index < SI->getNumCases() ||
3353               (unsigned)Index == DefaultPseudoIndex) &&
3354              "Index out the number of cases.");
3355 return SI->getSuccessor(getSuccessorIndex());
3356 }
3357
3358 /// Returns number of current case.
3359 unsigned getCaseIndex() const { return Index; }
3360
3361 /// Returns successor index for current case successor.
3362 unsigned getSuccessorIndex() const {
3363       assert(((unsigned)Index == DefaultPseudoIndex ||
3364               (unsigned)Index < SI->getNumCases()) &&
3365              "Index out the number of cases.");
3366 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3367 }
3368
3369 bool operator==(const CaseHandleImpl &RHS) const {
3370       assert(SI == RHS.SI && "Incompatible operators.");
3371 return Index == RHS.Index;
3372 }
3373 };
3374
3375 using ConstCaseHandle =
3376 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3377
3378 class CaseHandle
3379 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3380 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3381
3382 public:
3383 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3384
3385 /// Sets the new value for current case.
3386 void setValue(ConstantInt *V) const {
3387       assert((unsigned)Index < SI->getNumCases() &&
3388              "Index out the number of cases.");
3389 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3390 }
3391
3392 /// Sets the new successor for current case.
3393 void setSuccessor(BasicBlock *S) const {
3394 SI->setSuccessor(getSuccessorIndex(), S);
3395 }
3396 };
3397
3398 template <typename CaseHandleT>
3399 class CaseIteratorImpl
3400 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3401 std::random_access_iterator_tag,
3402 const CaseHandleT> {
3403 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3404
3405 CaseHandleT Case;
3406
3407 public:
3408 /// Default constructed iterator is in an invalid state until assigned to
3409 /// a case for a particular switch.
3410 CaseIteratorImpl() = default;
3411
3412 /// Initializes case iterator for given SwitchInst and for given
3413 /// case number.
3414 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3415
3416 /// Initializes case iterator for given SwitchInst and for given
3417 /// successor index.
3418 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3419 unsigned SuccessorIndex) {
3420       assert(SuccessorIndex < SI->getNumSuccessors() &&
3421              "Successor index # out of range!");
3422 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3423 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3424 }
3425
3426 /// Support converting to the const variant. This will be a no-op for const
3427 /// variant.
3428 operator CaseIteratorImpl<ConstCaseHandle>() const {
3429 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3430 }
3431
3432 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3433 // Check index correctness after addition.
3434 // Note: Index == getNumCases() means end().
3435       assert(Case.Index + N >= 0 &&
3436              (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3437              "Case.Index out the number of cases.");
3438 Case.Index += N;
3439 return *this;
3440 }
3441 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3442 // Check index correctness after subtraction.
3443 // Note: Case.Index == getNumCases() means end().
3444       assert(Case.Index - N >= 0 &&
3445              (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3446              "Case.Index out the number of cases.");
3447 Case.Index -= N;
3448 return *this;
3449 }
3450 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3451       assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3452 return Case.Index - RHS.Case.Index;
3453 }
3454 bool operator==(const CaseIteratorImpl &RHS) const {
3455 return Case == RHS.Case;
3456 }
3457 bool operator<(const CaseIteratorImpl &RHS) const {
3458       assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3459 return Case.Index < RHS.Case.Index;
3460 }
3461 const CaseHandleT &operator*() const { return Case; }
3462 };
3463
3464 using CaseIt = CaseIteratorImpl<CaseHandle>;
3465 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3466
3467 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3468 unsigned NumCases,
3469 Instruction *InsertBefore = nullptr) {
3470 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3471 }
3472
3473 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3474 unsigned NumCases, BasicBlock *InsertAtEnd) {
3475 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3476 }
3477
3478 /// Provide fast operand accessors
3479   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3480
3481 // Accessor Methods for Switch stmt
3482 Value *getCondition() const { return getOperand(0); }
3483 void setCondition(Value *V) { setOperand(0, V); }
3484
3485 BasicBlock *getDefaultDest() const {
3486 return cast<BasicBlock>(getOperand(1));
3487 }
3488
3489 void setDefaultDest(BasicBlock *DefaultCase) {
3490 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3491 }
3492
3493 /// Return the number of 'cases' in this switch instruction, excluding the
3494 /// default case.
3495 unsigned getNumCases() const {
3496 return getNumOperands()/2 - 1;
3497 }
3498
3499 /// Returns a read/write iterator that points to the first case in the
3500 /// SwitchInst.
3501 CaseIt case_begin() {
3502 return CaseIt(this, 0);
3503 }
3504
3505 /// Returns a read-only iterator that points to the first case in the
3506 /// SwitchInst.
3507 ConstCaseIt case_begin() const {
3508 return ConstCaseIt(this, 0);
3509 }
3510
3511 /// Returns a read/write iterator that points one past the last in the
3512 /// SwitchInst.
3513 CaseIt case_end() {
3514 return CaseIt(this, getNumCases());
3515 }
3516
3517 /// Returns a read-only iterator that points one past the last in the
3518 /// SwitchInst.
3519 ConstCaseIt case_end() const {
3520 return ConstCaseIt(this, getNumCases());
3521 }
3522
3523 /// Iteration adapter for range-for loops.
3524 iterator_range<CaseIt> cases() {
3525 return make_range(case_begin(), case_end());
3526 }
3527
3528 /// Constant iteration adapter for range-for loops.
3529 iterator_range<ConstCaseIt> cases() const {
3530 return make_range(case_begin(), case_end());
3531 }
3532
3533 /// Returns an iterator that points to the default case.
3534 /// Note: this iterator allows to resolve successor only. Attempt
3535 /// to resolve case value causes an assertion.
3536 /// Also note, that increment and decrement also causes an assertion and
3537 /// makes iterator invalid.
3538 CaseIt case_default() {
3539 return CaseIt(this, DefaultPseudoIndex);
3540 }
3541 ConstCaseIt case_default() const {
3542 return ConstCaseIt(this, DefaultPseudoIndex);
3543 }
3544
3545 /// Search all of the case values for the specified constant. If it is
3546 /// explicitly handled, return the case iterator of it, otherwise return
3547 /// default case iterator to indicate that it is handled by the default
3548 /// handler.
3549 CaseIt findCaseValue(const ConstantInt *C) {
3550 return CaseIt(
3551 this,
3552 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3553 }
3554 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3555 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3556 return Case.getCaseValue() == C;
3557 });
3558 if (I != case_end())
3559 return I;
3560
3561 return case_default();
3562 }
3563
3564 /// Finds the unique case value for a given successor. Returns null if the
3565 /// successor is not found, not unique, or is the default case.
3566 ConstantInt *findCaseDest(BasicBlock *BB) {
3567 if (BB == getDefaultDest())
3568 return nullptr;
3569
3570 ConstantInt *CI = nullptr;
3571 for (auto Case : cases()) {
3572 if (Case.getCaseSuccessor() != BB)
3573 continue;
3574
3575 if (CI)
3576 return nullptr; // Multiple cases lead to BB.
3577
3578 CI = Case.getCaseValue();
3579 }
3580
3581 return CI;
3582 }
3583
3584 /// Add an entry to the switch instruction.
3585 /// Note:
3586 /// This action invalidates case_end(). Old case_end() iterator will
3587 /// point to the added case.
3588 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3589
3590 /// This method removes the specified case and its successor from the switch
3591 /// instruction. Note that this operation may reorder the remaining cases at
3592 /// index idx and above.
3593 /// Note:
3594 /// This action invalidates iterators for all cases following the one removed,
3595 /// including the case_end() iterator. It returns an iterator for the next
3596 /// case.
3597 CaseIt removeCase(CaseIt I);
3598
3599 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3600 BasicBlock *getSuccessor(unsigned idx) const {
3601     assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3602 return cast<BasicBlock>(getOperand(idx*2+1));
3603 }
3604 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3605     assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3606 setOperand(idx * 2 + 1, NewSucc);
3607 }
3608
3609 // Methods for support type inquiry through isa, cast, and dyn_cast:
3610 static bool classof(const Instruction *I) {
3611 return I->getOpcode() == Instruction::Switch;
3612 }
3613 static bool classof(const Value *V) {
3614 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3615 }
3616};
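For reference, a minimal usage sketch of the case accessors above (not part of Instructions.h; it assumes the header is included and that the helper name and its arguments are supplied by the caller):

// Hypothetical helper: resolve the block a switch transfers control to for a
// given constant, falling back to the default destination when no case matches.
static llvm::BasicBlock *resolveSwitchTarget(llvm::SwitchInst *SI,
                                             const llvm::ConstantInt *C) {
  llvm::SwitchInst::CaseIt It = SI->findCaseValue(C); // case_default() if unhandled
  if (It == SI->case_default())
    return SI->getDefaultDest(); // the default pseudo-case cannot resolve a value
  return It->getCaseSuccessor();
}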
3617
3618/// A wrapper class to simplify modification of SwitchInst cases along with
3619/// their prof branch_weights metadata.
3620class SwitchInstProfUpdateWrapper {
3621 SwitchInst &SI;
3622 Optional<SmallVector<uint32_t, 8> > Weights = None;
3623 bool Changed = false;
3624
3625protected:
3626 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3627
3628 MDNode *buildProfBranchWeightsMD();
3629
3630 void init();
3631
3632public:
3633 using CaseWeightOpt = Optional<uint32_t>;
3634 SwitchInst *operator->() { return &SI; }
3635 SwitchInst &operator*() { return SI; }
3636 operator SwitchInst *() { return &SI; }
3637
3638 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3639
3640 ~SwitchInstProfUpdateWrapper() {
3641 if (Changed)
3642 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3643 }
3644
3645  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3646  /// the corresponding branch weight.
3647 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3648
3649 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3650 /// specified branch weight for the added case.
3651 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3652
3653  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3654  /// this object so that it does not touch the underlying SwitchInst in its destructor.
3655 SymbolTableList<Instruction>::iterator eraseFromParent();
3656
3657 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3658 CaseWeightOpt getSuccessorWeight(unsigned idx);
3659
3660 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3661};
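A sketch of how the wrapper is typically used (illustrative only; DeadBB is an assumed input). The wrapper's destructor rewrites the branch_weights metadata if any case was removed:

// Remove every case that branches to DeadBB while keeping !prof in sync.
static void removeCasesTo(llvm::SwitchInst &SI, llvm::BasicBlock *DeadBB) {
  llvm::SwitchInstProfUpdateWrapper SIW(SI);
  for (auto It = SIW->case_begin(); It != SIW->case_end();) {
    if (It->getCaseSuccessor() == DeadBB)
      It = SIW.removeCase(It); // drops the corresponding weight as well
    else
      ++It;
  }
} // ~SwitchInstProfUpdateWrapper updates MD_prof here if anything changed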
3662
3663template <>
3664struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3665};
3666
3667DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3668
3669//===----------------------------------------------------------------------===//
3670// IndirectBrInst Class
3671//===----------------------------------------------------------------------===//
3672
3673//===---------------------------------------------------------------------------
3674/// Indirect Branch Instruction.
3675///
3676class IndirectBrInst : public Instruction {
3677 unsigned ReservedSpace;
3678
3679 // Operand[0] = Address to jump to
3680 // Operand[n+1] = n-th destination
3681 IndirectBrInst(const IndirectBrInst &IBI);
3682
3683 /// Create a new indirectbr instruction, specifying an
3684 /// Address to jump to. The number of expected destinations can be specified
3685 /// here to make memory allocation more efficient. This constructor can also
3686 /// autoinsert before another instruction.
3687 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3688
3689 /// Create a new indirectbr instruction, specifying an
3690 /// Address to jump to. The number of expected destinations can be specified
3691 /// here to make memory allocation more efficient. This constructor also
3692 /// autoinserts at the end of the specified BasicBlock.
3693 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3694
3695 // allocate space for exactly zero operands
3696 void *operator new(size_t S) { return User::operator new(S); }
3697
3698 void init(Value *Address, unsigned NumDests);
3699 void growOperands();
3700
3701protected:
3702 // Note: Instruction needs to be a friend here to call cloneImpl.
3703 friend class Instruction;
3704
3705 IndirectBrInst *cloneImpl() const;
3706
3707public:
3708 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3709
3710 /// Iterator type that casts an operand to a basic block.
3711 ///
3712 /// This only makes sense because the successors are stored as adjacent
3713 /// operands for indirectbr instructions.
3714 struct succ_op_iterator
3715 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3716 std::random_access_iterator_tag, BasicBlock *,
3717 ptrdiff_t, BasicBlock *, BasicBlock *> {
3718 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3719
3720 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3721 BasicBlock *operator->() const { return operator*(); }
3722 };
3723
3724 /// The const version of `succ_op_iterator`.
3725 struct const_succ_op_iterator
3726 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3727 std::random_access_iterator_tag,
3728 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3729 const BasicBlock *> {
3730 explicit const_succ_op_iterator(const_value_op_iterator I)
3731 : iterator_adaptor_base(I) {}
3732
3733 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3734 const BasicBlock *operator->() const { return operator*(); }
3735 };
3736
3737 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3738 Instruction *InsertBefore = nullptr) {
3739 return new IndirectBrInst(Address, NumDests, InsertBefore);
3740 }
3741
3742 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3743 BasicBlock *InsertAtEnd) {
3744 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3745 }
3746
3747 /// Provide fast operand accessors.
3748  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3749
3750 // Accessor Methods for IndirectBrInst instruction.
3751 Value *getAddress() { return getOperand(0); }
3752 const Value *getAddress() const { return getOperand(0); }
3753 void setAddress(Value *V) { setOperand(0, V); }
3754
3755  /// Return the number of possible destinations in this
3756  /// indirectbr instruction.
3757 unsigned getNumDestinations() const { return getNumOperands()-1; }
3758
3759 /// Return the specified destination.
3760 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3761 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3762
3763 /// Add a destination.
3764 ///
3765 void addDestination(BasicBlock *Dest);
3766
3767 /// This method removes the specified successor from the
3768 /// indirectbr instruction.
3769 void removeDestination(unsigned i);
3770
3771 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3772 BasicBlock *getSuccessor(unsigned i) const {
3773 return cast<BasicBlock>(getOperand(i+1));
3774 }
3775 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3776 setOperand(i + 1, NewSucc);
3777 }
3778
3779 iterator_range<succ_op_iterator> successors() {
3780 return make_range(succ_op_iterator(std::next(value_op_begin())),
3781 succ_op_iterator(value_op_end()));
3782 }
3783
3784 iterator_range<const_succ_op_iterator> successors() const {
3785 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3786 const_succ_op_iterator(value_op_end()));
3787 }
3788
3789 // Methods for support type inquiry through isa, cast, and dyn_cast:
3790 static bool classof(const Instruction *I) {
3791 return I->getOpcode() == Instruction::IndirectBr;
3792 }
3793 static bool classof(const Value *V) {
3794 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3795 }
3796};
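A short sketch of the destination accessors above (the helper name and its parameters are illustrative, not from the header):

// Append extra destinations to an existing indirectbr and count how many of
// its successors equal a particular block.
static unsigned addAndCount(llvm::IndirectBrInst *IBI,
                            llvm::ArrayRef<llvm::BasicBlock *> NewDests,
                            llvm::BasicBlock *Query) {
  for (llvm::BasicBlock *BB : NewDests)
    IBI->addDestination(BB); // operand list grows; operand 0 stays the address
  unsigned N = 0;
  for (llvm::BasicBlock *Succ : IBI->successors())
    if (Succ == Query)
      ++N;
  return N;
}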
3797
3798template <>
3799struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3800};
3801
3802DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3803
3804//===----------------------------------------------------------------------===//
3805// InvokeInst Class
3806//===----------------------------------------------------------------------===//
3807
3808/// Invoke instruction. The SubclassData field is used to hold the
3809/// calling convention of the call.
3810///
3811class InvokeInst : public CallBase {
3812 /// The number of operands for this call beyond the called function,
3813 /// arguments, and operand bundles.
3814 static constexpr int NumExtraOperands = 2;
3815
3816 /// The index from the end of the operand array to the normal destination.
3817 static constexpr int NormalDestOpEndIdx = -3;
3818
3819 /// The index from the end of the operand array to the unwind destination.
3820 static constexpr int UnwindDestOpEndIdx = -2;
3821
3822 InvokeInst(const InvokeInst &BI);
3823
3824 /// Construct an InvokeInst given a range of arguments.
3825 ///
3826 /// Construct an InvokeInst from a range of arguments
3827 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3828 BasicBlock *IfException, ArrayRef<Value *> Args,
3829 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3830 const Twine &NameStr, Instruction *InsertBefore);
3831
3832 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3833 BasicBlock *IfException, ArrayRef<Value *> Args,
3834 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3835 const Twine &NameStr, BasicBlock *InsertAtEnd);
3836
3837 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3838 BasicBlock *IfException, ArrayRef<Value *> Args,
3839 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3840
3841 /// Compute the number of operands to allocate.
3842 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3843 // We need one operand for the called function, plus our extra operands and
3844 // the input operand counts provided.
3845 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3846 }
3847
3848protected:
3849 // Note: Instruction needs to be a friend here to call cloneImpl.
3850 friend class Instruction;
3851
3852 InvokeInst *cloneImpl() const;
3853
3854public:
3855 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3856 BasicBlock *IfException, ArrayRef<Value *> Args,
3857 const Twine &NameStr,
3858 Instruction *InsertBefore = nullptr) {
3859 int NumOperands = ComputeNumOperands(Args.size());
3860 return new (NumOperands)
3861 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3862 NameStr, InsertBefore);
3863 }
3864
3865 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3866 BasicBlock *IfException, ArrayRef<Value *> Args,
3867 ArrayRef<OperandBundleDef> Bundles = None,
3868 const Twine &NameStr = "",
3869 Instruction *InsertBefore = nullptr) {
3870 int NumOperands =
3871 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3872 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3873
3874 return new (NumOperands, DescriptorBytes)
3875 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3876 NameStr, InsertBefore);
3877 }
3878
3879 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3880 BasicBlock *IfException, ArrayRef<Value *> Args,
3881 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3882 int NumOperands = ComputeNumOperands(Args.size());
3883 return new (NumOperands)
3884 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3885 NameStr, InsertAtEnd);
3886 }
3887
3888 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3889 BasicBlock *IfException, ArrayRef<Value *> Args,
3890 ArrayRef<OperandBundleDef> Bundles,
3891 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3892 int NumOperands =
3893 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3894 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3895
3896 return new (NumOperands, DescriptorBytes)
3897 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3898 NameStr, InsertAtEnd);
3899 }
3900
3901 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3902 BasicBlock *IfException, ArrayRef<Value *> Args,
3903 const Twine &NameStr,
3904 Instruction *InsertBefore = nullptr) {
3905 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3906 IfException, Args, None, NameStr, InsertBefore);
3907 }
3908
3909 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3910 BasicBlock *IfException, ArrayRef<Value *> Args,
3911 ArrayRef<OperandBundleDef> Bundles = None,
3912 const Twine &NameStr = "",
3913 Instruction *InsertBefore = nullptr) {
3914 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3915 IfException, Args, Bundles, NameStr, InsertBefore);
3916 }
3917
3918 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3919 BasicBlock *IfException, ArrayRef<Value *> Args,
3920 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3921 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3922 IfException, Args, NameStr, InsertAtEnd);
3923 }
3924
3925 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3926 BasicBlock *IfException, ArrayRef<Value *> Args,
3927 ArrayRef<OperandBundleDef> Bundles,
3928 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3929 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3930 IfException, Args, Bundles, NameStr, InsertAtEnd);
3931 }
3932
3933 /// Create a clone of \p II with a different set of operand bundles and
3934 /// insert it before \p InsertPt.
3935 ///
3936 /// The returned invoke instruction is identical to \p II in every way except
3937 /// that the operand bundles for the new instruction are set to the operand
3938 /// bundles in \p Bundles.
3939 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3940 Instruction *InsertPt = nullptr);
3941
3942 // get*Dest - Return the destination basic blocks...
3943 BasicBlock *getNormalDest() const {
3944 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3945 }
3946 BasicBlock *getUnwindDest() const {
3947 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3948 }
3949 void setNormalDest(BasicBlock *B) {
3950 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3951 }
3952 void setUnwindDest(BasicBlock *B) {
3953 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3954 }
3955
3956 /// Get the landingpad instruction from the landing pad
3957 /// block (the unwind destination).
3958 LandingPadInst *getLandingPadInst() const;
3959
3960 BasicBlock *getSuccessor(unsigned i) const {
3961    assert(i < 2 && "Successor # out of range for invoke!");
3962 return i == 0 ? getNormalDest() : getUnwindDest();
3963 }
3964
3965 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3966    assert(i < 2 && "Successor # out of range for invoke!");
3967 if (i == 0)
3968 setNormalDest(NewSucc);
3969 else
3970 setUnwindDest(NewSucc);
3971 }
3972
3973 unsigned getNumSuccessors() const { return 2; }
3974
3975 // Methods for support type inquiry through isa, cast, and dyn_cast:
3976 static bool classof(const Instruction *I) {
3977 return (I->getOpcode() == Instruction::Invoke);
3978 }
3979 static bool classof(const Value *V) {
3980 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3981 }
3982
3983private:
3984 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3985 // method so that subclasses cannot accidentally use it.
3986 template <typename Bitfield>
3987 void setSubclassData(typename Bitfield::Type Value) {
3988 Instruction::setSubclassData<Bitfield>(Value);
3989 }
3990};
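A minimal creation sketch (names are illustrative), assuming Callee, NormalBB, UnwindBB, and InsertBefore are provided by the caller and that UnwindBB begins with a landingpad:

// Emit an invoke of Callee with no arguments; on normal return control flows
// to NormalBB, on unwind to UnwindBB.
static llvm::InvokeInst *emitInvoke(llvm::FunctionCallee Callee,
                                    llvm::BasicBlock *NormalBB,
                                    llvm::BasicBlock *UnwindBB,
                                    llvm::Instruction *InsertBefore) {
  llvm::InvokeInst *II = llvm::InvokeInst::Create(
      Callee, NormalBB, UnwindBB, /*Args=*/{}, "", InsertBefore);
  // getNormalDest()/getUnwindDest() now read the two trailing block operands.
  return II;
}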
3991
3992InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3993 BasicBlock *IfException, ArrayRef<Value *> Args,
3994 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3995 const Twine &NameStr, Instruction *InsertBefore)
3996 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3997 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3998 InsertBefore) {
3999 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4000}
4001
4002InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4003 BasicBlock *IfException, ArrayRef<Value *> Args,
4004 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4005 const Twine &NameStr, BasicBlock *InsertAtEnd)
4006 : CallBase(Ty->getReturnType(), Instruction::Invoke,
4007 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4008 InsertAtEnd) {
4009 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4010}
4011
4012//===----------------------------------------------------------------------===//
4013// CallBrInst Class
4014//===----------------------------------------------------------------------===//
4015
4016/// CallBr instruction, tracking function calls that may not return control but
4017/// instead transfer it to a third location. The SubclassData field is used to
4018/// hold the calling convention of the call.
4019///
4020class CallBrInst : public CallBase {
4021
4022 unsigned NumIndirectDests;
4023
4024 CallBrInst(const CallBrInst &BI);
4025
4026 /// Construct a CallBrInst given a range of arguments.
4027 ///
4028 /// Construct a CallBrInst from a range of arguments
4029 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4030 ArrayRef<BasicBlock *> IndirectDests,
4031 ArrayRef<Value *> Args,
4032 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4033 const Twine &NameStr, Instruction *InsertBefore);
4034
4035 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4036 ArrayRef<BasicBlock *> IndirectDests,
4037 ArrayRef<Value *> Args,
4038 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4039 const Twine &NameStr, BasicBlock *InsertAtEnd);
4040
4041 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4042 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4043 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4044
4045 /// Compute the number of operands to allocate.
4046 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4047 int NumBundleInputs = 0) {
4048 // We need one operand for the called function, plus our extra operands and
4049 // the input operand counts provided.
4050 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4051 }
4052
4053protected:
4054 // Note: Instruction needs to be a friend here to call cloneImpl.
4055 friend class Instruction;
4056
4057 CallBrInst *cloneImpl() const;
4058
4059public:
4060 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4061 BasicBlock *DefaultDest,
4062 ArrayRef<BasicBlock *> IndirectDests,
4063 ArrayRef<Value *> Args, const Twine &NameStr,
4064 Instruction *InsertBefore = nullptr) {
4065 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4066 return new (NumOperands)
4067 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4068 NumOperands, NameStr, InsertBefore);
4069 }
4070
4071 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4072 BasicBlock *DefaultDest,
4073 ArrayRef<BasicBlock *> IndirectDests,
4074 ArrayRef<Value *> Args,
4075 ArrayRef<OperandBundleDef> Bundles = None,
4076 const Twine &NameStr = "",
4077 Instruction *InsertBefore = nullptr) {
4078 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4079 CountBundleInputs(Bundles));
4080 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4081
4082 return new (NumOperands, DescriptorBytes)
4083 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4084 NumOperands, NameStr, InsertBefore);
4085 }
4086
4087 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4088 BasicBlock *DefaultDest,
4089 ArrayRef<BasicBlock *> IndirectDests,
4090 ArrayRef<Value *> Args, const Twine &NameStr,
4091 BasicBlock *InsertAtEnd) {
4092 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4093 return new (NumOperands)
4094 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4095 NumOperands, NameStr, InsertAtEnd);
4096 }
4097
4098 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4099 BasicBlock *DefaultDest,
4100 ArrayRef<BasicBlock *> IndirectDests,
4101 ArrayRef<Value *> Args,
4102 ArrayRef<OperandBundleDef> Bundles,
4103 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4104 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4105 CountBundleInputs(Bundles));
4106 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4107
4108 return new (NumOperands, DescriptorBytes)
4109 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4110 NumOperands, NameStr, InsertAtEnd);
4111 }
4112
4113 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4114 ArrayRef<BasicBlock *> IndirectDests,
4115 ArrayRef<Value *> Args, const Twine &NameStr,
4116 Instruction *InsertBefore = nullptr) {
4117 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4118 IndirectDests, Args, NameStr, InsertBefore);
4119 }
4120
4121 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4122 ArrayRef<BasicBlock *> IndirectDests,
4123 ArrayRef<Value *> Args,
4124 ArrayRef<OperandBundleDef> Bundles = None,
4125 const Twine &NameStr = "",
4126 Instruction *InsertBefore = nullptr) {
4127 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4128 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4129 }
4130
4131 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4132 ArrayRef<BasicBlock *> IndirectDests,
4133 ArrayRef<Value *> Args, const Twine &NameStr,
4134 BasicBlock *InsertAtEnd) {
4135 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4136 IndirectDests, Args, NameStr, InsertAtEnd);
4137 }
4138
4139 static CallBrInst *Create(FunctionCallee Func,
4140 BasicBlock *DefaultDest,
4141 ArrayRef<BasicBlock *> IndirectDests,
4142 ArrayRef<Value *> Args,
4143 ArrayRef<OperandBundleDef> Bundles,
4144 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4145 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4146 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4147 }
4148
4149 /// Create a clone of \p CBI with a different set of operand bundles and
4150 /// insert it before \p InsertPt.
4151 ///
4152 /// The returned callbr instruction is identical to \p CBI in every way
4153 /// except that the operand bundles for the new instruction are set to the
4154 /// operand bundles in \p Bundles.
4155 static CallBrInst *Create(CallBrInst *CBI,
4156 ArrayRef<OperandBundleDef> Bundles,
4157 Instruction *InsertPt = nullptr);
4158
4159 /// Return the number of callbr indirect dest labels.
4160 ///
4161 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4162
4163 /// getIndirectDestLabel - Return the i-th indirect dest label.
4164 ///
4165 Value *getIndirectDestLabel(unsigned i) const {
4166    assert(i < getNumIndirectDests() && "Out of bounds!");
4167 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4168 }
4169
4170 Value *getIndirectDestLabelUse(unsigned i) const {
4171    assert(i < getNumIndirectDests() && "Out of bounds!");
4172 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4173 }
4174
4175 // Return the destination basic blocks...
4176 BasicBlock *getDefaultDest() const {
4177 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4178 }
4179 BasicBlock *getIndirectDest(unsigned i) const {
4180 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4181 }
4182 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4183 SmallVector<BasicBlock *, 16> IndirectDests;
4184 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4185 IndirectDests.push_back(getIndirectDest(i));
4186 return IndirectDests;
4187 }
4188 void setDefaultDest(BasicBlock *B) {
4189 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4190 }
4191 void setIndirectDest(unsigned i, BasicBlock *B) {
4192 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4193 }
4194
4195 BasicBlock *getSuccessor(unsigned i) const {
4196    assert(i < getNumSuccessors() + 1 &&
4197           "Successor # out of range for callbr!");
4198 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4199 }
4200
4201 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4202    assert(i < getNumIndirectDests() + 1 &&
4203           "Successor # out of range for callbr!");
4204 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4205 }
4206
4207 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4208
4209 // Methods for support type inquiry through isa, cast, and dyn_cast:
4210 static bool classof(const Instruction *I) {
4211 return (I->getOpcode() == Instruction::CallBr);
4212 }
4213 static bool classof(const Value *V) {
4214 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4215 }
4216
4217private:
4218 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4219 // method so that subclasses cannot accidentally use it.
4220 template <typename Bitfield>
4221 void setSubclassData(typename Bitfield::Type Value) {
4222 Instruction::setSubclassData<Bitfield>(Value);
4223 }
4224};
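A sketch showing how getSuccessor() above decomposes into the default and indirect destinations (the helper name is illustrative):

// Collect all successors of a callbr: the fallthrough destination first,
// then the indirect destinations in order.
static llvm::SmallVector<llvm::BasicBlock *, 8>
callBrSuccessors(const llvm::CallBrInst *CBI) {
  llvm::SmallVector<llvm::BasicBlock *, 8> Succs;
  Succs.push_back(CBI->getDefaultDest());       // successor #0
  for (unsigned I = 0, E = CBI->getNumIndirectDests(); I != E; ++I)
    Succs.push_back(CBI->getIndirectDest(I));   // successors #1..#E
  return Succs;
}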
4225
4226CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4227 ArrayRef<BasicBlock *> IndirectDests,
4228 ArrayRef<Value *> Args,
4229 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4230 const Twine &NameStr, Instruction *InsertBefore)
4231 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4232 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4233 InsertBefore) {
4234 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4235}
4236
4237CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4238 ArrayRef<BasicBlock *> IndirectDests,
4239 ArrayRef<Value *> Args,
4240 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4241 const Twine &NameStr, BasicBlock *InsertAtEnd)
4242 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4243 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4244 InsertAtEnd) {
4245 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4246}
4247
4248//===----------------------------------------------------------------------===//
4249// ResumeInst Class
4250//===----------------------------------------------------------------------===//
4251
4252//===---------------------------------------------------------------------------
4253/// Resume the propagation of an exception.
4254///
4255class ResumeInst : public Instruction {
4256 ResumeInst(const ResumeInst &RI);
4257
4258 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4259 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4260
4261protected:
4262 // Note: Instruction needs to be a friend here to call cloneImpl.
4263 friend class Instruction;
4264
4265 ResumeInst *cloneImpl() const;
4266
4267public:
4268 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4269 return new(1) ResumeInst(Exn, InsertBefore);
4270 }
4271
4272 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4273 return new(1) ResumeInst(Exn, InsertAtEnd);
4274 }
4275
4276 /// Provide fast operand accessors
4277  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4278
4279 /// Convenience accessor.
4280 Value *getValue() const { return Op<0>(); }
4281
4282 unsigned getNumSuccessors() const { return 0; }
4283
4284 // Methods for support type inquiry through isa, cast, and dyn_cast:
4285 static bool classof(const Instruction *I) {
4286 return I->getOpcode() == Instruction::Resume;
4287 }
4288 static bool classof(const Value *V) {
4289 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4290 }
4291
4292private:
4293 BasicBlock *getSuccessor(unsigned idx) const {
4294    llvm_unreachable("ResumeInst has no successors!");
4295 }
4296
4297 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4298    llvm_unreachable("ResumeInst has no successors!");
4299 }
4300};
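A one-line creation sketch (Exn and BB are assumed inputs; Exn is typically the aggregate produced by a landingpad in the same function):

// Re-raise an in-flight exception at the end of BB.
static llvm::ResumeInst *emitResume(llvm::Value *Exn, llvm::BasicBlock *BB) {
  return llvm::ResumeInst::Create(Exn, BB); // single operand, no successors
}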
4301
4302template <>
4303struct OperandTraits<ResumeInst> :
4304 public FixedNumOperandTraits<ResumeInst, 1> {
4305};
4306
4307DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4308
4309//===----------------------------------------------------------------------===//
4310// CatchSwitchInst Class
4311//===----------------------------------------------------------------------===//
4312class CatchSwitchInst : public Instruction {
4313 using UnwindDestField = BoolBitfieldElementT<0>;
4314
4315 /// The number of operands actually allocated. NumOperands is
4316 /// the number actually in use.
4317 unsigned ReservedSpace;
4318
4319 // Operand[0] = Outer scope
4320 // Operand[1] = Unwind block destination
4321 // Operand[n] = BasicBlock to go to on match
4322 CatchSwitchInst(const CatchSwitchInst &CSI);
4323
4324  /// Create a new catchswitch instruction, specifying an
4325  /// unwind destination. The number of additional handlers can be specified
4326  /// here to make memory allocation more efficient.
4327  /// This constructor can also autoinsert before another instruction.
4328 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4329 unsigned NumHandlers, const Twine &NameStr,
4330 Instruction *InsertBefore);
4331
4332  /// Create a new catchswitch instruction, specifying an
4333  /// unwind destination. The number of additional handlers can be specified
4334  /// here to make memory allocation more efficient.
4335  /// This constructor also autoinserts at the end of the specified BasicBlock.
4336 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4337 unsigned NumHandlers, const Twine &NameStr,
4338 BasicBlock *InsertAtEnd);
4339
4340 // allocate space for exactly zero operands
4341 void *operator new(size_t S) { return User::operator new(S); }
4342
4343 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4344 void growOperands(unsigned Size);
4345
4346protected:
4347 // Note: Instruction needs to be a friend here to call cloneImpl.
4348 friend class Instruction;
4349
4350 CatchSwitchInst *cloneImpl() const;
4351
4352public:
4353 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4354
4355 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4356 unsigned NumHandlers,
4357 const Twine &NameStr = "",
4358 Instruction *InsertBefore = nullptr) {
4359 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4360 InsertBefore);
4361 }
4362
4363 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4364 unsigned NumHandlers, const Twine &NameStr,
4365 BasicBlock *InsertAtEnd) {
4366 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4367 InsertAtEnd);
4368 }
4369
4370 /// Provide fast operand accessors
4371  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4372
4373 // Accessor Methods for CatchSwitch stmt
4374 Value *getParentPad() const { return getOperand(0); }
4375 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4376
4377 // Accessor Methods for CatchSwitch stmt
4378 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4379 bool unwindsToCaller() const { return !hasUnwindDest(); }
4380 BasicBlock *getUnwindDest() const {
4381 if (hasUnwindDest())
4382 return cast<BasicBlock>(getOperand(1));
4383 return nullptr;
4384 }
4385 void setUnwindDest(BasicBlock *UnwindDest) {
4386    assert(UnwindDest);
4387    assert(hasUnwindDest());
4388 setOperand(1, UnwindDest);
4389 }
4390
4391  /// Return the number of 'handlers' in this catchswitch
4392  /// instruction, excluding the unwind destination.
4393 unsigned getNumHandlers() const {
4394 if (hasUnwindDest())
4395 return getNumOperands() - 2;
4396 return getNumOperands() - 1;
4397 }
4398
4399private:
4400 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4401 static const BasicBlock *handler_helper(const Value *V) {
4402 return cast<BasicBlock>(V);
4403 }
4404
4405public:
4406 using DerefFnTy = BasicBlock *(*)(Value *);
4407 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4408 using handler_range = iterator_range<handler_iterator>;
4409 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4410 using const_handler_iterator =
4411 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4412 using const_handler_range = iterator_range<const_handler_iterator>;
4413
4414 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4415 handler_iterator handler_begin() {
4416 op_iterator It = op_begin() + 1;
4417 if (hasUnwindDest())
4418 ++It;
4419 return handler_iterator(It, DerefFnTy(handler_helper));
4420 }
4421
4422 /// Returns an iterator that points to the first handler in the
4423 /// CatchSwitchInst.
4424 const_handler_iterator handler_begin() const {
4425 const_op_iterator It = op_begin() + 1;
4426 if (hasUnwindDest())
4427 ++It;
4428 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4429 }
4430
4431 /// Returns a read-only iterator that points one past the last
4432 /// handler in the CatchSwitchInst.
4433 handler_iterator handler_end() {
4434 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4435 }
4436
4437 /// Returns an iterator that points one past the last handler in the
4438 /// CatchSwitchInst.
4439 const_handler_iterator handler_end() const {
4440 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4441 }
4442
4443 /// iteration adapter for range-for loops.
4444 handler_range handlers() {
4445 return make_range(handler_begin(), handler_end());
4446 }
4447
4448 /// iteration adapter for range-for loops.
4449 const_handler_range handlers() const {
4450 return make_range(handler_begin(), handler_end());
4451 }
4452
4453  /// Add a handler to the catchswitch instruction.
4454  /// Note:
4455  /// This action invalidates handler_end(). The old handler_end() iterator
4456  /// will point to the added handler.
4457 void addHandler(BasicBlock *Dest);
4458
4459 void removeHandler(handler_iterator HI);
4460
4461 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4462 BasicBlock *getSuccessor(unsigned Idx) const {
4463    assert(Idx < getNumSuccessors() &&
4464           "Successor # out of range for catchswitch!");
4465 return cast<BasicBlock>(getOperand(Idx + 1));
4466 }
4467 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4468    assert(Idx < getNumSuccessors() &&
4469           "Successor # out of range for catchswitch!");
4470 setOperand(Idx + 1, NewSucc);
4471 }
4472
4473 // Methods for support type inquiry through isa, cast, and dyn_cast:
4474 static bool classof(const Instruction *I) {
4475 return I->getOpcode() == Instruction::CatchSwitch;
4476 }
4477 static bool classof(const Value *V) {
4478 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4479 }
4480};
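A sketch of the handler iteration above (illustrative helper; assumes the header is included):

// Count the handler blocks of a catchswitch; the parent pad and the optional
// unwind destination are not part of the handlers() range.
static unsigned countHandlers(const llvm::CatchSwitchInst &CSI) {
  unsigned N = 0;
  for (const llvm::BasicBlock *Handler : CSI.handlers()) {
    (void)Handler; // each handler block starts with a catchpad
    ++N;
  }
  return N; // equals getNumHandlers()
}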
4481
4482template <>
4483struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4484
4485DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4486
4487//===----------------------------------------------------------------------===//
4488// CleanupPadInst Class
4489//===----------------------------------------------------------------------===//
4490class CleanupPadInst : public FuncletPadInst {
4491private:
4492 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4493 unsigned Values, const Twine &NameStr,
4494 Instruction *InsertBefore)
4495 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4496 NameStr, InsertBefore) {}
4497 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4498 unsigned Values, const Twine &NameStr,
4499 BasicBlock *InsertAtEnd)
4500 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4501 NameStr, InsertAtEnd) {}
4502
4503public:
4504 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4505 const Twine &NameStr = "",
4506 Instruction *InsertBefore = nullptr) {
4507 unsigned Values = 1 + Args.size();
4508 return new (Values)
4509 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4510 }
4511
4512 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4513 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4514 unsigned Values = 1 + Args.size();
4515 return new (Values)
4516 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4517 }
4518
4519 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4520 static bool classof(const Instruction *I) {
4521 return I->getOpcode() == Instruction::CleanupPad;
4522 }
4523 static bool classof(const Value *V) {
4524 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4525 }
4526};
4527
4528//===----------------------------------------------------------------------===//
4529// CatchPadInst Class
4530//===----------------------------------------------------------------------===//
4531class CatchPadInst : public FuncletPadInst {
4532private:
4533 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4534 unsigned Values, const Twine &NameStr,
4535 Instruction *InsertBefore)
4536 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4537 NameStr, InsertBefore) {}
4538 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4539 unsigned Values, const Twine &NameStr,
4540 BasicBlock *InsertAtEnd)
4541 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4542 NameStr, InsertAtEnd) {}
4543
4544public:
4545 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4546 const Twine &NameStr = "",
4547 Instruction *InsertBefore = nullptr) {
4548 unsigned Values = 1 + Args.size();
4549 return new (Values)
4550 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4551 }
4552
4553 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4554 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4555 unsigned Values = 1 + Args.size();
4556 return new (Values)
4557 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4558 }
4559
4560 /// Convenience accessors
4561 CatchSwitchInst *getCatchSwitch() const {
4562 return cast<CatchSwitchInst>(Op<-1>());
4563 }
4564 void setCatchSwitch(Value *CatchSwitch) {
4565    assert(CatchSwitch);
4566 Op<-1>() = CatchSwitch;
4567 }
4568
4569 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4570 static bool classof(const Instruction *I) {
4571 return I->getOpcode() == Instruction::CatchPad;
4572 }
4573 static bool classof(const Value *V) {
4574 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4575 }
4576};
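A sketch that pairs the Create() and accessor methods above (helper and argument names are illustrative):

// Begin a catch handler attached to CS; the new catchpad stores the
// catchswitch in its trailing operand, as getCatchSwitch() documents.
static llvm::CatchPadInst *beginHandler(llvm::CatchSwitchInst *CS,
                                        llvm::ArrayRef<llvm::Value *> Args,
                                        llvm::Instruction *InsertBefore) {
  llvm::CatchPadInst *CP =
      llvm::CatchPadInst::Create(CS, Args, "catchpad", InsertBefore);
  // The caller is still responsible for CS->addHandler(<handler block>).
  return CP;
}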
4577
4578//===----------------------------------------------------------------------===//
4579// CatchReturnInst Class
4580//===----------------------------------------------------------------------===//
4581
4582class CatchReturnInst : public Instruction {
4583 CatchReturnInst(const CatchReturnInst &RI);
4584 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4585 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4586
4587 void init(Value *CatchPad, BasicBlock *BB);
4588
4589protected:
4590 // Note: Instruction needs to be a friend here to call cloneImpl.
4591 friend class Instruction;
4592
4593 CatchReturnInst *cloneImpl() const;
4594
4595public:
4596 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4597 Instruction *InsertBefore = nullptr) {
4598    assert(CatchPad);
4599    assert(BB);
4600 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4601 }
4602
4603 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4604 BasicBlock *InsertAtEnd) {
4605    assert(CatchPad);
4606    assert(BB);
4607 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4608 }
4609
4610 /// Provide fast operand accessors
4611  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4612
4613 /// Convenience accessors.
4614 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4615 void setCatchPad(CatchPadInst *CatchPad) {
4616    assert(CatchPad);
4617 Op<0>() = CatchPad;
4618 }
4619
4620 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4621 void setSuccessor(BasicBlock *NewSucc) {
4622    assert(NewSucc);
4623 Op<1>() = NewSucc;
4624 }
4625 unsigned getNumSuccessors() const { return 1; }
4626
4627 /// Get the parentPad of this catchret's catchpad's catchswitch.
4628 /// The successor block is implicitly a member of this funclet.
4629 Value *getCatchSwitchParentPad() const {
4630 return getCatchPad()->getCatchSwitch()->getParentPad();
4631 }
4632
4633 // Methods for support type inquiry through isa, cast, and dyn_cast:
4634 static bool classof(const Instruction *I) {
4635 return (I->getOpcode() == Instruction::CatchRet);
4636 }
4637 static bool classof(const Value *V) {
4638 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4639 }
4640
4641private:
4642 BasicBlock *getSuccessor(unsigned Idx) const {
4643    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4644 return getSuccessor();
4645 }
4646
4647 void setSuccessor(unsigned Idx, BasicBlock *B) {
4648    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4649 setSuccessor(B);
4650 }
4651};
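A minimal sketch using the accessors above (CatchPad, TargetBB, and InsertAtEnd are assumed inputs; the helper name is illustrative):

// Leave a catch handler: control returns to TargetBB, which implicitly joins
// the catchpad's funclet.
static llvm::CatchReturnInst *emitCatchRet(llvm::CatchPadInst *CatchPad,
                                           llvm::BasicBlock *TargetBB,
                                           llvm::BasicBlock *InsertAtEnd) {
  return llvm::CatchReturnInst::Create(CatchPad, TargetBB, InsertAtEnd);
}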
4652
4653template <>
4654struct OperandTraits<CatchReturnInst>
4655 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4656
4657DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
4658
4659//===----------------------------------------------------------------------===//
4660// CleanupReturnInst Class
4661//===----------------------------------------------------------------------===//
4662
4663class CleanupReturnInst : public Instruction {
4664 using UnwindDestField = BoolBitfieldElementT<0>;
4665
4666private:
4667 CleanupReturnInst(const CleanupReturnInst &RI);
4668 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4669 Instruction *InsertBefore = nullptr);
4670 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4671 BasicBlock *InsertAtEnd);
4672
4673 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4674
4675protected:
4676 // Note: Instruction needs to be a friend here to call cloneImpl.
4677 friend class Instruction;
4678
4679 CleanupReturnInst *cloneImpl() const;
4680
4681public:
4682 static CleanupReturnInst *Create(Value *CleanupPad,
4683 BasicBlock *UnwindBB = nullptr,
4684 Instruction *InsertBefore = nullptr) {
4685    assert(CleanupPad);
4686 unsigned Values = 1;
4687 if (UnwindBB)
4688 ++Values;
4689 return new (Values)
4690 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4691 }
4692
4693 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4694 BasicBlock *InsertAtEnd) {
4695    assert(CleanupPad);
4696 unsigned Values = 1;
4697 if (UnwindBB)
4698 ++Values;
4699 return new (Values)
4700 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4701 }
4702
4703 /// Provide fast operand accessors
4704  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4705
4706 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4707 bool unwindsToCaller() const { return !hasUnwindDest(); }
4708
4709 /// Convenience accessor.
4710 CleanupPadInst *getCleanupPad() const {
4711 return cast<CleanupPadInst>(Op<0>());
4712 }
4713 void setCleanupPad(CleanupPadInst *CleanupPad) {
4714    assert(CleanupPad);
4715 Op<0>() = CleanupPad;
4716 }