Bug Summary

File: llvm/lib/CodeGen/WinEHPrepare.cpp
Warning: line 212, column 30 — "Called C++ object pointer is null"

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name WinEHPrepare.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen -I include -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu 
-internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -ferror-limit 19 -fvisibility-inlines-hidden -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-26-234817-15343-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp

/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp

1//===-- WinEHPrepare - Prepare exception handling for code generation ---===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass lowers LLVM IR exception handling into something closer to what the
10// backend wants for functions using a personality function from a runtime
11// provided by MSVC. Functions with other personality functions are left alone
12// and may be prepared by other passes. In particular, all supported MSVC
13// personality functions require cleanup code to be outlined, and the C++
14// personality requires catch handler code to be outlined.
15//
16//===----------------------------------------------------------------------===//
17
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/MapVector.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/Triple.h"
22#include "llvm/Analysis/CFG.h"
23#include "llvm/Analysis/EHPersonalities.h"
24#include "llvm/CodeGen/MachineBasicBlock.h"
25#include "llvm/CodeGen/Passes.h"
26#include "llvm/CodeGen/WinEHFuncInfo.h"
27#include "llvm/IR/Verifier.h"
28#include "llvm/InitializePasses.h"
29#include "llvm/MC/MCSymbol.h"
30#include "llvm/Pass.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/raw_ostream.h"
34#include "llvm/Transforms/Utils/BasicBlockUtils.h"
35#include "llvm/Transforms/Utils/Cloning.h"
36#include "llvm/Transforms/Utils/Local.h"
37#include "llvm/Transforms/Utils/SSAUpdater.h"
38
39using namespace llvm;
40
#define DEBUG_TYPE "winehprepare"
42
43static cl::opt<bool> DisableDemotion(
44 "disable-demotion", cl::Hidden,
45 cl::desc(
46 "Clone multicolor basic blocks but do not demote cross scopes"),
47 cl::init(false));
48
49static cl::opt<bool> DisableCleanups(
50 "disable-cleanups", cl::Hidden,
51 cl::desc("Do not remove implausible terminators or other similar cleanups"),
52 cl::init(false));
53
54static cl::opt<bool> DemoteCatchSwitchPHIOnlyOpt(
55 "demote-catchswitch-only", cl::Hidden,
56 cl::desc("Demote catchswitch BBs only (for wasm EH)"), cl::init(false));
57
58namespace {
59
60class WinEHPrepare : public FunctionPass {
61public:
62 static char ID; // Pass identification, replacement for typeid.
63 WinEHPrepare(bool DemoteCatchSwitchPHIOnly = false)
64 : FunctionPass(ID), DemoteCatchSwitchPHIOnly(DemoteCatchSwitchPHIOnly) {}
65
66 bool runOnFunction(Function &Fn) override;
67
68 bool doFinalization(Module &M) override;
69
70 void getAnalysisUsage(AnalysisUsage &AU) const override;
71
72 StringRef getPassName() const override {
73 return "Windows exception handling preparation";
74 }
75
76private:
77 void insertPHIStores(PHINode *OriginalPHI, AllocaInst *SpillSlot);
78 void
79 insertPHIStore(BasicBlock *PredBlock, Value *PredVal, AllocaInst *SpillSlot,
80 SmallVectorImpl<std::pair<BasicBlock *, Value *>> &Worklist);
81 AllocaInst *insertPHILoads(PHINode *PN, Function &F);
82 void replaceUseWithLoad(Value *V, Use &U, AllocaInst *&SpillSlot,
83 DenseMap<BasicBlock *, Value *> &Loads, Function &F);
84 bool prepareExplicitEH(Function &F);
85 void colorFunclets(Function &F);
86
87 void demotePHIsOnFunclets(Function &F, bool DemoteCatchSwitchPHIOnly);
88 void cloneCommonBlocks(Function &F);
89 void removeImplausibleInstructions(Function &F);
90 void cleanupPreparedFunclets(Function &F);
91 void verifyPreparedFunclets(Function &F);
92
93 bool DemoteCatchSwitchPHIOnly;
94
95 // All fields are reset by runOnFunction.
96 EHPersonality Personality = EHPersonality::Unknown;
97
98 const DataLayout *DL = nullptr;
99 DenseMap<BasicBlock *, ColorVector> BlockColors;
100 MapVector<BasicBlock *, std::vector<BasicBlock *>> FuncletBlocks;
101};
102
103} // end anonymous namespace
104
105char WinEHPrepare::ID = 0;
106INITIALIZE_PASS(WinEHPrepare, DEBUG_TYPE, "Prepare Windows exceptions",static void *initializeWinEHPreparePassOnce(PassRegistry &
Registry) { PassInfo *PI = new PassInfo( "Prepare Windows exceptions"
, "winehprepare", &WinEHPrepare::ID, PassInfo::NormalCtor_t
(callDefaultCtor<WinEHPrepare>), false, false); Registry
.registerPass(*PI, true); return PI; } static llvm::once_flag
InitializeWinEHPreparePassFlag; void llvm::initializeWinEHPreparePass
(PassRegistry &Registry) { llvm::call_once(InitializeWinEHPreparePassFlag
, initializeWinEHPreparePassOnce, std::ref(Registry)); }
107 false, false)static void *initializeWinEHPreparePassOnce(PassRegistry &
Registry) { PassInfo *PI = new PassInfo( "Prepare Windows exceptions"
, "winehprepare", &WinEHPrepare::ID, PassInfo::NormalCtor_t
(callDefaultCtor<WinEHPrepare>), false, false); Registry
.registerPass(*PI, true); return PI; } static llvm::once_flag
InitializeWinEHPreparePassFlag; void llvm::initializeWinEHPreparePass
(PassRegistry &Registry) { llvm::call_once(InitializeWinEHPreparePassFlag
, initializeWinEHPreparePassOnce, std::ref(Registry)); }
108
109FunctionPass *llvm::createWinEHPass(bool DemoteCatchSwitchPHIOnly) {
110 return new WinEHPrepare(DemoteCatchSwitchPHIOnly);
111}
112
113bool WinEHPrepare::runOnFunction(Function &Fn) {
114 if (!Fn.hasPersonalityFn())
115 return false;
116
117 // Classify the personality to see what kind of preparation we need.
118 Personality = classifyEHPersonality(Fn.getPersonalityFn());
119
120 // Do nothing if this is not a scope-based personality.
121 if (!isScopedEHPersonality(Personality))
122 return false;
123
124 DL = &Fn.getParent()->getDataLayout();
125 return prepareExplicitEH(Fn);
126}
127
128bool WinEHPrepare::doFinalization(Module &M) { return false; }
129
130void WinEHPrepare::getAnalysisUsage(AnalysisUsage &AU) const {}
131
132static int addUnwindMapEntry(WinEHFuncInfo &FuncInfo, int ToState,
133 const BasicBlock *BB) {
134 CxxUnwindMapEntry UME;
135 UME.ToState = ToState;
136 UME.Cleanup = BB;
137 FuncInfo.CxxUnwindMap.push_back(UME);
138 return FuncInfo.getLastStateNumber();
139}
140
141static void addTryBlockMapEntry(WinEHFuncInfo &FuncInfo, int TryLow,
142 int TryHigh, int CatchHigh,
143 ArrayRef<const CatchPadInst *> Handlers) {
144 WinEHTryBlockMapEntry TBME;
145 TBME.TryLow = TryLow;
146 TBME.TryHigh = TryHigh;
147 TBME.CatchHigh = CatchHigh;
148 assert(TBME.TryLow <= TBME.TryHigh)(static_cast <bool> (TBME.TryLow <= TBME.TryHigh) ? void
(0) : __assert_fail ("TBME.TryLow <= TBME.TryHigh", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 148, __extension__ __PRETTY_FUNCTION__))
;
149 for (const CatchPadInst *CPI : Handlers) {
150 WinEHHandlerType HT;
151 Constant *TypeInfo = cast<Constant>(CPI->getArgOperand(0));
152 if (TypeInfo->isNullValue())
153 HT.TypeDescriptor = nullptr;
154 else
155 HT.TypeDescriptor = cast<GlobalVariable>(TypeInfo->stripPointerCasts());
156 HT.Adjectives = cast<ConstantInt>(CPI->getArgOperand(1))->getZExtValue();
157 HT.Handler = CPI->getParent();
158 if (auto *AI =
159 dyn_cast<AllocaInst>(CPI->getArgOperand(2)->stripPointerCasts()))
160 HT.CatchObj.Alloca = AI;
161 else
162 HT.CatchObj.Alloca = nullptr;
163 TBME.HandlerArray.push_back(HT);
164 }
165 FuncInfo.TryBlockMap.push_back(TBME);
166}
167
168static BasicBlock *getCleanupRetUnwindDest(const CleanupPadInst *CleanupPad) {
169 for (const User *U : CleanupPad->users())
170 if (const auto *CRI = dyn_cast<CleanupReturnInst>(U))
171 return CRI->getUnwindDest();
172 return nullptr;
173}
174
175static void calculateStateNumbersForInvokes(const Function *Fn,
176 WinEHFuncInfo &FuncInfo) {
177 auto *F = const_cast<Function *>(Fn);
178 DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*F);
179 for (BasicBlock &BB : *F) {
180 auto *II = dyn_cast<InvokeInst>(BB.getTerminator());
6
Assuming the object is a 'InvokeInst'
181 if (!II
6.1
'II' is non-null
6.1
'II' is non-null
6.1
'II' is non-null
)
7
Taking false branch
182 continue;
183
184 auto &BBColors = BlockColors[&BB];
185 assert(BBColors.size() == 1 && "multi-color BB not removed by preparation")(static_cast <bool> (BBColors.size() == 1 && "multi-color BB not removed by preparation"
) ? void (0) : __assert_fail ("BBColors.size() == 1 && \"multi-color BB not removed by preparation\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 185, __extension__ __PRETTY_FUNCTION__))
;
8
Assuming the condition is true
9
'?' condition is true
186 BasicBlock *FuncletEntryBB = BBColors.front();
187
188 BasicBlock *FuncletUnwindDest;
189 auto *FuncletPad =
190 dyn_cast<FuncletPadInst>(FuncletEntryBB->getFirstNonPHI());
10
Assuming the object is not a 'FuncletPadInst'
191 assert(FuncletPad || FuncletEntryBB == &Fn->getEntryBlock())(static_cast <bool> (FuncletPad || FuncletEntryBB == &
Fn->getEntryBlock()) ? void (0) : __assert_fail ("FuncletPad || FuncletEntryBB == &Fn->getEntryBlock()"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 191, __extension__ __PRETTY_FUNCTION__))
;
11
Assuming the condition is true
12
'?' condition is true
192 if (!FuncletPad
12.1
'FuncletPad' is null
12.1
'FuncletPad' is null
12.1
'FuncletPad' is null
)
13
Taking true branch
193 FuncletUnwindDest = nullptr;
194 else if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
195 FuncletUnwindDest = CatchPad->getCatchSwitch()->getUnwindDest();
196 else if (auto *CleanupPad = dyn_cast<CleanupPadInst>(FuncletPad))
197 FuncletUnwindDest = getCleanupRetUnwindDest(CleanupPad);
198 else
199 llvm_unreachable("unexpected funclet pad!")::llvm::llvm_unreachable_internal("unexpected funclet pad!", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 199)
;
200
201 BasicBlock *InvokeUnwindDest = II->getUnwindDest();
14
Calling 'InvokeInst::getUnwindDest'
30
Returning from 'InvokeInst::getUnwindDest'
31
'InvokeUnwindDest' initialized here
202 int BaseState = -1;
203 if (FuncletUnwindDest == InvokeUnwindDest) {
32
Assuming 'FuncletUnwindDest' is equal to 'InvokeUnwindDest'
33
Taking true branch
204 auto BaseStateI = FuncInfo.FuncletBaseStateMap.find(FuncletPad);
205 if (BaseStateI != FuncInfo.FuncletBaseStateMap.end())
34
Taking false branch
206 BaseState = BaseStateI->second;
207 }
208
209 if (BaseState != -1) {
35
Taking false branch
210 FuncInfo.InvokeStateMap[II] = BaseState;
211 } else {
212 Instruction *PadInst = InvokeUnwindDest->getFirstNonPHI();
36
Called C++ object pointer is null
213 assert(FuncInfo.EHPadStateMap.count(PadInst) && "EH Pad has no state!")(static_cast <bool> (FuncInfo.EHPadStateMap.count(PadInst
) && "EH Pad has no state!") ? void (0) : __assert_fail
("FuncInfo.EHPadStateMap.count(PadInst) && \"EH Pad has no state!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 213, __extension__ __PRETTY_FUNCTION__))
;
214 FuncInfo.InvokeStateMap[II] = FuncInfo.EHPadStateMap[PadInst];
215 }
216 }
217}
218
219// Given BB which ends in an unwind edge, return the EHPad that this BB belongs
220// to. If the unwind edge came from an invoke, return null.
221static const BasicBlock *getEHPadFromPredecessor(const BasicBlock *BB,
222 Value *ParentPad) {
223 const Instruction *TI = BB->getTerminator();
224 if (isa<InvokeInst>(TI))
225 return nullptr;
226 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
227 if (CatchSwitch->getParentPad() != ParentPad)
228 return nullptr;
229 return BB;
230 }
231 assert(!TI->isEHPad() && "unexpected EHPad!")(static_cast <bool> (!TI->isEHPad() && "unexpected EHPad!"
) ? void (0) : __assert_fail ("!TI->isEHPad() && \"unexpected EHPad!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 231, __extension__ __PRETTY_FUNCTION__))
;
232 auto *CleanupPad = cast<CleanupReturnInst>(TI)->getCleanupPad();
233 if (CleanupPad->getParentPad() != ParentPad)
234 return nullptr;
235 return CleanupPad->getParent();
236}
237
238// Starting from a EHPad, Backward walk through control-flow graph
239// to produce two primary outputs:
240// FuncInfo.EHPadStateMap[] and FuncInfo.CxxUnwindMap[]
241static void calculateCXXStateNumbers(WinEHFuncInfo &FuncInfo,
242 const Instruction *FirstNonPHI,
243 int ParentState) {
244 const BasicBlock *BB = FirstNonPHI->getParent();
245 assert(BB->isEHPad() && "not a funclet!")(static_cast <bool> (BB->isEHPad() && "not a funclet!"
) ? void (0) : __assert_fail ("BB->isEHPad() && \"not a funclet!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 245, __extension__ __PRETTY_FUNCTION__))
;
246
247 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FirstNonPHI)) {
248 assert(FuncInfo.EHPadStateMap.count(CatchSwitch) == 0 &&(static_cast <bool> (FuncInfo.EHPadStateMap.count(CatchSwitch
) == 0 && "shouldn't revist catch funclets!") ? void (
0) : __assert_fail ("FuncInfo.EHPadStateMap.count(CatchSwitch) == 0 && \"shouldn't revist catch funclets!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 249, __extension__ __PRETTY_FUNCTION__))
249 "shouldn't revist catch funclets!")(static_cast <bool> (FuncInfo.EHPadStateMap.count(CatchSwitch
) == 0 && "shouldn't revist catch funclets!") ? void (
0) : __assert_fail ("FuncInfo.EHPadStateMap.count(CatchSwitch) == 0 && \"shouldn't revist catch funclets!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 249, __extension__ __PRETTY_FUNCTION__))
;
250
251 SmallVector<const CatchPadInst *, 2> Handlers;
252 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
253 auto *CatchPad = cast<CatchPadInst>(CatchPadBB->getFirstNonPHI());
254 Handlers.push_back(CatchPad);
255 }
256 int TryLow = addUnwindMapEntry(FuncInfo, ParentState, nullptr);
257 FuncInfo.EHPadStateMap[CatchSwitch] = TryLow;
258 for (const BasicBlock *PredBlock : predecessors(BB))
259 if ((PredBlock = getEHPadFromPredecessor(PredBlock,
260 CatchSwitch->getParentPad())))
261 calculateCXXStateNumbers(FuncInfo, PredBlock->getFirstNonPHI(),
262 TryLow);
263 int CatchLow = addUnwindMapEntry(FuncInfo, ParentState, nullptr);
264
265 // catchpads are separate funclets in C++ EH due to the way rethrow works.
266 int TryHigh = CatchLow - 1;
267
268 // MSVC FrameHandler3/4 on x64&Arm64 expect Catch Handlers in $tryMap$
269 // stored in pre-order (outer first, inner next), not post-order
270 // Add to map here. Fix the CatchHigh after children are processed
271 const Module *Mod = BB->getParent()->getParent();
272 bool IsPreOrder = Triple(Mod->getTargetTriple()).isArch64Bit();
273 if (IsPreOrder)
274 addTryBlockMapEntry(FuncInfo, TryLow, TryHigh, CatchLow, Handlers);
275 unsigned TBMEIdx = FuncInfo.TryBlockMap.size() - 1;
276
277 for (const auto *CatchPad : Handlers) {
278 FuncInfo.FuncletBaseStateMap[CatchPad] = CatchLow;
279 for (const User *U : CatchPad->users()) {
280 const auto *UserI = cast<Instruction>(U);
281 if (auto *InnerCatchSwitch = dyn_cast<CatchSwitchInst>(UserI)) {
282 BasicBlock *UnwindDest = InnerCatchSwitch->getUnwindDest();
283 if (!UnwindDest || UnwindDest == CatchSwitch->getUnwindDest())
284 calculateCXXStateNumbers(FuncInfo, UserI, CatchLow);
285 }
286 if (auto *InnerCleanupPad = dyn_cast<CleanupPadInst>(UserI)) {
287 BasicBlock *UnwindDest = getCleanupRetUnwindDest(InnerCleanupPad);
288 // If a nested cleanup pad reports a null unwind destination and the
289 // enclosing catch pad doesn't it must be post-dominated by an
290 // unreachable instruction.
291 if (!UnwindDest || UnwindDest == CatchSwitch->getUnwindDest())
292 calculateCXXStateNumbers(FuncInfo, UserI, CatchLow);
293 }
294 }
295 }
296 int CatchHigh = FuncInfo.getLastStateNumber();
297 // Now child Catches are processed, update CatchHigh
298 if (IsPreOrder)
299 FuncInfo.TryBlockMap[TBMEIdx].CatchHigh = CatchHigh;
300 else // PostOrder
301 addTryBlockMapEntry(FuncInfo, TryLow, TryHigh, CatchHigh, Handlers);
302
303 LLVM_DEBUG(dbgs() << "TryLow[" << BB->getName() << "]: " << TryLow << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "TryLow[" << BB->
getName() << "]: " << TryLow << '\n'; } } while
(false)
;
304 LLVM_DEBUG(dbgs() << "TryHigh[" << BB->getName() << "]: " << TryHighdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "TryHigh[" << BB->
getName() << "]: " << TryHigh << '\n'; } } while
(false)
305 << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "TryHigh[" << BB->
getName() << "]: " << TryHigh << '\n'; } } while
(false)
;
306 LLVM_DEBUG(dbgs() << "CatchHigh[" << BB->getName() << "]: " << CatchHighdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "CatchHigh[" << BB->
getName() << "]: " << CatchHigh << '\n'; } }
while (false)
307 << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "CatchHigh[" << BB->
getName() << "]: " << CatchHigh << '\n'; } }
while (false)
;
308 } else {
309 auto *CleanupPad = cast<CleanupPadInst>(FirstNonPHI);
310
311 // It's possible for a cleanup to be visited twice: it might have multiple
312 // cleanupret instructions.
313 if (FuncInfo.EHPadStateMap.count(CleanupPad))
314 return;
315
316 int CleanupState = addUnwindMapEntry(FuncInfo, ParentState, BB);
317 FuncInfo.EHPadStateMap[CleanupPad] = CleanupState;
318 LLVM_DEBUG(dbgs() << "Assigning state #" << CleanupState << " to BB "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "Assigning state #" <<
CleanupState << " to BB " << BB->getName() <<
'\n'; } } while (false)
319 << BB->getName() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "Assigning state #" <<
CleanupState << " to BB " << BB->getName() <<
'\n'; } } while (false)
;
320 for (const BasicBlock *PredBlock : predecessors(BB)) {
321 if ((PredBlock = getEHPadFromPredecessor(PredBlock,
322 CleanupPad->getParentPad()))) {
323 calculateCXXStateNumbers(FuncInfo, PredBlock->getFirstNonPHI(),
324 CleanupState);
325 }
326 }
327 for (const User *U : CleanupPad->users()) {
328 const auto *UserI = cast<Instruction>(U);
329 if (UserI->isEHPad())
330 report_fatal_error("Cleanup funclets for the MSVC++ personality cannot "
331 "contain exceptional actions");
332 }
333 }
334}
335
336static int addSEHExcept(WinEHFuncInfo &FuncInfo, int ParentState,
337 const Function *Filter, const BasicBlock *Handler) {
338 SEHUnwindMapEntry Entry;
339 Entry.ToState = ParentState;
340 Entry.IsFinally = false;
341 Entry.Filter = Filter;
342 Entry.Handler = Handler;
343 FuncInfo.SEHUnwindMap.push_back(Entry);
344 return FuncInfo.SEHUnwindMap.size() - 1;
345}
346
347static int addSEHFinally(WinEHFuncInfo &FuncInfo, int ParentState,
348 const BasicBlock *Handler) {
349 SEHUnwindMapEntry Entry;
350 Entry.ToState = ParentState;
351 Entry.IsFinally = true;
352 Entry.Filter = nullptr;
353 Entry.Handler = Handler;
354 FuncInfo.SEHUnwindMap.push_back(Entry);
355 return FuncInfo.SEHUnwindMap.size() - 1;
356}
357
358// Starting from a EHPad, Backward walk through control-flow graph
359// to produce two primary outputs:
360// FuncInfo.EHPadStateMap[] and FuncInfo.SEHUnwindMap[]
361static void calculateSEHStateNumbers(WinEHFuncInfo &FuncInfo,
362 const Instruction *FirstNonPHI,
363 int ParentState) {
364 const BasicBlock *BB = FirstNonPHI->getParent();
365 assert(BB->isEHPad() && "no a funclet!")(static_cast <bool> (BB->isEHPad() && "no a funclet!"
) ? void (0) : __assert_fail ("BB->isEHPad() && \"no a funclet!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 365, __extension__ __PRETTY_FUNCTION__))
;
366
367 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FirstNonPHI)) {
368 assert(FuncInfo.EHPadStateMap.count(CatchSwitch) == 0 &&(static_cast <bool> (FuncInfo.EHPadStateMap.count(CatchSwitch
) == 0 && "shouldn't revist catch funclets!") ? void (
0) : __assert_fail ("FuncInfo.EHPadStateMap.count(CatchSwitch) == 0 && \"shouldn't revist catch funclets!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 369, __extension__ __PRETTY_FUNCTION__))
369 "shouldn't revist catch funclets!")(static_cast <bool> (FuncInfo.EHPadStateMap.count(CatchSwitch
) == 0 && "shouldn't revist catch funclets!") ? void (
0) : __assert_fail ("FuncInfo.EHPadStateMap.count(CatchSwitch) == 0 && \"shouldn't revist catch funclets!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 369, __extension__ __PRETTY_FUNCTION__))
;
370
371 // Extract the filter function and the __except basic block and create a
372 // state for them.
373 assert(CatchSwitch->getNumHandlers() == 1 &&(static_cast <bool> (CatchSwitch->getNumHandlers() ==
1 && "SEH doesn't have multiple handlers per __try")
? void (0) : __assert_fail ("CatchSwitch->getNumHandlers() == 1 && \"SEH doesn't have multiple handlers per __try\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 374, __extension__ __PRETTY_FUNCTION__))
374 "SEH doesn't have multiple handlers per __try")(static_cast <bool> (CatchSwitch->getNumHandlers() ==
1 && "SEH doesn't have multiple handlers per __try")
? void (0) : __assert_fail ("CatchSwitch->getNumHandlers() == 1 && \"SEH doesn't have multiple handlers per __try\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 374, __extension__ __PRETTY_FUNCTION__))
;
375 const auto *CatchPad =
376 cast<CatchPadInst>((*CatchSwitch->handler_begin())->getFirstNonPHI());
377 const BasicBlock *CatchPadBB = CatchPad->getParent();
378 const Constant *FilterOrNull =
379 cast<Constant>(CatchPad->getArgOperand(0)->stripPointerCasts());
380 const Function *Filter = dyn_cast<Function>(FilterOrNull);
381 assert((Filter || FilterOrNull->isNullValue()) &&(static_cast <bool> ((Filter || FilterOrNull->isNullValue
()) && "unexpected filter value") ? void (0) : __assert_fail
("(Filter || FilterOrNull->isNullValue()) && \"unexpected filter value\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 382, __extension__ __PRETTY_FUNCTION__))
382 "unexpected filter value")(static_cast <bool> ((Filter || FilterOrNull->isNullValue
()) && "unexpected filter value") ? void (0) : __assert_fail
("(Filter || FilterOrNull->isNullValue()) && \"unexpected filter value\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 382, __extension__ __PRETTY_FUNCTION__))
;
383 int TryState = addSEHExcept(FuncInfo, ParentState, Filter, CatchPadBB);
384
385 // Everything in the __try block uses TryState as its parent state.
386 FuncInfo.EHPadStateMap[CatchSwitch] = TryState;
387 LLVM_DEBUG(dbgs() << "Assigning state #" << TryState << " to BB "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "Assigning state #" <<
TryState << " to BB " << CatchPadBB->getName(
) << '\n'; } } while (false)
388 << CatchPadBB->getName() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "Assigning state #" <<
TryState << " to BB " << CatchPadBB->getName(
) << '\n'; } } while (false)
;
389 for (const BasicBlock *PredBlock : predecessors(BB))
390 if ((PredBlock = getEHPadFromPredecessor(PredBlock,
391 CatchSwitch->getParentPad())))
392 calculateSEHStateNumbers(FuncInfo, PredBlock->getFirstNonPHI(),
393 TryState);
394
395 // Everything in the __except block unwinds to ParentState, just like code
396 // outside the __try.
397 for (const User *U : CatchPad->users()) {
398 const auto *UserI = cast<Instruction>(U);
399 if (auto *InnerCatchSwitch = dyn_cast<CatchSwitchInst>(UserI)) {
400 BasicBlock *UnwindDest = InnerCatchSwitch->getUnwindDest();
401 if (!UnwindDest || UnwindDest == CatchSwitch->getUnwindDest())
402 calculateSEHStateNumbers(FuncInfo, UserI, ParentState);
403 }
404 if (auto *InnerCleanupPad = dyn_cast<CleanupPadInst>(UserI)) {
405 BasicBlock *UnwindDest = getCleanupRetUnwindDest(InnerCleanupPad);
406 // If a nested cleanup pad reports a null unwind destination and the
407 // enclosing catch pad doesn't it must be post-dominated by an
408 // unreachable instruction.
409 if (!UnwindDest || UnwindDest == CatchSwitch->getUnwindDest())
410 calculateSEHStateNumbers(FuncInfo, UserI, ParentState);
411 }
412 }
413 } else {
414 auto *CleanupPad = cast<CleanupPadInst>(FirstNonPHI);
415
416 // It's possible for a cleanup to be visited twice: it might have multiple
417 // cleanupret instructions.
418 if (FuncInfo.EHPadStateMap.count(CleanupPad))
419 return;
420
421 int CleanupState = addSEHFinally(FuncInfo, ParentState, BB);
422 FuncInfo.EHPadStateMap[CleanupPad] = CleanupState;
423 LLVM_DEBUG(dbgs() << "Assigning state #" << CleanupState << " to BB "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "Assigning state #" <<
CleanupState << " to BB " << BB->getName() <<
'\n'; } } while (false)
424 << BB->getName() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare")) { dbgs() << "Assigning state #" <<
CleanupState << " to BB " << BB->getName() <<
'\n'; } } while (false)
;
425 for (const BasicBlock *PredBlock : predecessors(BB))
426 if ((PredBlock =
427 getEHPadFromPredecessor(PredBlock, CleanupPad->getParentPad())))
428 calculateSEHStateNumbers(FuncInfo, PredBlock->getFirstNonPHI(),
429 CleanupState);
430 for (const User *U : CleanupPad->users()) {
431 const auto *UserI = cast<Instruction>(U);
432 if (UserI->isEHPad())
433 report_fatal_error("Cleanup funclets for the SEH personality cannot "
434 "contain exceptional actions");
435 }
436 }
437}
438
439static bool isTopLevelPadForMSVC(const Instruction *EHPad) {
440 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(EHPad))
441 return isa<ConstantTokenNone>(CatchSwitch->getParentPad()) &&
442 CatchSwitch->unwindsToCaller();
443 if (auto *CleanupPad = dyn_cast<CleanupPadInst>(EHPad))
444 return isa<ConstantTokenNone>(CleanupPad->getParentPad()) &&
445 getCleanupRetUnwindDest(CleanupPad) == nullptr;
446 if (isa<CatchPadInst>(EHPad))
447 return false;
448 llvm_unreachable("unexpected EHPad!")::llvm::llvm_unreachable_internal("unexpected EHPad!", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 448)
;
449}
450
451void llvm::calculateSEHStateNumbers(const Function *Fn,
452 WinEHFuncInfo &FuncInfo) {
453 // Don't compute state numbers twice.
454 if (!FuncInfo.SEHUnwindMap.empty())
455 return;
456
457 for (const BasicBlock &BB : *Fn) {
458 if (!BB.isEHPad())
459 continue;
460 const Instruction *FirstNonPHI = BB.getFirstNonPHI();
461 if (!isTopLevelPadForMSVC(FirstNonPHI))
462 continue;
463 ::calculateSEHStateNumbers(FuncInfo, FirstNonPHI, -1);
464 }
465
466 calculateStateNumbersForInvokes(Fn, FuncInfo);
467}
468
469void llvm::calculateWinCXXEHStateNumbers(const Function *Fn,
470 WinEHFuncInfo &FuncInfo) {
471 // Return if it's already been done.
472 if (!FuncInfo.EHPadStateMap.empty())
473 return;
474
475 for (const BasicBlock &BB : *Fn) {
476 if (!BB.isEHPad())
477 continue;
478 const Instruction *FirstNonPHI = BB.getFirstNonPHI();
479 if (!isTopLevelPadForMSVC(FirstNonPHI))
480 continue;
481 calculateCXXStateNumbers(FuncInfo, FirstNonPHI, -1);
482 }
483
484 calculateStateNumbersForInvokes(Fn, FuncInfo);
485}
486
487static int addClrEHHandler(WinEHFuncInfo &FuncInfo, int HandlerParentState,
488 int TryParentState, ClrHandlerType HandlerType,
489 uint32_t TypeToken, const BasicBlock *Handler) {
490 ClrEHUnwindMapEntry Entry;
491 Entry.HandlerParentState = HandlerParentState;
492 Entry.TryParentState = TryParentState;
493 Entry.Handler = Handler;
494 Entry.HandlerType = HandlerType;
495 Entry.TypeToken = TypeToken;
496 FuncInfo.ClrEHUnwindMap.push_back(Entry);
497 return FuncInfo.ClrEHUnwindMap.size() - 1;
498}
499
500void llvm::calculateClrEHStateNumbers(const Function *Fn,
501 WinEHFuncInfo &FuncInfo) {
502 // Return if it's already been done.
503 if (!FuncInfo.EHPadStateMap.empty())
1
Assuming the condition is false
2
Taking false branch
504 return;
505
506 // This numbering assigns one state number to each catchpad and cleanuppad.
507 // It also computes two tree-like relations over states:
508 // 1) Each state has a "HandlerParentState", which is the state of the next
509 // outer handler enclosing this state's handler (same as nearest ancestor
510 // per the ParentPad linkage on EH pads, but skipping over catchswitches).
511 // 2) Each state has a "TryParentState", which:
512 // a) for a catchpad that's not the last handler on its catchswitch, is
513 // the state of the next catchpad on that catchswitch
514 // b) for all other pads, is the state of the pad whose try region is the
515 // next outer try region enclosing this state's try region. The "try
516 // regions are not present as such in the IR, but will be inferred
517 // based on the placement of invokes and pads which reach each other
518 // by exceptional exits
519 // Catchswitches do not get their own states, but each gets mapped to the
520 // state of its first catchpad.
521
522 // Step one: walk down from outermost to innermost funclets, assigning each
523 // catchpad and cleanuppad a state number. Add an entry to the
524 // ClrEHUnwindMap for each state, recording its HandlerParentState and
525 // handler attributes. Record the TryParentState as well for each catchpad
526 // that's not the last on its catchswitch, but initialize all other entries'
527 // TryParentStates to a sentinel -1 value that the next pass will update.
528
529 // Seed a worklist with pads that have no parent.
530 SmallVector<std::pair<const Instruction *, int>, 8> Worklist;
531 for (const BasicBlock &BB : *Fn) {
532 const Instruction *FirstNonPHI = BB.getFirstNonPHI();
533 const Value *ParentPad;
534 if (const auto *CPI = dyn_cast<CleanupPadInst>(FirstNonPHI))
535 ParentPad = CPI->getParentPad();
536 else if (const auto *CSI = dyn_cast<CatchSwitchInst>(FirstNonPHI))
537 ParentPad = CSI->getParentPad();
538 else
539 continue;
540 if (isa<ConstantTokenNone>(ParentPad))
541 Worklist.emplace_back(FirstNonPHI, -1);
542 }
543
544 // Use the worklist to visit all pads, from outer to inner. Record
545 // HandlerParentState for all pads. Record TryParentState only for catchpads
546 // that aren't the last on their catchswitch (setting all other entries'
547 // TryParentStates to an initial value of -1). This loop is also responsible
548 // for setting the EHPadStateMap entry for all catchpads, cleanuppads, and
549 // catchswitches.
550 while (!Worklist.empty()) {
3
Loop condition is false. Execution continues on line 604
551 const Instruction *Pad;
552 int HandlerParentState;
553 std::tie(Pad, HandlerParentState) = Worklist.pop_back_val();
554
555 if (const auto *Cleanup = dyn_cast<CleanupPadInst>(Pad)) {
556 // Create the entry for this cleanup with the appropriate handler
557 // properties. Finally and fault handlers are distinguished by arity.
558 ClrHandlerType HandlerType =
559 (Cleanup->getNumArgOperands() ? ClrHandlerType::Fault
560 : ClrHandlerType::Finally);
561 int CleanupState = addClrEHHandler(FuncInfo, HandlerParentState, -1,
562 HandlerType, 0, Pad->getParent());
563 // Queue any child EH pads on the worklist.
564 for (const User *U : Cleanup->users())
565 if (const auto *I = dyn_cast<Instruction>(U))
566 if (I->isEHPad())
567 Worklist.emplace_back(I, CleanupState);
568 // Remember this pad's state.
569 FuncInfo.EHPadStateMap[Cleanup] = CleanupState;
570 } else {
571 // Walk the handlers of this catchswitch in reverse order since all but
572 // the last need to set the following one as its TryParentState.
573 const auto *CatchSwitch = cast<CatchSwitchInst>(Pad);
574 int CatchState = -1, FollowerState = -1;
575 SmallVector<const BasicBlock *, 4> CatchBlocks(CatchSwitch->handlers());
576 for (auto CBI = CatchBlocks.rbegin(), CBE = CatchBlocks.rend();
577 CBI != CBE; ++CBI, FollowerState = CatchState) {
578 const BasicBlock *CatchBlock = *CBI;
579 // Create the entry for this catch with the appropriate handler
580 // properties.
581 const auto *Catch = cast<CatchPadInst>(CatchBlock->getFirstNonPHI());
582 uint32_t TypeToken = static_cast<uint32_t>(
583 cast<ConstantInt>(Catch->getArgOperand(0))->getZExtValue());
584 CatchState =
585 addClrEHHandler(FuncInfo, HandlerParentState, FollowerState,
586 ClrHandlerType::Catch, TypeToken, CatchBlock);
587 // Queue any child EH pads on the worklist.
588 for (const User *U : Catch->users())
589 if (const auto *I = dyn_cast<Instruction>(U))
590 if (I->isEHPad())
591 Worklist.emplace_back(I, CatchState);
592 // Remember this catch's state.
593 FuncInfo.EHPadStateMap[Catch] = CatchState;
594 }
595 // Associate the catchswitch with the state of its first catch.
596 assert(CatchSwitch->getNumHandlers())(static_cast <bool> (CatchSwitch->getNumHandlers()) ?
void (0) : __assert_fail ("CatchSwitch->getNumHandlers()"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 596, __extension__ __PRETTY_FUNCTION__))
;
597 FuncInfo.EHPadStateMap[CatchSwitch] = CatchState;
598 }
599 }
600
601 // Step two: record the TryParentState of each state. For cleanuppads that
602 // don't have cleanuprets, we may need to infer this from their child pads,
603 // so visit pads in descendant-most to ancestor-most order.
604 for (auto Entry = FuncInfo.ClrEHUnwindMap.rbegin(),
4
Loop condition is false. Execution continues on line 699
605 End = FuncInfo.ClrEHUnwindMap.rend();
606 Entry != End; ++Entry) {
607 const Instruction *Pad =
608 Entry->Handler.get<const BasicBlock *>()->getFirstNonPHI();
609 // For most pads, the TryParentState is the state associated with the
610 // unwind dest of exceptional exits from it.
611 const BasicBlock *UnwindDest;
612 if (const auto *Catch = dyn_cast<CatchPadInst>(Pad)) {
613 // If a catch is not the last in its catchswitch, its TryParentState is
614 // the state associated with the next catch in the switch, even though
615 // that's not the unwind dest of exceptions escaping the catch. Those
616 // cases were already assigned a TryParentState in the first pass, so
617 // skip them.
618 if (Entry->TryParentState != -1)
619 continue;
620 // Otherwise, get the unwind dest from the catchswitch.
621 UnwindDest = Catch->getCatchSwitch()->getUnwindDest();
622 } else {
623 const auto *Cleanup = cast<CleanupPadInst>(Pad);
624 UnwindDest = nullptr;
625 for (const User *U : Cleanup->users()) {
626 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
627 // Common and unambiguous case -- cleanupret indicates cleanup's
628 // unwind dest.
629 UnwindDest = CleanupRet->getUnwindDest();
630 break;
631 }
632
633 // Get an unwind dest for the user
634 const BasicBlock *UserUnwindDest = nullptr;
635 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
636 UserUnwindDest = Invoke->getUnwindDest();
637 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(U)) {
638 UserUnwindDest = CatchSwitch->getUnwindDest();
639 } else if (auto *ChildCleanup = dyn_cast<CleanupPadInst>(U)) {
640 int UserState = FuncInfo.EHPadStateMap[ChildCleanup];
641 int UserUnwindState =
642 FuncInfo.ClrEHUnwindMap[UserState].TryParentState;
643 if (UserUnwindState != -1)
644 UserUnwindDest = FuncInfo.ClrEHUnwindMap[UserUnwindState]
645 .Handler.get<const BasicBlock *>();
646 }
647
648 // Not having an unwind dest for this user might indicate that it
649 // doesn't unwind, so can't be taken as proof that the cleanup itself
650 // may unwind to caller (see e.g. SimplifyUnreachable and
651 // RemoveUnwindEdge).
652 if (!UserUnwindDest)
653 continue;
654
655 // Now we have an unwind dest for the user, but we need to see if it
656 // unwinds all the way out of the cleanup or if it stays within it.
657 const Instruction *UserUnwindPad = UserUnwindDest->getFirstNonPHI();
658 const Value *UserUnwindParent;
659 if (auto *CSI = dyn_cast<CatchSwitchInst>(UserUnwindPad))
660 UserUnwindParent = CSI->getParentPad();
661 else
662 UserUnwindParent =
663 cast<CleanupPadInst>(UserUnwindPad)->getParentPad();
664
665 // The unwind stays within the cleanup iff it targets a child of the
666 // cleanup.
667 if (UserUnwindParent == Cleanup)
668 continue;
669
670 // This unwind exits the cleanup, so its dest is the cleanup's dest.
671 UnwindDest = UserUnwindDest;
672 break;
673 }
674 }
675
676 // Record the state of the unwind dest as the TryParentState.
677 int UnwindDestState;
678
679 // If UnwindDest is null at this point, either the pad in question can
680 // be exited by unwind to caller, or it cannot be exited by unwind. In
681 // either case, reporting such cases as unwinding to caller is correct.
682 // This can lead to EH tables that "look strange" -- if this pad's is in
683 // a parent funclet which has other children that do unwind to an enclosing
684 // pad, the try region for this pad will be missing the "duplicate" EH
685 // clause entries that you'd expect to see covering the whole parent. That
686 // should be benign, since the unwind never actually happens. If it were
687 // an issue, we could add a subsequent pass that pushes unwind dests down
688 // from parents that have them to children that appear to unwind to caller.
689 if (!UnwindDest) {
690 UnwindDestState = -1;
691 } else {
692 UnwindDestState = FuncInfo.EHPadStateMap[UnwindDest->getFirstNonPHI()];
693 }
694
695 Entry->TryParentState = UnwindDestState;
696 }
697
698 // Step three: transfer information from pads to invokes.
699 calculateStateNumbersForInvokes(Fn, FuncInfo);
5
Calling 'calculateStateNumbersForInvokes'
700}
701
702void WinEHPrepare::colorFunclets(Function &F) {
703 BlockColors = colorEHFunclets(F);
704
705 // Invert the map from BB to colors to color to BBs.
706 for (BasicBlock &BB : F) {
707 ColorVector &Colors = BlockColors[&BB];
708 for (BasicBlock *Color : Colors)
709 FuncletBlocks[Color].push_back(&BB);
710 }
711}
712
713void WinEHPrepare::demotePHIsOnFunclets(Function &F,
714 bool DemoteCatchSwitchPHIOnly) {
715 // Strip PHI nodes off of EH pads.
716 SmallVector<PHINode *, 16> PHINodes;
717 for (BasicBlock &BB : make_early_inc_range(F)) {
718 if (!BB.isEHPad())
719 continue;
720 if (DemoteCatchSwitchPHIOnly && !isa<CatchSwitchInst>(BB.getFirstNonPHI()))
721 continue;
722
723 for (Instruction &I : make_early_inc_range(BB)) {
724 auto *PN = dyn_cast<PHINode>(&I);
725 // Stop at the first non-PHI.
726 if (!PN)
727 break;
728
729 AllocaInst *SpillSlot = insertPHILoads(PN, F);
730 if (SpillSlot)
731 insertPHIStores(PN, SpillSlot);
732
733 PHINodes.push_back(PN);
734 }
735 }
736
737 for (auto *PN : PHINodes) {
738 // There may be lingering uses on other EH PHIs being removed
739 PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
740 PN->eraseFromParent();
741 }
742}
743
744void WinEHPrepare::cloneCommonBlocks(Function &F) {
745 // We need to clone all blocks which belong to multiple funclets. Values are
746 // remapped throughout the funclet to propagate both the new instructions
747 // *and* the new basic blocks themselves.
748 for (auto &Funclets : FuncletBlocks) {
749 BasicBlock *FuncletPadBB = Funclets.first;
750 std::vector<BasicBlock *> &BlocksInFunclet = Funclets.second;
751 Value *FuncletToken;
752 if (FuncletPadBB == &F.getEntryBlock())
753 FuncletToken = ConstantTokenNone::get(F.getContext());
754 else
755 FuncletToken = FuncletPadBB->getFirstNonPHI();
756
757 std::vector<std::pair<BasicBlock *, BasicBlock *>> Orig2Clone;
758 ValueToValueMapTy VMap;
759 for (BasicBlock *BB : BlocksInFunclet) {
760 ColorVector &ColorsForBB = BlockColors[BB];
761 // We don't need to do anything if the block is monochromatic.
762 size_t NumColorsForBB = ColorsForBB.size();
763 if (NumColorsForBB == 1)
764 continue;
765
766 DEBUG_WITH_TYPE("winehprepare-coloring",do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Cloning block \'"
<< BB->getName() << "\' for funclet \'" <<
FuncletPadBB->getName() << "\'.\n"; } } while (false
)
767 dbgs() << " Cloning block \'" << BB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Cloning block \'"
<< BB->getName() << "\' for funclet \'" <<
FuncletPadBB->getName() << "\'.\n"; } } while (false
)
768 << "\' for funclet \'" << FuncletPadBB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Cloning block \'"
<< BB->getName() << "\' for funclet \'" <<
FuncletPadBB->getName() << "\'.\n"; } } while (false
)
769 << "\'.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Cloning block \'"
<< BB->getName() << "\' for funclet \'" <<
FuncletPadBB->getName() << "\'.\n"; } } while (false
)
;
770
771 // Create a new basic block and copy instructions into it!
772 BasicBlock *CBB =
773 CloneBasicBlock(BB, VMap, Twine(".for.", FuncletPadBB->getName()));
774 // Insert the clone immediately after the original to ensure determinism
775 // and to keep the same relative ordering of any funclet's blocks.
776 CBB->insertInto(&F, BB->getNextNode());
777
778 // Add basic block mapping.
779 VMap[BB] = CBB;
780
781 // Record delta operations that we need to perform to our color mappings.
782 Orig2Clone.emplace_back(BB, CBB);
783 }
784
785 // If nothing was cloned, we're done cloning in this funclet.
786 if (Orig2Clone.empty())
787 continue;
788
789 // Update our color mappings to reflect that one block has lost a color and
790 // another has gained a color.
791 for (auto &BBMapping : Orig2Clone) {
792 BasicBlock *OldBlock = BBMapping.first;
793 BasicBlock *NewBlock = BBMapping.second;
794
795 BlocksInFunclet.push_back(NewBlock);
796 ColorVector &NewColors = BlockColors[NewBlock];
797 assert(NewColors.empty() && "A new block should only have one color!")(static_cast <bool> (NewColors.empty() && "A new block should only have one color!"
) ? void (0) : __assert_fail ("NewColors.empty() && \"A new block should only have one color!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 797, __extension__ __PRETTY_FUNCTION__))
;
798 NewColors.push_back(FuncletPadBB);
799
800 DEBUG_WITH_TYPE("winehprepare-coloring",do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Assigned color \'"
<< FuncletPadBB->getName() << "\' to block \'"
<< NewBlock->getName() << "\'.\n"; } } while (
false)
801 dbgs() << " Assigned color \'" << FuncletPadBB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Assigned color \'"
<< FuncletPadBB->getName() << "\' to block \'"
<< NewBlock->getName() << "\'.\n"; } } while (
false)
802 << "\' to block \'" << NewBlock->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Assigned color \'"
<< FuncletPadBB->getName() << "\' to block \'"
<< NewBlock->getName() << "\'.\n"; } } while (
false)
803 << "\'.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Assigned color \'"
<< FuncletPadBB->getName() << "\' to block \'"
<< NewBlock->getName() << "\'.\n"; } } while (
false)
;
804
805 llvm::erase_value(BlocksInFunclet, OldBlock);
806 ColorVector &OldColors = BlockColors[OldBlock];
807 llvm::erase_value(OldColors, FuncletPadBB);
808
809 DEBUG_WITH_TYPE("winehprepare-coloring",do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Removed color \'"
<< FuncletPadBB->getName() << "\' from block \'"
<< OldBlock->getName() << "\'.\n"; } } while (
false)
810 dbgs() << " Removed color \'" << FuncletPadBB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Removed color \'"
<< FuncletPadBB->getName() << "\' from block \'"
<< OldBlock->getName() << "\'.\n"; } } while (
false)
811 << "\' from block \'" << OldBlock->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Removed color \'"
<< FuncletPadBB->getName() << "\' from block \'"
<< OldBlock->getName() << "\'.\n"; } } while (
false)
812 << "\'.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("winehprepare-coloring")) { dbgs() << " Removed color \'"
<< FuncletPadBB->getName() << "\' from block \'"
<< OldBlock->getName() << "\'.\n"; } } while (
false)
;
813 }
814
815 // Loop over all of the instructions in this funclet, fixing up operand
816 // references as we go. This uses VMap to do all the hard work.
817 for (BasicBlock *BB : BlocksInFunclet)
818 // Loop over all instructions, fixing each one as we find it...
819 for (Instruction &I : *BB)
820 RemapInstruction(&I, VMap,
821 RF_IgnoreMissingLocals | RF_NoModuleLevelChanges);
822
823 // Catchrets targeting cloned blocks need to be updated separately from
824 // the loop above because they are not in the current funclet.
825 SmallVector<CatchReturnInst *, 2> FixupCatchrets;
826 for (auto &BBMapping : Orig2Clone) {
827 BasicBlock *OldBlock = BBMapping.first;
828 BasicBlock *NewBlock = BBMapping.second;
829
830 FixupCatchrets.clear();
831 for (BasicBlock *Pred : predecessors(OldBlock))
832 if (auto *CatchRet = dyn_cast<CatchReturnInst>(Pred->getTerminator()))
833 if (CatchRet->getCatchSwitchParentPad() == FuncletToken)
834 FixupCatchrets.push_back(CatchRet);
835
836 for (CatchReturnInst *CatchRet : FixupCatchrets)
837 CatchRet->setSuccessor(NewBlock);
838 }
839
840 auto UpdatePHIOnClonedBlock = [&](PHINode *PN, bool IsForOldBlock) {
841 unsigned NumPreds = PN->getNumIncomingValues();
842 for (unsigned PredIdx = 0, PredEnd = NumPreds; PredIdx != PredEnd;
843 ++PredIdx) {
844 BasicBlock *IncomingBlock = PN->getIncomingBlock(PredIdx);
845 bool EdgeTargetsFunclet;
846 if (auto *CRI =
847 dyn_cast<CatchReturnInst>(IncomingBlock->getTerminator())) {
848 EdgeTargetsFunclet = (CRI->getCatchSwitchParentPad() == FuncletToken);
849 } else {
850 ColorVector &IncomingColors = BlockColors[IncomingBlock];
851 assert(!IncomingColors.empty() && "Block not colored!")(static_cast <bool> (!IncomingColors.empty() &&
"Block not colored!") ? void (0) : __assert_fail ("!IncomingColors.empty() && \"Block not colored!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 851, __extension__ __PRETTY_FUNCTION__))
;
852 assert((IncomingColors.size() == 1 ||(static_cast <bool> ((IncomingColors.size() == 1 || llvm
::all_of(IncomingColors, [&](BasicBlock *Color) { return Color
!= FuncletPadBB; })) && "Cloning should leave this funclet's blocks monochromatic"
) ? void (0) : __assert_fail ("(IncomingColors.size() == 1 || llvm::all_of(IncomingColors, [&](BasicBlock *Color) { return Color != FuncletPadBB; })) && \"Cloning should leave this funclet's blocks monochromatic\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 857, __extension__ __PRETTY_FUNCTION__))
853 llvm::all_of(IncomingColors,(static_cast <bool> ((IncomingColors.size() == 1 || llvm
::all_of(IncomingColors, [&](BasicBlock *Color) { return Color
!= FuncletPadBB; })) && "Cloning should leave this funclet's blocks monochromatic"
) ? void (0) : __assert_fail ("(IncomingColors.size() == 1 || llvm::all_of(IncomingColors, [&](BasicBlock *Color) { return Color != FuncletPadBB; })) && \"Cloning should leave this funclet's blocks monochromatic\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 857, __extension__ __PRETTY_FUNCTION__))
854 [&](BasicBlock *Color) {(static_cast <bool> ((IncomingColors.size() == 1 || llvm
::all_of(IncomingColors, [&](BasicBlock *Color) { return Color
!= FuncletPadBB; })) && "Cloning should leave this funclet's blocks monochromatic"
) ? void (0) : __assert_fail ("(IncomingColors.size() == 1 || llvm::all_of(IncomingColors, [&](BasicBlock *Color) { return Color != FuncletPadBB; })) && \"Cloning should leave this funclet's blocks monochromatic\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 857, __extension__ __PRETTY_FUNCTION__))
855 return Color != FuncletPadBB;(static_cast <bool> ((IncomingColors.size() == 1 || llvm
::all_of(IncomingColors, [&](BasicBlock *Color) { return Color
!= FuncletPadBB; })) && "Cloning should leave this funclet's blocks monochromatic"
) ? void (0) : __assert_fail ("(IncomingColors.size() == 1 || llvm::all_of(IncomingColors, [&](BasicBlock *Color) { return Color != FuncletPadBB; })) && \"Cloning should leave this funclet's blocks monochromatic\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 857, __extension__ __PRETTY_FUNCTION__))
856 })) &&(static_cast <bool> ((IncomingColors.size() == 1 || llvm
::all_of(IncomingColors, [&](BasicBlock *Color) { return Color
!= FuncletPadBB; })) && "Cloning should leave this funclet's blocks monochromatic"
) ? void (0) : __assert_fail ("(IncomingColors.size() == 1 || llvm::all_of(IncomingColors, [&](BasicBlock *Color) { return Color != FuncletPadBB; })) && \"Cloning should leave this funclet's blocks monochromatic\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 857, __extension__ __PRETTY_FUNCTION__))
857 "Cloning should leave this funclet's blocks monochromatic")(static_cast <bool> ((IncomingColors.size() == 1 || llvm
::all_of(IncomingColors, [&](BasicBlock *Color) { return Color
!= FuncletPadBB; })) && "Cloning should leave this funclet's blocks monochromatic"
) ? void (0) : __assert_fail ("(IncomingColors.size() == 1 || llvm::all_of(IncomingColors, [&](BasicBlock *Color) { return Color != FuncletPadBB; })) && \"Cloning should leave this funclet's blocks monochromatic\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 857, __extension__ __PRETTY_FUNCTION__))
;
858 EdgeTargetsFunclet = (IncomingColors.front() == FuncletPadBB);
859 }
860 if (IsForOldBlock != EdgeTargetsFunclet)
861 continue;
862 PN->removeIncomingValue(IncomingBlock, /*DeletePHIIfEmpty=*/false);
863 // Revisit the next entry.
864 --PredIdx;
865 --PredEnd;
866 }
867 };
868
869 for (auto &BBMapping : Orig2Clone) {
870 BasicBlock *OldBlock = BBMapping.first;
871 BasicBlock *NewBlock = BBMapping.second;
872 for (PHINode &OldPN : OldBlock->phis()) {
873 UpdatePHIOnClonedBlock(&OldPN, /*IsForOldBlock=*/true);
874 }
875 for (PHINode &NewPN : NewBlock->phis()) {
876 UpdatePHIOnClonedBlock(&NewPN, /*IsForOldBlock=*/false);
877 }
878 }
879
880 // Check to see if SuccBB has PHI nodes. If so, we need to add entries to
881 // the PHI nodes for NewBB now.
882 for (auto &BBMapping : Orig2Clone) {
883 BasicBlock *OldBlock = BBMapping.first;
884 BasicBlock *NewBlock = BBMapping.second;
885 for (BasicBlock *SuccBB : successors(NewBlock)) {
886 for (PHINode &SuccPN : SuccBB->phis()) {
887 // Ok, we have a PHI node. Figure out what the incoming value was for
888 // the OldBlock.
889 int OldBlockIdx = SuccPN.getBasicBlockIndex(OldBlock);
890 if (OldBlockIdx == -1)
891 break;
892 Value *IV = SuccPN.getIncomingValue(OldBlockIdx);
893
894 // Remap the value if necessary.
895 if (auto *Inst = dyn_cast<Instruction>(IV)) {
896 ValueToValueMapTy::iterator I = VMap.find(Inst);
897 if (I != VMap.end())
898 IV = I->second;
899 }
900
901 SuccPN.addIncoming(IV, NewBlock);
902 }
903 }
904 }
905
906 for (ValueToValueMapTy::value_type VT : VMap) {
907 // If there were values defined in BB that are used outside the funclet,
908 // then we now have to update all uses of the value to use either the
909 // original value, the cloned value, or some PHI derived value. This can
910 // require arbitrary PHI insertion, of which we are prepared to do, clean
911 // these up now.
912 SmallVector<Use *, 16> UsesToRename;
913
914 auto *OldI = dyn_cast<Instruction>(const_cast<Value *>(VT.first));
915 if (!OldI)
916 continue;
917 auto *NewI = cast<Instruction>(VT.second);
918 // Scan all uses of this instruction to see if it is used outside of its
919 // funclet, and if so, record them in UsesToRename.
920 for (Use &U : OldI->uses()) {
921 Instruction *UserI = cast<Instruction>(U.getUser());
922 BasicBlock *UserBB = UserI->getParent();
923 ColorVector &ColorsForUserBB = BlockColors[UserBB];
924 assert(!ColorsForUserBB.empty())(static_cast <bool> (!ColorsForUserBB.empty()) ? void (
0) : __assert_fail ("!ColorsForUserBB.empty()", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 924, __extension__ __PRETTY_FUNCTION__))
;
925 if (ColorsForUserBB.size() > 1 ||
926 *ColorsForUserBB.begin() != FuncletPadBB)
927 UsesToRename.push_back(&U);
928 }
929
930 // If there are no uses outside the block, we're done with this
931 // instruction.
932 if (UsesToRename.empty())
933 continue;
934
935 // We found a use of OldI outside of the funclet. Rename all uses of OldI
936 // that are outside its funclet to be uses of the appropriate PHI node
937 // etc.
938 SSAUpdater SSAUpdate;
939 SSAUpdate.Initialize(OldI->getType(), OldI->getName());
940 SSAUpdate.AddAvailableValue(OldI->getParent(), OldI);
941 SSAUpdate.AddAvailableValue(NewI->getParent(), NewI);
942
943 while (!UsesToRename.empty())
944 SSAUpdate.RewriteUseAfterInsertions(*UsesToRename.pop_back_val());
945 }
946 }
947}
948
949void WinEHPrepare::removeImplausibleInstructions(Function &F) {
950 // Remove implausible terminators and replace them with UnreachableInst.
951 for (auto &Funclet : FuncletBlocks) {
952 BasicBlock *FuncletPadBB = Funclet.first;
953 std::vector<BasicBlock *> &BlocksInFunclet = Funclet.second;
954 Instruction *FirstNonPHI = FuncletPadBB->getFirstNonPHI();
955 auto *FuncletPad = dyn_cast<FuncletPadInst>(FirstNonPHI);
956 auto *CatchPad = dyn_cast_or_null<CatchPadInst>(FuncletPad);
957 auto *CleanupPad = dyn_cast_or_null<CleanupPadInst>(FuncletPad);
958
959 for (BasicBlock *BB : BlocksInFunclet) {
960 for (Instruction &I : *BB) {
961 auto *CB = dyn_cast<CallBase>(&I);
962 if (!CB)
963 continue;
964
965 Value *FuncletBundleOperand = nullptr;
966 if (auto BU = CB->getOperandBundle(LLVMContext::OB_funclet))
967 FuncletBundleOperand = BU->Inputs.front();
968
969 if (FuncletBundleOperand == FuncletPad)
970 continue;
971
972 // Skip call sites which are nounwind intrinsics or inline asm.
973 auto *CalledFn =
974 dyn_cast<Function>(CB->getCalledOperand()->stripPointerCasts());
975 if (CalledFn && ((CalledFn->isIntrinsic() && CB->doesNotThrow()) ||
976 CB->isInlineAsm()))
977 continue;
978
979 // This call site was not part of this funclet, remove it.
980 if (isa<InvokeInst>(CB)) {
981 // Remove the unwind edge if it was an invoke.
982 removeUnwindEdge(BB);
983 // Get a pointer to the new call.
984 BasicBlock::iterator CallI =
985 std::prev(BB->getTerminator()->getIterator());
986 auto *CI = cast<CallInst>(&*CallI);
987 changeToUnreachable(CI);
988 } else {
989 changeToUnreachable(&I);
990 }
991
992 // There are no more instructions in the block (except for unreachable),
993 // we are done.
994 break;
995 }
996
997 Instruction *TI = BB->getTerminator();
998 // CatchPadInst and CleanupPadInst can't transfer control to a ReturnInst.
999 bool IsUnreachableRet = isa<ReturnInst>(TI) && FuncletPad;
1000 // The token consumed by a CatchReturnInst must match the funclet token.
1001 bool IsUnreachableCatchret = false;
1002 if (auto *CRI = dyn_cast<CatchReturnInst>(TI))
1003 IsUnreachableCatchret = CRI->getCatchPad() != CatchPad;
1004 // The token consumed by a CleanupReturnInst must match the funclet token.
1005 bool IsUnreachableCleanupret = false;
1006 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI))
1007 IsUnreachableCleanupret = CRI->getCleanupPad() != CleanupPad;
1008 if (IsUnreachableRet || IsUnreachableCatchret ||
1009 IsUnreachableCleanupret) {
1010 changeToUnreachable(TI);
1011 } else if (isa<InvokeInst>(TI)) {
1012 if (Personality == EHPersonality::MSVC_CXX && CleanupPad) {
1013 // Invokes within a cleanuppad for the MSVC++ personality never
1014 // transfer control to their unwind edge: the personality will
1015 // terminate the program.
1016 removeUnwindEdge(BB);
1017 }
1018 }
1019 }
1020 }
1021}
1022
1023void WinEHPrepare::cleanupPreparedFunclets(Function &F) {
1024 // Clean-up some of the mess we made by removing useles PHI nodes, trivial
1025 // branches, etc.
1026 for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
1027 SimplifyInstructionsInBlock(&BB);
1028 ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true);
1029 MergeBlockIntoPredecessor(&BB);
1030 }
1031
1032 // We might have some unreachable blocks after cleaning up some impossible
1033 // control flow.
1034 removeUnreachableBlocks(F);
1035}
1036
1037#ifndef NDEBUG
1038void WinEHPrepare::verifyPreparedFunclets(Function &F) {
1039 for (BasicBlock &BB : F) {
1040 size_t NumColors = BlockColors[&BB].size();
1041 assert(NumColors == 1 && "Expected monochromatic BB!")(static_cast <bool> (NumColors == 1 && "Expected monochromatic BB!"
) ? void (0) : __assert_fail ("NumColors == 1 && \"Expected monochromatic BB!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 1041, __extension__ __PRETTY_FUNCTION__))
;
1042 if (NumColors == 0)
1043 report_fatal_error("Uncolored BB!");
1044 if (NumColors > 1)
1045 report_fatal_error("Multicolor BB!");
1046 assert((DisableDemotion || !(BB.isEHPad() && isa<PHINode>(BB.begin()))) &&(static_cast <bool> ((DisableDemotion || !(BB.isEHPad()
&& isa<PHINode>(BB.begin()))) && "EH Pad still has a PHI!"
) ? void (0) : __assert_fail ("(DisableDemotion || !(BB.isEHPad() && isa<PHINode>(BB.begin()))) && \"EH Pad still has a PHI!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 1047, __extension__ __PRETTY_FUNCTION__))
1047 "EH Pad still has a PHI!")(static_cast <bool> ((DisableDemotion || !(BB.isEHPad()
&& isa<PHINode>(BB.begin()))) && "EH Pad still has a PHI!"
) ? void (0) : __assert_fail ("(DisableDemotion || !(BB.isEHPad() && isa<PHINode>(BB.begin()))) && \"EH Pad still has a PHI!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 1047, __extension__ __PRETTY_FUNCTION__))
;
1048 }
1049}
1050#endif
1051
/// Driver for explicit (funclet-based) Windows EH preparation: normalizes the
/// CFG so every block belongs to exactly one funclet and no EH pad starts
/// with a PHI. Always returns true (the IR is always modified/inspected).
bool WinEHPrepare::prepareExplicitEH(Function &F) {
  // Remove unreachable blocks. It is not valuable to assign them a color and
  // their existence can trick us into thinking values are alive when they are
  // not.
  removeUnreachableBlocks(F);

  // Determine which blocks are reachable from which funclet entries.
  colorFunclets(F);

  // Clone blocks reachable from more than one funclet so that afterwards each
  // block has a single color.
  cloneCommonBlocks(F);

  // Demote PHIs on EH pads to loads/stores of stack slots (unless disabled
  // for testing via the DisableDemotion flag).
  if (!DisableDemotion)
    demotePHIsOnFunclets(F, DemoteCatchSwitchPHIOnly ||
                                DemoteCatchSwitchPHIOnlyOpt);

  if (!DisableCleanups) {
    // Verify before and after removal so a verifier failure pinpoints which
    // step broke the IR.
    assert(!verifyFunction(F, &dbgs()));
    removeImplausibleInstructions(F);

    assert(!verifyFunction(F, &dbgs()));
    cleanupPreparedFunclets(F);
  }

  LLVM_DEBUG(verifyPreparedFunclets(F));
  // Recolor the CFG to verify that all is well.
  LLVM_DEBUG(colorFunclets(F));
  LLVM_DEBUG(verifyPreparedFunclets(F));

  // Drop per-function analysis state so a stale coloring is never reused.
  BlockColors.clear();
  FuncletBlocks.clear();

  return true;
}
1085
1086// TODO: Share loads when one use dominates another, or when a catchpad exit
1087// dominates uses (needs dominators).
1088AllocaInst *WinEHPrepare::insertPHILoads(PHINode *PN, Function &F) {
1089 BasicBlock *PHIBlock = PN->getParent();
1090 AllocaInst *SpillSlot = nullptr;
1091 Instruction *EHPad = PHIBlock->getFirstNonPHI();
1092
1093 if (!EHPad->isTerminator()) {
1094 // If the EHPad isn't a terminator, then we can insert a load in this block
1095 // that will dominate all uses.
1096 SpillSlot = new AllocaInst(PN->getType(), DL->getAllocaAddrSpace(), nullptr,
1097 Twine(PN->getName(), ".wineh.spillslot"),
1098 &F.getEntryBlock().front());
1099 Value *V = new LoadInst(PN->getType(), SpillSlot,
1100 Twine(PN->getName(), ".wineh.reload"),
1101 &*PHIBlock->getFirstInsertionPt());
1102 PN->replaceAllUsesWith(V);
1103 return SpillSlot;
1104 }
1105
1106 // Otherwise, we have a PHI on a terminator EHPad, and we give up and insert
1107 // loads of the slot before every use.
1108 DenseMap<BasicBlock *, Value *> Loads;
1109 for (Use &U : llvm::make_early_inc_range(PN->uses())) {
1110 auto *UsingInst = cast<Instruction>(U.getUser());
1111 if (isa<PHINode>(UsingInst) && UsingInst->getParent()->isEHPad()) {
1112 // Use is on an EH pad phi. Leave it alone; we'll insert loads and
1113 // stores for it separately.
1114 continue;
1115 }
1116 replaceUseWithLoad(PN, U, SpillSlot, Loads, F);
1117 }
1118 return SpillSlot;
1119}
1120
1121// TODO: improve store placement. Inserting at def is probably good, but need
1122// to be careful not to introduce interfering stores (needs liveness analysis).
1123// TODO: identify related phi nodes that can share spill slots, and share them
1124// (also needs liveness).
1125void WinEHPrepare::insertPHIStores(PHINode *OriginalPHI,
1126 AllocaInst *SpillSlot) {
1127 // Use a worklist of (Block, Value) pairs -- the given Value needs to be
1128 // stored to the spill slot by the end of the given Block.
1129 SmallVector<std::pair<BasicBlock *, Value *>, 4> Worklist;
1130
1131 Worklist.push_back({OriginalPHI->getParent(), OriginalPHI});
1132
1133 while (!Worklist.empty()) {
1134 BasicBlock *EHBlock;
1135 Value *InVal;
1136 std::tie(EHBlock, InVal) = Worklist.pop_back_val();
1137
1138 PHINode *PN = dyn_cast<PHINode>(InVal);
1139 if (PN && PN->getParent() == EHBlock) {
1140 // The value is defined by another PHI we need to remove, with no room to
1141 // insert a store after the PHI, so each predecessor needs to store its
1142 // incoming value.
1143 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
1144 Value *PredVal = PN->getIncomingValue(i);
1145
1146 // Undef can safely be skipped.
1147 if (isa<UndefValue>(PredVal))
1148 continue;
1149
1150 insertPHIStore(PN->getIncomingBlock(i), PredVal, SpillSlot, Worklist);
1151 }
1152 } else {
1153 // We need to store InVal, which dominates EHBlock, but can't put a store
1154 // in EHBlock, so need to put stores in each predecessor.
1155 for (BasicBlock *PredBlock : predecessors(EHBlock)) {
1156 insertPHIStore(PredBlock, InVal, SpillSlot, Worklist);
1157 }
1158 }
1159 }
1160}
1161
1162void WinEHPrepare::insertPHIStore(
1163 BasicBlock *PredBlock, Value *PredVal, AllocaInst *SpillSlot,
1164 SmallVectorImpl<std::pair<BasicBlock *, Value *>> &Worklist) {
1165
1166 if (PredBlock->isEHPad() && PredBlock->getFirstNonPHI()->isTerminator()) {
1167 // Pred is unsplittable, so we need to queue it on the worklist.
1168 Worklist.push_back({PredBlock, PredVal});
1169 return;
1170 }
1171
1172 // Otherwise, insert the store at the end of the basic block.
1173 new StoreInst(PredVal, SpillSlot, PredBlock->getTerminator());
1174}
1175
/// Rewrite use \p U of \p V to read from \p SpillSlot instead, creating the
/// slot on first call (hence the by-reference AllocaInst*&). \p Loads caches
/// the load created per block so a PHI never receives two distinct loads
/// from the same predecessor (which would be illegal SSA).
void WinEHPrepare::replaceUseWithLoad(Value *V, Use &U, AllocaInst *&SpillSlot,
                                      DenseMap<BasicBlock *, Value *> &Loads,
                                      Function &F) {
  // Lazily create the spill slot (in the entry block, so it is static).
  if (!SpillSlot)
    SpillSlot = new AllocaInst(V->getType(), DL->getAllocaAddrSpace(), nullptr,
                               Twine(V->getName(), ".wineh.spillslot"),
                               &F.getEntryBlock().front());

  auto *UsingInst = cast<Instruction>(U.getUser());
  if (auto *UsingPHI = dyn_cast<PHINode>(UsingInst)) {
    // If this is a PHI node, we can't insert a load of the value before
    // the use. Instead insert the load in the predecessor block
    // corresponding to the incoming value.
    //
    // Note that if there are multiple edges from a basic block to this
    // PHI node that we cannot have multiple loads. The problem is that
    // the resulting PHI node will have multiple values (from each load)
    // coming in from the same block, which is illegal SSA form.
    // For this reason, we keep track of and reuse loads we insert.
    BasicBlock *IncomingBlock = UsingPHI->getIncomingBlock(U);
    if (auto *CatchRet =
            dyn_cast<CatchReturnInst>(IncomingBlock->getTerminator())) {
      // Putting a load above a catchret and use on the phi would still leave
      // a cross-funclet def/use. We need to split the edge, change the
      // catchret to target the new block, and put the load there.
      BasicBlock *PHIBlock = UsingInst->getParent();
      BasicBlock *NewBlock = SplitEdge(IncomingBlock, PHIBlock);
      // SplitEdge gives us:
      //   IncomingBlock:
      //     ...
      //     br label %NewBlock
      //   NewBlock:
      //     catchret label %PHIBlock
      // But we need:
      //   IncomingBlock:
      //     ...
      //     catchret label %NewBlock
      //   NewBlock:
      //     br label %PHIBlock
      // So move the terminators to each others' blocks and swap their
      // successors.
      BranchInst *Goto = cast<BranchInst>(IncomingBlock->getTerminator());
      // Detach both terminators first, then re-attach in swapped positions;
      // the setSuccessor calls must come after the moves.
      Goto->removeFromParent();
      CatchRet->removeFromParent();
      IncomingBlock->getInstList().push_back(CatchRet);
      NewBlock->getInstList().push_back(Goto);
      Goto->setSuccessor(0, PHIBlock);
      CatchRet->setSuccessor(NewBlock);
      // Update the color mapping for the newly split edge.
      // Grab a reference to the ColorVector to be inserted before getting the
      // reference to the vector we are copying because inserting the new
      // element in BlockColors might cause the map to be reallocated.
      ColorVector &ColorsForNewBlock = BlockColors[NewBlock];
      ColorVector &ColorsForPHIBlock = BlockColors[PHIBlock];
      ColorsForNewBlock = ColorsForPHIBlock;
      for (BasicBlock *FuncletPad : ColorsForPHIBlock)
        FuncletBlocks[FuncletPad].push_back(NewBlock);
      // Treat the new block as incoming for load insertion.
      IncomingBlock = NewBlock;
    }
    // Insert (or reuse) the load in the predecessor block. Note: Load is a
    // reference into the Loads cache, so assigning it also updates the cache.
    Value *&Load = Loads[IncomingBlock];
    if (!Load)
      Load = new LoadInst(V->getType(), SpillSlot,
                          Twine(V->getName(), ".wineh.reload"),
                          /*isVolatile=*/false, IncomingBlock->getTerminator());

    U.set(Load);
  } else {
    // Reload right before the old use.
    auto *Load = new LoadInst(V->getType(), SpillSlot,
                              Twine(V->getName(), ".wineh.reload"),
                              /*isVolatile=*/false, UsingInst);
    U.set(Load);
  }
}
1253
1254void WinEHFuncInfo::addIPToStateRange(const InvokeInst *II,
1255 MCSymbol *InvokeBegin,
1256 MCSymbol *InvokeEnd) {
1257 assert(InvokeStateMap.count(II) &&(static_cast <bool> (InvokeStateMap.count(II) &&
"should get invoke with precomputed state") ? void (0) : __assert_fail
("InvokeStateMap.count(II) && \"should get invoke with precomputed state\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 1258, __extension__ __PRETTY_FUNCTION__))
1258 "should get invoke with precomputed state")(static_cast <bool> (InvokeStateMap.count(II) &&
"should get invoke with precomputed state") ? void (0) : __assert_fail
("InvokeStateMap.count(II) && \"should get invoke with precomputed state\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/CodeGen/WinEHPrepare.cpp"
, 1258, __extension__ __PRETTY_FUNCTION__))
;
1259 LabelToStateMap[InvokeBegin] = std::make_pair(InvokeStateMap[II], InvokeEnd);
1260}
1261
1262WinEHFuncInfo::WinEHFuncInfo() {}

/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/ADT/iterator.h"
27#include "llvm/ADT/iterator_range.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CallingConv.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/DerivedTypes.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/InstrTypes.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/OperandTraits.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Use.h"
40#include "llvm/IR/User.h"
41#include "llvm/IR/Value.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include <cassert>
46#include <cstddef>
47#include <cstdint>
48#include <iterator>
49
50namespace llvm {
51
52class APInt;
53class ConstantInt;
54class DataLayout;
55class LLVMContext;
56
57//===----------------------------------------------------------------------===//
58// AllocaInst Class
59//===----------------------------------------------------------------------===//
60
/// An instruction to allocate memory on the stack.
class AllocaInst : public UnaryInstruction {
  Type *AllocatedType;

  // Layout of the packed subclass-data bitfield: alignment (log2), then the
  // inalloca and swifterror flags.
  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Get allocation size in bits. Returns None if size can't be determined,
  /// e.g. in case of a VLA.
  Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// For use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment of the allocated memory (stored as log2).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  // FIXME: Remove this one transition to Align is over.
  unsigned getAlignment() const { return getAlign().value(); }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
168
169//===----------------------------------------------------------------------===//
170// LoadInst Class
171//===----------------------------------------------------------------------===//
172
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  // Layout of the packed subclass-data bitfield: volatile flag, alignment
  // (log2), then the atomic ordering.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  unsigned getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access that is being performed.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment of the access (stored as log2).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Return true if this load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return true if this load is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  /// The address being loaded from (operand 0).
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
298
299//===----------------------------------------------------------------------===//
300// StoreInst Class
301//===----------------------------------------------------------------------===//
302
/// An instruction for storing to memory.
class StoreInst : public Instruction {
  // Layout of the packed subclass-data bitfield: volatile flag, alignment
  // (log2), then the atomic ordering.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the alignment of the access that is being performed
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  unsigned getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access that is being performed.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment of the access (stored as log2).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Return true if this store is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return true if this store is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  /// The value being stored (operand 0).
  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  /// The address being stored to (operand 1).
  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
432
// StoreInst has exactly two operands (the stored value and the pointer);
// this trait specialization drives its transparent operand accessors.
template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};
436
// Expands to the out-of-class definitions of StoreInst's operand accessors
// (op_begin/op_end/getOperand/setOperand/getNumOperands/Op<N>) that were
// declared inside the class by DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t S) { return User::operator new(S, 0); }
466 void operator delete(void *Ptr) { User::operator delete(Ptr); }
467
468 /// Returns the ordering constraint of this fence instruction.
469 AtomicOrdering getOrdering() const {
470 return getSubclassData<OrderingField>();
471 }
472
473 /// Sets the ordering constraint of this fence instruction. May only be
474 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
475 void setOrdering(AtomicOrdering Ordering) {
476 setSubclassData<OrderingField>(Ordering);
477 }
478
479 /// Returns the synchronization scope ID of this fence instruction.
480 SyncScope::ID getSyncScopeID() const {
481 return SSID;
482 }
483
484 /// Sets the synchronization scope ID of this fence instruction.
485 void setSyncScopeID(SyncScope::ID SSID) {
486 this->SSID = SSID;
487 }
488
489 // Methods for support type inquiry through isa, cast, and dyn_cast:
490 static bool classof(const Instruction *I) {
491 return I->getOpcode() == Instruction::Fence;
492 }
493 static bool classof(const Value *V) {
494 return isa<Instruction>(V) && classof(cast<Instruction>(V));
495 }
496
497private:
498 // Shadow Instruction::setInstructionSubclassData with a private forwarding
499 // method so that subclasses cannot accidentally use it.
500 template <typename Bitfield>
501 void setSubclassData(typename Bitfield::Type Value) {
502 Instruction::setSubclassData<Bitfield>(Value);
503 }
504
505 /// The synchronization scope ID of this fence instruction. Not quite enough
506 /// room in SubClassData for everything, so synchronization scope ID gets its
507 /// own field.
508 SyncScope::ID SSID;
509};
510
511//===----------------------------------------------------------------------===//
512// AtomicCmpXchgInst Class
513//===----------------------------------------------------------------------===//
514
515/// An instruction that atomically checks whether a
516/// specified value is in a memory location, and, if it is, stores a new value
517/// there. The value returned by this instruction is a pair containing the
518/// original value as first element, and an i1 indicating success (true) or
519/// failure (false) as second element.
520///
521class AtomicCmpXchgInst : public Instruction {
522 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
523 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
524 SyncScope::ID SSID);
525
526 template <unsigned Offset>
527 using AtomicOrderingBitfieldElement =
528 typename Bitfield::Element<AtomicOrdering, Offset, 3,
529 AtomicOrdering::LAST>;
530
531protected:
532 // Note: Instruction needs to be a friend here to call cloneImpl.
533 friend class Instruction;
534
535 AtomicCmpXchgInst *cloneImpl() const;
536
537public:
538 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
539 AtomicOrdering SuccessOrdering,
540 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
541 Instruction *InsertBefore = nullptr);
542 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
543 AtomicOrdering SuccessOrdering,
544 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
545 BasicBlock *InsertAtEnd);
546
547 // allocate space for exactly three operands
548 void *operator new(size_t S) { return User::operator new(S, 3); }
549 void operator delete(void *Ptr) { User::operator delete(Ptr); }
550
551 using VolatileField = BoolBitfieldElementT<0>;
552 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
553 using SuccessOrderingField =
554 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
555 using FailureOrderingField =
556 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
557 using AlignmentField =
558 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
559 static_assert(
560 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
561 FailureOrderingField, AlignmentField>(),
562 "Bitfields must be contiguous");
563
564 /// Return the alignment of the memory that is being allocated by the
565 /// instruction.
566 Align getAlign() const {
567 return Align(1ULL << getSubclassData<AlignmentField>());
568 }
569
570 void setAlignment(Align Align) {
571 setSubclassData<AlignmentField>(Log2(Align));
572 }
573
574 /// Return true if this is a cmpxchg from a volatile memory
575 /// location.
576 ///
577 bool isVolatile() const { return getSubclassData<VolatileField>(); }
578
579 /// Specify whether this is a volatile cmpxchg.
580 ///
581 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
582
583 /// Return true if this cmpxchg may spuriously fail.
584 bool isWeak() const { return getSubclassData<WeakField>(); }
585
586 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
587
588 /// Transparently provide more efficient getOperand methods.
589 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
590
591 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
592 return Ordering != AtomicOrdering::NotAtomic &&
593 Ordering != AtomicOrdering::Unordered;
594 }
595
596 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
597 return Ordering != AtomicOrdering::NotAtomic &&
598 Ordering != AtomicOrdering::Unordered &&
599 Ordering != AtomicOrdering::AcquireRelease &&
600 Ordering != AtomicOrdering::Release;
601 }
602
603 /// Returns the success ordering constraint of this cmpxchg instruction.
604 AtomicOrdering getSuccessOrdering() const {
605 return getSubclassData<SuccessOrderingField>();
606 }
607
608 /// Sets the success ordering constraint of this cmpxchg instruction.
609 void setSuccessOrdering(AtomicOrdering Ordering) {
610 assert(isValidSuccessOrdering(Ordering) &&(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 611, __extension__ __PRETTY_FUNCTION__))
611 "invalid CmpXchg success ordering")(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 611, __extension__ __PRETTY_FUNCTION__))
;
612 setSubclassData<SuccessOrderingField>(Ordering);
613 }
614
615 /// Returns the failure ordering constraint of this cmpxchg instruction.
616 AtomicOrdering getFailureOrdering() const {
617 return getSubclassData<FailureOrderingField>();
618 }
619
620 /// Sets the failure ordering constraint of this cmpxchg instruction.
621 void setFailureOrdering(AtomicOrdering Ordering) {
622 assert(isValidFailureOrdering(Ordering) &&(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 623, __extension__ __PRETTY_FUNCTION__))
623 "invalid CmpXchg failure ordering")(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 623, __extension__ __PRETTY_FUNCTION__))
;
624 setSubclassData<FailureOrderingField>(Ordering);
625 }
626
627 /// Returns a single ordering which is at least as strong as both the
628 /// success and failure orderings for this cmpxchg.
629 AtomicOrdering getMergedOrdering() const {
630 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
631 return AtomicOrdering::SequentiallyConsistent;
632 if (getFailureOrdering() == AtomicOrdering::Acquire) {
633 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
634 return AtomicOrdering::Acquire;
635 if (getSuccessOrdering() == AtomicOrdering::Release)
636 return AtomicOrdering::AcquireRelease;
637 }
638 return getSuccessOrdering();
639 }
640
641 /// Returns the synchronization scope ID of this cmpxchg instruction.
642 SyncScope::ID getSyncScopeID() const {
643 return SSID;
644 }
645
646 /// Sets the synchronization scope ID of this cmpxchg instruction.
647 void setSyncScopeID(SyncScope::ID SSID) {
648 this->SSID = SSID;
649 }
650
651 Value *getPointerOperand() { return getOperand(0); }
652 const Value *getPointerOperand() const { return getOperand(0); }
653 static unsigned getPointerOperandIndex() { return 0U; }
654
655 Value *getCompareOperand() { return getOperand(1); }
656 const Value *getCompareOperand() const { return getOperand(1); }
657
658 Value *getNewValOperand() { return getOperand(2); }
659 const Value *getNewValOperand() const { return getOperand(2); }
660
661 /// Returns the address space of the pointer operand.
662 unsigned getPointerAddressSpace() const {
663 return getPointerOperand()->getType()->getPointerAddressSpace();
664 }
665
666 /// Returns the strongest permitted ordering on failure, given the
667 /// desired ordering on success.
668 ///
669 /// If the comparison in a cmpxchg operation fails, there is no atomic store
670 /// so release semantics cannot be provided. So this function drops explicit
671 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
672 /// operation would remain SequentiallyConsistent.
673 static AtomicOrdering
674 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
675 switch (SuccessOrdering) {
676 default:
677 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 677)
;
678 case AtomicOrdering::Release:
679 case AtomicOrdering::Monotonic:
680 return AtomicOrdering::Monotonic;
681 case AtomicOrdering::AcquireRelease:
682 case AtomicOrdering::Acquire:
683 return AtomicOrdering::Acquire;
684 case AtomicOrdering::SequentiallyConsistent:
685 return AtomicOrdering::SequentiallyConsistent;
686 }
687 }
688
689 // Methods for support type inquiry through isa, cast, and dyn_cast:
690 static bool classof(const Instruction *I) {
691 return I->getOpcode() == Instruction::AtomicCmpXchg;
692 }
693 static bool classof(const Value *V) {
694 return isa<Instruction>(V) && classof(cast<Instruction>(V));
695 }
696
697private:
698 // Shadow Instruction::setInstructionSubclassData with a private forwarding
699 // method so that subclasses cannot accidentally use it.
700 template <typename Bitfield>
701 void setSubclassData(typename Bitfield::Type Value) {
702 Instruction::setSubclassData<Bitfield>(Value);
703 }
704
705 /// The synchronization scope ID of this cmpxchg instruction. Not quite
706 /// enough room in SubClassData for everything, so synchronization scope ID
707 /// gets its own field.
708 SyncScope::ID SSID;
709};
710
711template <>
712struct OperandTraits<AtomicCmpXchgInst> :
713 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
714};
715
716DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 716, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicCmpXchgInst>::op_begin
(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture].get
()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 716, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicCmpXchgInst::getNumOperands() const { return
OperandTraits<AtomicCmpXchgInst>::operands(this); } template
<int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &AtomicCmpXchgInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
717
718//===----------------------------------------------------------------------===//
719// AtomicRMWInst Class
720//===----------------------------------------------------------------------===//
721
722/// an instruction that atomically reads a memory location,
723/// combines it with another value, and then stores the result back. Returns
724/// the old value.
725///
726class AtomicRMWInst : public Instruction {
727protected:
728 // Note: Instruction needs to be a friend here to call cloneImpl.
729 friend class Instruction;
730
731 AtomicRMWInst *cloneImpl() const;
732
733public:
734 /// This enumeration lists the possible modifications atomicrmw can make. In
735 /// the descriptions, 'p' is the pointer to the instruction's memory location,
736 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
737 /// instruction. These instructions always return 'old'.
738 enum BinOp : unsigned {
739 /// *p = v
740 Xchg,
741 /// *p = old + v
742 Add,
743 /// *p = old - v
744 Sub,
745 /// *p = old & v
746 And,
747 /// *p = ~(old & v)
748 Nand,
749 /// *p = old | v
750 Or,
751 /// *p = old ^ v
752 Xor,
753 /// *p = old >signed v ? old : v
754 Max,
755 /// *p = old <signed v ? old : v
756 Min,
757 /// *p = old >unsigned v ? old : v
758 UMax,
759 /// *p = old <unsigned v ? old : v
760 UMin,
761
762 /// *p = old + v
763 FAdd,
764
765 /// *p = old - v
766 FSub,
767
768 FIRST_BINOP = Xchg,
769 LAST_BINOP = FSub,
770 BAD_BINOP
771 };
772
773private:
774 template <unsigned Offset>
775 using AtomicOrderingBitfieldElement =
776 typename Bitfield::Element<AtomicOrdering, Offset, 3,
777 AtomicOrdering::LAST>;
778
779 template <unsigned Offset>
780 using BinOpBitfieldElement =
781 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
782
783public:
784 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
785 AtomicOrdering Ordering, SyncScope::ID SSID,
786 Instruction *InsertBefore = nullptr);
787 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
788 AtomicOrdering Ordering, SyncScope::ID SSID,
789 BasicBlock *InsertAtEnd);
790
791 // allocate space for exactly two operands
792 void *operator new(size_t S) { return User::operator new(S, 2); }
793 void operator delete(void *Ptr) { User::operator delete(Ptr); }
794
795 using VolatileField = BoolBitfieldElementT<0>;
796 using AtomicOrderingField =
797 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
798 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
799 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
800 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
801 OperationField, AlignmentField>(),
802 "Bitfields must be contiguous");
803
804 BinOp getOperation() const { return getSubclassData<OperationField>(); }
805
806 static StringRef getOperationName(BinOp Op);
807
808 static bool isFPOperation(BinOp Op) {
809 switch (Op) {
810 case AtomicRMWInst::FAdd:
811 case AtomicRMWInst::FSub:
812 return true;
813 default:
814 return false;
815 }
816 }
817
818 void setOperation(BinOp Operation) {
819 setSubclassData<OperationField>(Operation);
820 }
821
822 /// Return the alignment of the memory that is being allocated by the
823 /// instruction.
824 Align getAlign() const {
825 return Align(1ULL << getSubclassData<AlignmentField>());
826 }
827
828 void setAlignment(Align Align) {
829 setSubclassData<AlignmentField>(Log2(Align));
830 }
831
832 /// Return true if this is a RMW on a volatile memory location.
833 ///
834 bool isVolatile() const { return getSubclassData<VolatileField>(); }
835
836 /// Specify whether this is a volatile RMW or not.
837 ///
838 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
839
840 /// Transparently provide more efficient getOperand methods.
841 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
842
843 /// Returns the ordering constraint of this rmw instruction.
844 AtomicOrdering getOrdering() const {
845 return getSubclassData<AtomicOrderingField>();
846 }
847
848 /// Sets the ordering constraint of this rmw instruction.
849 void setOrdering(AtomicOrdering Ordering) {
850 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 851, __extension__ __PRETTY_FUNCTION__))
851 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 851, __extension__ __PRETTY_FUNCTION__))
;
852 setSubclassData<AtomicOrderingField>(Ordering);
853 }
854
855 /// Returns the synchronization scope ID of this rmw instruction.
856 SyncScope::ID getSyncScopeID() const {
857 return SSID;
858 }
859
860 /// Sets the synchronization scope ID of this rmw instruction.
861 void setSyncScopeID(SyncScope::ID SSID) {
862 this->SSID = SSID;
863 }
864
865 Value *getPointerOperand() { return getOperand(0); }
866 const Value *getPointerOperand() const { return getOperand(0); }
867 static unsigned getPointerOperandIndex() { return 0U; }
868
869 Value *getValOperand() { return getOperand(1); }
870 const Value *getValOperand() const { return getOperand(1); }
871
872 /// Returns the address space of the pointer operand.
873 unsigned getPointerAddressSpace() const {
874 return getPointerOperand()->getType()->getPointerAddressSpace();
875 }
876
877 bool isFloatingPointOperation() const {
878 return isFPOperation(getOperation());
879 }
880
881 // Methods for support type inquiry through isa, cast, and dyn_cast:
882 static bool classof(const Instruction *I) {
883 return I->getOpcode() == Instruction::AtomicRMW;
884 }
885 static bool classof(const Value *V) {
886 return isa<Instruction>(V) && classof(cast<Instruction>(V));
887 }
888
889private:
890 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
891 AtomicOrdering Ordering, SyncScope::ID SSID);
892
893 // Shadow Instruction::setInstructionSubclassData with a private forwarding
894 // method so that subclasses cannot accidentally use it.
895 template <typename Bitfield>
896 void setSubclassData(typename Bitfield::Type Value) {
897 Instruction::setSubclassData<Bitfield>(Value);
898 }
899
900 /// The synchronization scope ID of this rmw instruction. Not quite enough
901 /// room in SubClassData for everything, so synchronization scope ID gets its
902 /// own field.
903 SyncScope::ID SSID;
904};
905
906template <>
907struct OperandTraits<AtomicRMWInst>
908 : public FixedNumOperandTraits<AtomicRMWInst,2> {
909};
910
911DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 911, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicRMWInst>::op_begin(const_cast
<AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<AtomicRMWInst
>::operands(this) && "setOperand() out of range!")
? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 911, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicRMWInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicRMWInst::getNumOperands() const { return OperandTraits
<AtomicRMWInst>::operands(this); } template <int Idx_nocapture
> Use &AtomicRMWInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &AtomicRMWInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
912
913//===----------------------------------------------------------------------===//
914// GetElementPtrInst Class
915//===----------------------------------------------------------------------===//
916
917// checkGEPType - Simple wrapper function to give a better assertion failure
918// message on bad indexes for a gep instruction.
919//
920inline Type *checkGEPType(Type *Ty) {
921 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 921, __extension__ __PRETTY_FUNCTION__))
;
922 return Ty;
923}
924
925/// an instruction for type-safe pointer arithmetic to
926/// access elements of arrays and structs
927///
928class GetElementPtrInst : public Instruction {
929 Type *SourceElementType;
930 Type *ResultElementType;
931
932 GetElementPtrInst(const GetElementPtrInst &GEPI);
933
934 /// Constructors - Create a getelementptr instruction with a base pointer an
935 /// list of indices. The first ctor can optionally insert before an existing
936 /// instruction, the second appends the new instruction to the specified
937 /// BasicBlock.
938 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
939 ArrayRef<Value *> IdxList, unsigned Values,
940 const Twine &NameStr, Instruction *InsertBefore);
941 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942 ArrayRef<Value *> IdxList, unsigned Values,
943 const Twine &NameStr, BasicBlock *InsertAtEnd);
944
945 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
946
947protected:
948 // Note: Instruction needs to be a friend here to call cloneImpl.
949 friend class Instruction;
950
951 GetElementPtrInst *cloneImpl() const;
952
953public:
954 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
955 ArrayRef<Value *> IdxList,
956 const Twine &NameStr = "",
957 Instruction *InsertBefore = nullptr) {
958 unsigned Values = 1 + unsigned(IdxList.size());
959 assert(PointeeType && "Must specify element type")(static_cast <bool> (PointeeType && "Must specify element type"
) ? void (0) : __assert_fail ("PointeeType && \"Must specify element type\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 959, __extension__ __PRETTY_FUNCTION__))
;
960 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 961, __extension__ __PRETTY_FUNCTION__))
961 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 961, __extension__ __PRETTY_FUNCTION__))
;
962 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
963 NameStr, InsertBefore);
964 }
965
966 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967 ArrayRef<Value *> IdxList,
968 const Twine &NameStr,
969 BasicBlock *InsertAtEnd) {
970 unsigned Values = 1 + unsigned(IdxList.size());
971 assert(PointeeType && "Must specify element type")(static_cast <bool> (PointeeType && "Must specify element type"
) ? void (0) : __assert_fail ("PointeeType && \"Must specify element type\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 971, __extension__ __PRETTY_FUNCTION__))
;
972 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 973, __extension__ __PRETTY_FUNCTION__))
973 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 973, __extension__ __PRETTY_FUNCTION__))
;
974 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
975 NameStr, InsertAtEnd);
976 }
977
978 LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds([[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
979 Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr = "",[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
980 Instruction *InsertBefore = nullptr),[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
981 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr = "", Instruction
*InsertBefore = nullptr)
{
982 return CreateInBounds(
983 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
984 NameStr, InsertBefore);
985 }
986
987 /// Create an "inbounds" getelementptr. See the documentation for the
988 /// "inbounds" flag in LangRef.html for details.
989 static GetElementPtrInst *
990 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
991 const Twine &NameStr = "",
992 Instruction *InsertBefore = nullptr) {
993 GetElementPtrInst *GEP =
994 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
995 GEP->setIsInBounds(true);
996 return GEP;
997 }
998
999 LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds([[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
1000 Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr,[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
1001 BasicBlock *InsertAtEnd),[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
1002 "Use the version with explicit element type instead")[[deprecated("Use the version with explicit element type instead"
)]] static GetElementPtrInst *CreateInBounds( Value *Ptr, ArrayRef
<Value *> IdxList, const Twine &NameStr, BasicBlock
*InsertAtEnd)
{
1003 return CreateInBounds(
1004 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1005 NameStr, InsertAtEnd);
1006 }
1007
1008 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1009 ArrayRef<Value *> IdxList,
1010 const Twine &NameStr,
1011 BasicBlock *InsertAtEnd) {
1012 GetElementPtrInst *GEP =
1013 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1014 GEP->setIsInBounds(true);
1015 return GEP;
1016 }
1017
1018 /// Transparently provide more efficient getOperand methods.
1019 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1020
1021 Type *getSourceElementType() const { return SourceElementType; }
1022
1023 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1024 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1025
1026 Type *getResultElementType() const {
1027 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1028, __extension__ __PRETTY_FUNCTION__))
1028 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1028, __extension__ __PRETTY_FUNCTION__))
;
1029 return ResultElementType;
1030 }
1031
1032 /// Returns the address space of this instruction's pointer type.
1033 unsigned getAddressSpace() const {
1034 // Note that this is always the same as the pointer operand's address space
1035 // and that is cheaper to compute, so cheat here.
1036 return getPointerAddressSpace();
1037 }
1038
1039 /// Returns the result type of a getelementptr with the given source
1040 /// element type and indexes.
1041 ///
1042 /// Null is returned if the indices are invalid for the specified
1043 /// source element type.
1044 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1045 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1046 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1047
1048 /// Return the type of the element at the given index of an indexable
1049 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1050 ///
1051 /// Returns null if the type can't be indexed, or the given index is not
1052 /// legal for the given type.
1053 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1054 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1055
1056 inline op_iterator idx_begin() { return op_begin()+1; }
1057 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1058 inline op_iterator idx_end() { return op_end(); }
1059 inline const_op_iterator idx_end() const { return op_end(); }
1060
1061 inline iterator_range<op_iterator> indices() {
1062 return make_range(idx_begin(), idx_end());
1063 }
1064
1065 inline iterator_range<const_op_iterator> indices() const {
1066 return make_range(idx_begin(), idx_end());
1067 }
1068
1069 Value *getPointerOperand() {
1070 return getOperand(0);
1071 }
1072 const Value *getPointerOperand() const {
1073 return getOperand(0);
1074 }
1075 static unsigned getPointerOperandIndex() {
1076 return 0U; // get index for modifying correct operand.
1077 }
1078
1079 /// Method to return the pointer operand as a
1080 /// PointerType.
1081 Type *getPointerOperandType() const {
1082 return getPointerOperand()->getType();
1083 }
1084
1085 /// Returns the address space of the pointer operand.
1086 unsigned getPointerAddressSpace() const {
1087 return getPointerOperandType()->getPointerAddressSpace();
1088 }
1089
1090 /// Returns the pointer type returned by the GEP
1091 /// instruction, which may be a vector of pointers.
1092 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1093 ArrayRef<Value *> IdxList) {
1094 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1095 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1096 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1097 Type *PtrTy = OrigPtrTy->isOpaque()
1098 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1099 : PointerType::get(ResultElemTy, AddrSpace);
1100 // Vector GEP
1101 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1102 ElementCount EltCount = PtrVTy->getElementCount();
1103 return VectorType::get(PtrTy, EltCount);
1104 }
1105 for (Value *Index : IdxList)
1106 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1107 ElementCount EltCount = IndexVTy->getElementCount();
1108 return VectorType::get(PtrTy, EltCount);
1109 }
1110 // Scalar GEP
1111 return PtrTy;
1112 }
1113
1114 unsigned getNumIndices() const { // Note: always non-negative
1115 return getNumOperands() - 1;
1116 }
1117
1118 bool hasIndices() const {
1119 return getNumOperands() > 1;
1120 }
1121
1122 /// Return true if all of the indices of this GEP are
1123 /// zeros. If so, the result pointer and the first operand have the same
1124 /// value, just potentially different types.
1125 bool hasAllZeroIndices() const;
1126
1127 /// Return true if all of the indices of this GEP are
1128 /// constant integers. If so, the result pointer and the first operand have
1129 /// a constant offset between them.
1130 bool hasAllConstantIndices() const;
1131
1132 /// Set or clear the inbounds flag on this GEP instruction.
1133 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1134 void setIsInBounds(bool b = true);
1135
1136 /// Determine whether the GEP has the inbounds flag.
1137 bool isInBounds() const;
1138
1139 /// Accumulate the constant address offset of this GEP if possible.
1140 ///
1141 /// This routine accepts an APInt into which it will accumulate the constant
1142 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1143 /// all-constant, it returns false and the value of the offset APInt is
1144 /// undefined (it is *not* preserved!). The APInt passed into this routine
1145 /// must be at least as wide as the IntPtr type for the address space of
1146 /// the base GEP pointer.
1147 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1148 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1149 MapVector<Value *, APInt> &VariableOffsets,
1150 APInt &ConstantOffset) const;
1151 // Methods for support type inquiry through isa, cast, and dyn_cast:
1152 static bool classof(const Instruction *I) {
1153 return (I->getOpcode() == Instruction::GetElementPtr);
1154 }
1155 static bool classof(const Value *V) {
1156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1157 }
1158};
1159
1160template <>
1161struct OperandTraits<GetElementPtrInst> :
1162 public VariadicOperandTraits<GetElementPtrInst, 1> {
1163};
1164
1165GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1166 ArrayRef<Value *> IdxList, unsigned Values,
1167 const Twine &NameStr,
1168 Instruction *InsertBefore)
1169 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1170 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1171 Values, InsertBefore),
1172 SourceElementType(PointeeType),
1173 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1174 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1175, __extension__ __PRETTY_FUNCTION__))
1175 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1175, __extension__ __PRETTY_FUNCTION__))
;
1176 init(Ptr, IdxList, NameStr);
1177}
1178
1179GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1180 ArrayRef<Value *> IdxList, unsigned Values,
1181 const Twine &NameStr,
1182 BasicBlock *InsertAtEnd)
1183 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1184 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1185 Values, InsertAtEnd),
1186 SourceElementType(PointeeType),
1187 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1188 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1189, __extension__ __PRETTY_FUNCTION__))
1189 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1189, __extension__ __PRETTY_FUNCTION__))
;
1190 init(Ptr, IdxList, NameStr);
1191}
1192
1193DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1193, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<GetElementPtrInst>::op_begin
(const_cast<GetElementPtrInst*>(this))[i_nocapture].get
()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1193, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
GetElementPtrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned GetElementPtrInst::getNumOperands() const { return
OperandTraits<GetElementPtrInst>::operands(this); } template
<int Idx_nocapture> Use &GetElementPtrInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &GetElementPtrInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1194
1195//===----------------------------------------------------------------------===//
1196// ICmpInst Class
1197//===----------------------------------------------------------------------===//
1198
1199/// This instruction compares its operands according to the predicate given
1200/// to the constructor. It only operates on integers or pointers. The operands
1201/// must be identical types.
1202/// Represent an integer comparison operator.
1203class ICmpInst: public CmpInst {
1204 void AssertOK() {
1205 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1206, __extension__ __PRETTY_FUNCTION__))
1206 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1206, __extension__ __PRETTY_FUNCTION__))
;
1207 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1208, __extension__ __PRETTY_FUNCTION__))
1208 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1208, __extension__ __PRETTY_FUNCTION__))
;
1209 // Check that the operands are the right type
1210 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1212, __extension__ __PRETTY_FUNCTION__))
1211 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1212, __extension__ __PRETTY_FUNCTION__))
1212 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1212, __extension__ __PRETTY_FUNCTION__))
;
1213 }
1214
1215protected:
1216 // Note: Instruction needs to be a friend here to call cloneImpl.
1217 friend class Instruction;
1218
1219 /// Clone an identical ICmpInst
1220 ICmpInst *cloneImpl() const;
1221
1222public:
1223 /// Constructor with insert-before-instruction semantics.
1224 ICmpInst(
1225 Instruction *InsertBefore, ///< Where to insert
1226 Predicate pred, ///< The predicate to use for the comparison
1227 Value *LHS, ///< The left-hand-side of the expression
1228 Value *RHS, ///< The right-hand-side of the expression
1229 const Twine &NameStr = "" ///< Name of the instruction
1230 ) : CmpInst(makeCmpResultType(LHS->getType()),
1231 Instruction::ICmp, pred, LHS, RHS, NameStr,
1232 InsertBefore) {
1233#ifndef NDEBUG
1234 AssertOK();
1235#endif
1236 }
1237
1238 /// Constructor with insert-at-end semantics.
1239 ICmpInst(
1240 BasicBlock &InsertAtEnd, ///< Block to insert into.
1241 Predicate pred, ///< The predicate to use for the comparison
1242 Value *LHS, ///< The left-hand-side of the expression
1243 Value *RHS, ///< The right-hand-side of the expression
1244 const Twine &NameStr = "" ///< Name of the instruction
1245 ) : CmpInst(makeCmpResultType(LHS->getType()),
1246 Instruction::ICmp, pred, LHS, RHS, NameStr,
1247 &InsertAtEnd) {
1248#ifndef NDEBUG
1249 AssertOK();
1250#endif
1251 }
1252
1253 /// Constructor with no-insertion semantics
1254 ICmpInst(
1255 Predicate pred, ///< The predicate to use for the comparison
1256 Value *LHS, ///< The left-hand-side of the expression
1257 Value *RHS, ///< The right-hand-side of the expression
1258 const Twine &NameStr = "" ///< Name of the instruction
1259 ) : CmpInst(makeCmpResultType(LHS->getType()),
1260 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1261#ifndef NDEBUG
1262 AssertOK();
1263#endif
1264 }
1265
1266 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1267 /// @returns the predicate that would be the result if the operand were
1268 /// regarded as signed.
1269 /// Return the signed version of the predicate
1270 Predicate getSignedPredicate() const {
1271 return getSignedPredicate(getPredicate());
1272 }
1273
1274 /// This is a static version that you can use without an instruction.
1275 /// Return the signed version of the predicate.
1276 static Predicate getSignedPredicate(Predicate pred);
1277
1278 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1279 /// @returns the predicate that would be the result if the operand were
1280 /// regarded as unsigned.
1281 /// Return the unsigned version of the predicate
1282 Predicate getUnsignedPredicate() const {
1283 return getUnsignedPredicate(getPredicate());
1284 }
1285
1286 /// This is a static version that you can use without an instruction.
1287 /// Return the unsigned version of the predicate.
1288 static Predicate getUnsignedPredicate(Predicate pred);
1289
1290 /// Return true if this predicate is either EQ or NE. This also
1291 /// tests for commutativity.
1292 static bool isEquality(Predicate P) {
1293 return P == ICMP_EQ || P == ICMP_NE;
1294 }
1295
1296 /// Return true if this predicate is either EQ or NE. This also
1297 /// tests for commutativity.
1298 bool isEquality() const {
1299 return isEquality(getPredicate());
1300 }
1301
1302 /// @returns true if the predicate of this ICmpInst is commutative
1303 /// Determine if this relation is commutative.
1304 bool isCommutative() const { return isEquality(); }
1305
1306 /// Return true if the predicate is relational (not EQ or NE).
1307 ///
1308 bool isRelational() const {
1309 return !isEquality();
1310 }
1311
1312 /// Return true if the predicate is relational (not EQ or NE).
1313 ///
1314 static bool isRelational(Predicate P) {
1315 return !isEquality(P);
1316 }
1317
1318 /// Return true if the predicate is SGT or UGT.
1319 ///
1320 static bool isGT(Predicate P) {
1321 return P == ICMP_SGT || P == ICMP_UGT;
1322 }
1323
1324 /// Return true if the predicate is SLT or ULT.
1325 ///
1326 static bool isLT(Predicate P) {
1327 return P == ICMP_SLT || P == ICMP_ULT;
1328 }
1329
1330 /// Return true if the predicate is SGE or UGE.
1331 ///
1332 static bool isGE(Predicate P) {
1333 return P == ICMP_SGE || P == ICMP_UGE;
1334 }
1335
1336 /// Return true if the predicate is SLE or ULE.
1337 ///
1338 static bool isLE(Predicate P) {
1339 return P == ICMP_SLE || P == ICMP_ULE;
1340 }
1341
1342 /// Exchange the two operands to this instruction in such a way that it does
1343 /// not modify the semantics of the instruction. The predicate value may be
1344 /// changed to retain the same result if the predicate is order dependent
1345 /// (e.g. ult).
1346 /// Swap operands and adjust predicate.
1347 void swapOperands() {
1348 setPredicate(getSwappedPredicate());
1349 Op<0>().swap(Op<1>());
1350 }
1351
1352 // Methods for support type inquiry through isa, cast, and dyn_cast:
1353 static bool classof(const Instruction *I) {
1354 return I->getOpcode() == Instruction::ICmp;
1355 }
1356 static bool classof(const Value *V) {
1357 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1358 }
1359};
1360
1361//===----------------------------------------------------------------------===//
1362// FCmpInst Class
1363//===----------------------------------------------------------------------===//
1364
1365/// This instruction compares its operands according to the predicate given
1366/// to the constructor. It only operates on floating point values or packed
1367/// vectors of floating point values. The operands must be identical types.
1368/// Represents a floating point comparison operator.
1369class FCmpInst: public CmpInst {
1370 void AssertOK() {
1371 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1371, __extension__ __PRETTY_FUNCTION__))
;
1372 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1373, __extension__ __PRETTY_FUNCTION__))
1373 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1373, __extension__ __PRETTY_FUNCTION__))
;
1374 // Check that the operands are the right type
1375 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1376, __extension__ __PRETTY_FUNCTION__))
1376 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1376, __extension__ __PRETTY_FUNCTION__))
;
1377 }
1378
1379protected:
1380 // Note: Instruction needs to be a friend here to call cloneImpl.
1381 friend class Instruction;
1382
1383 /// Clone an identical FCmpInst
1384 FCmpInst *cloneImpl() const;
1385
1386public:
1387 /// Constructor with insert-before-instruction semantics.
1388 FCmpInst(
1389 Instruction *InsertBefore, ///< Where to insert
1390 Predicate pred, ///< The predicate to use for the comparison
1391 Value *LHS, ///< The left-hand-side of the expression
1392 Value *RHS, ///< The right-hand-side of the expression
1393 const Twine &NameStr = "" ///< Name of the instruction
1394 ) : CmpInst(makeCmpResultType(LHS->getType()),
1395 Instruction::FCmp, pred, LHS, RHS, NameStr,
1396 InsertBefore) {
1397 AssertOK();
1398 }
1399
1400 /// Constructor with insert-at-end semantics.
1401 FCmpInst(
1402 BasicBlock &InsertAtEnd, ///< Block to insert into.
1403 Predicate pred, ///< The predicate to use for the comparison
1404 Value *LHS, ///< The left-hand-side of the expression
1405 Value *RHS, ///< The right-hand-side of the expression
1406 const Twine &NameStr = "" ///< Name of the instruction
1407 ) : CmpInst(makeCmpResultType(LHS->getType()),
1408 Instruction::FCmp, pred, LHS, RHS, NameStr,
1409 &InsertAtEnd) {
1410 AssertOK();
1411 }
1412
1413 /// Constructor with no-insertion semantics
1414 FCmpInst(
1415 Predicate Pred, ///< The predicate to use for the comparison
1416 Value *LHS, ///< The left-hand-side of the expression
1417 Value *RHS, ///< The right-hand-side of the expression
1418 const Twine &NameStr = "", ///< Name of the instruction
1419 Instruction *FlagsSource = nullptr
1420 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1421 RHS, NameStr, nullptr, FlagsSource) {
1422 AssertOK();
1423 }
1424
1425 /// @returns true if the predicate of this instruction is EQ or NE.
1426 /// Determine if this is an equality predicate.
1427 static bool isEquality(Predicate Pred) {
1428 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1429 Pred == FCMP_UNE;
1430 }
1431
1432 /// @returns true if the predicate of this instruction is EQ or NE.
1433 /// Determine if this is an equality predicate.
1434 bool isEquality() const { return isEquality(getPredicate()); }
1435
1436 /// @returns true if the predicate of this instruction is commutative.
1437 /// Determine if this is a commutative predicate.
1438 bool isCommutative() const {
1439 return isEquality() ||
1440 getPredicate() == FCMP_FALSE ||
1441 getPredicate() == FCMP_TRUE ||
1442 getPredicate() == FCMP_ORD ||
1443 getPredicate() == FCMP_UNO;
1444 }
1445
1446 /// @returns true if the predicate is relational (not EQ or NE).
1447 /// Determine if this a relational predicate.
1448 bool isRelational() const { return !isEquality(); }
1449
1450 /// Exchange the two operands to this instruction in such a way that it does
1451 /// not modify the semantics of the instruction. The predicate value may be
1452 /// changed to retain the same result if the predicate is order dependent
1453 /// (e.g. ult).
1454 /// Swap operands and adjust predicate.
1455 void swapOperands() {
1456 setPredicate(getSwappedPredicate());
1457 Op<0>().swap(Op<1>());
1458 }
1459
1460 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1461 static bool classof(const Instruction *I) {
1462 return I->getOpcode() == Instruction::FCmp;
1463 }
1464 static bool classof(const Value *V) {
1465 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1466 }
1467};
1468
1469//===----------------------------------------------------------------------===//
1470/// This class represents a function call, abstracting a target
1471/// machine's calling convention. This class uses low bit of the SubClassData
1472/// field to indicate whether or not this is a tail call. The rest of the bits
1473/// hold the calling convention of the call.
1474///
1475class CallInst : public CallBase {
1476 CallInst(const CallInst &CI);
1477
1478 /// Construct a CallInst given a range of arguments.
1479 /// Construct a CallInst from a range of arguments
1480 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1481 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1482 Instruction *InsertBefore);
1483
1484 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1485 const Twine &NameStr, Instruction *InsertBefore)
1486 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1487
1488 /// Construct a CallInst given a range of arguments.
1489 /// Construct a CallInst from a range of arguments
1490 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1491 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1492 BasicBlock *InsertAtEnd);
1493
1494 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1495 Instruction *InsertBefore);
1496
1497 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1498 BasicBlock *InsertAtEnd);
1499
1500 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1501 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1502 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1503
1504 /// Compute the number of operands to allocate.
1505 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1506 // We need one operand for the called function, plus the input operand
1507 // counts provided.
1508 return 1 + NumArgs + NumBundleInputs;
1509 }
1510
1511protected:
1512 // Note: Instruction needs to be a friend here to call cloneImpl.
1513 friend class Instruction;
1514
1515 CallInst *cloneImpl() const;
1516
1517public:
1518 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1519 Instruction *InsertBefore = nullptr) {
1520 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1521 }
1522
1523 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1524 const Twine &NameStr,
1525 Instruction *InsertBefore = nullptr) {
1526 return new (ComputeNumOperands(Args.size()))
1527 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1528 }
1529
1530 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1531 ArrayRef<OperandBundleDef> Bundles = None,
1532 const Twine &NameStr = "",
1533 Instruction *InsertBefore = nullptr) {
1534 const int NumOperands =
1535 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1536 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1537
1538 return new (NumOperands, DescriptorBytes)
1539 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1540 }
1541
1542 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1543 BasicBlock *InsertAtEnd) {
1544 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1545 }
1546
1547 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1548 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1549 return new (ComputeNumOperands(Args.size()))
1550 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1551 }
1552
1553 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1554 ArrayRef<OperandBundleDef> Bundles,
1555 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1556 const int NumOperands =
1557 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1558 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1559
1560 return new (NumOperands, DescriptorBytes)
1561 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1562 }
1563
1564 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1565 Instruction *InsertBefore = nullptr) {
1566 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1567 InsertBefore);
1568 }
1569
1570 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1571 ArrayRef<OperandBundleDef> Bundles = None,
1572 const Twine &NameStr = "",
1573 Instruction *InsertBefore = nullptr) {
1574 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1575 NameStr, InsertBefore);
1576 }
1577
1578 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1579 const Twine &NameStr,
1580 Instruction *InsertBefore = nullptr) {
1581 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1582 InsertBefore);
1583 }
1584
1585 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1586 BasicBlock *InsertAtEnd) {
1587 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1588 InsertAtEnd);
1589 }
1590
1591 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1592 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1593 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1594 InsertAtEnd);
1595 }
1596
1597 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1598 ArrayRef<OperandBundleDef> Bundles,
1599 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1600 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1601 NameStr, InsertAtEnd);
1602 }
1603
1604 /// Create a clone of \p CI with a different set of operand bundles and
1605 /// insert it before \p InsertPt.
1606 ///
1607 /// The returned call instruction is identical \p CI in every way except that
1608 /// the operand bundles for the new instruction are set to the operand bundles
1609 /// in \p Bundles.
1610 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1611 Instruction *InsertPt = nullptr);
1612
1613 /// Generate the IR for a call to malloc:
1614 /// 1. Compute the malloc call's argument as the specified type's size,
1615 /// possibly multiplied by the array size if the array size is not
1616 /// constant 1.
1617 /// 2. Call malloc with that argument.
1618 /// 3. Bitcast the result of the malloc call to the specified type.
1619 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1620 Type *AllocTy, Value *AllocSize,
1621 Value *ArraySize = nullptr,
1622 Function *MallocF = nullptr,
1623 const Twine &Name = "");
1624 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1625 Type *AllocTy, Value *AllocSize,
1626 Value *ArraySize = nullptr,
1627 Function *MallocF = nullptr,
1628 const Twine &Name = "");
1629 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1630 Type *AllocTy, Value *AllocSize,
1631 Value *ArraySize = nullptr,
1632 ArrayRef<OperandBundleDef> Bundles = None,
1633 Function *MallocF = nullptr,
1634 const Twine &Name = "");
1635 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1636 Type *AllocTy, Value *AllocSize,
1637 Value *ArraySize = nullptr,
1638 ArrayRef<OperandBundleDef> Bundles = None,
1639 Function *MallocF = nullptr,
1640 const Twine &Name = "");
1641 /// Generate the IR for a call to the builtin free function.
1642 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1643 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1644 static Instruction *CreateFree(Value *Source,
1645 ArrayRef<OperandBundleDef> Bundles,
1646 Instruction *InsertBefore);
1647 static Instruction *CreateFree(Value *Source,
1648 ArrayRef<OperandBundleDef> Bundles,
1649 BasicBlock *InsertAtEnd);
1650
1651 // Note that 'musttail' implies 'tail'.
1652 enum TailCallKind : unsigned {
1653 TCK_None = 0,
1654 TCK_Tail = 1,
1655 TCK_MustTail = 2,
1656 TCK_NoTail = 3,
1657 TCK_LAST = TCK_NoTail
1658 };
1659
1660 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1661 static_assert(
1662 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1663 "Bitfields must be contiguous");
1664
1665 TailCallKind getTailCallKind() const {
1666 return getSubclassData<TailCallKindField>();
1667 }
1668
1669 bool isTailCall() const {
1670 TailCallKind Kind = getTailCallKind();
1671 return Kind == TCK_Tail || Kind == TCK_MustTail;
1672 }
1673
1674 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1675
1676 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1677
1678 void setTailCallKind(TailCallKind TCK) {
1679 setSubclassData<TailCallKindField>(TCK);
1680 }
1681
1682 void setTailCall(bool IsTc = true) {
1683 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1684 }
1685
1686 /// Return true if the call can return twice
1687 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1688 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1689
1690 // Methods for support type inquiry through isa, cast, and dyn_cast:
1691 static bool classof(const Instruction *I) {
1692 return I->getOpcode() == Instruction::Call;
1693 }
1694 static bool classof(const Value *V) {
1695 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1696 }
1697
1698 /// Updates profile metadata by scaling it by \p S / \p T.
1699 void updateProfWeight(uint64_t S, uint64_t T);
1700
1701private:
1702 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1703 // method so that subclasses cannot accidentally use it.
1704 template <typename Bitfield>
1705 void setSubclassData(typename Bitfield::Type Value) {
1706 Instruction::setSubclassData<Bitfield>(Value);
1707 }
1708};
1709
1710CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1711 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1712 BasicBlock *InsertAtEnd)
1713 : CallBase(Ty->getReturnType(), Instruction::Call,
1714 OperandTraits<CallBase>::op_end(this) -
1715 (Args.size() + CountBundleInputs(Bundles) + 1),
1716 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1717 InsertAtEnd) {
1718 init(Ty, Func, Args, Bundles, NameStr);
1719}
1720
1721CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1722 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1723 Instruction *InsertBefore)
1724 : CallBase(Ty->getReturnType(), Instruction::Call,
1725 OperandTraits<CallBase>::op_end(this) -
1726 (Args.size() + CountBundleInputs(Bundles) + 1),
1727 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1728 InsertBefore) {
1729 init(Ty, Func, Args, Bundles, NameStr);
1730}
1731
1732//===----------------------------------------------------------------------===//
1733// SelectInst Class
1734//===----------------------------------------------------------------------===//
1735
1736/// This class represents the LLVM 'select' instruction.
1737///
1738class SelectInst : public Instruction {
1739 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1740 Instruction *InsertBefore)
1741 : Instruction(S1->getType(), Instruction::Select,
1742 &Op<0>(), 3, InsertBefore) {
1743 init(C, S1, S2);
1744 setName(NameStr);
1745 }
1746
1747 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1748 BasicBlock *InsertAtEnd)
1749 : Instruction(S1->getType(), Instruction::Select,
1750 &Op<0>(), 3, InsertAtEnd) {
1751 init(C, S1, S2);
1752 setName(NameStr);
1753 }
1754
1755 void init(Value *C, Value *S1, Value *S2) {
1756 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1756, __extension__ __PRETTY_FUNCTION__))
;
1757 Op<0>() = C;
1758 Op<1>() = S1;
1759 Op<2>() = S2;
1760 }
1761
1762protected:
1763 // Note: Instruction needs to be a friend here to call cloneImpl.
1764 friend class Instruction;
1765
1766 SelectInst *cloneImpl() const;
1767
1768public:
1769 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1770 const Twine &NameStr = "",
1771 Instruction *InsertBefore = nullptr,
1772 Instruction *MDFrom = nullptr) {
1773 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1774 if (MDFrom)
1775 Sel->copyMetadata(*MDFrom);
1776 return Sel;
1777 }
1778
1779 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1780 const Twine &NameStr,
1781 BasicBlock *InsertAtEnd) {
1782 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1783 }
1784
1785 const Value *getCondition() const { return Op<0>(); }
1786 const Value *getTrueValue() const { return Op<1>(); }
1787 const Value *getFalseValue() const { return Op<2>(); }
1788 Value *getCondition() { return Op<0>(); }
1789 Value *getTrueValue() { return Op<1>(); }
1790 Value *getFalseValue() { return Op<2>(); }
1791
1792 void setCondition(Value *V) { Op<0>() = V; }
1793 void setTrueValue(Value *V) { Op<1>() = V; }
1794 void setFalseValue(Value *V) { Op<2>() = V; }
1795
1796 /// Swap the true and false values of the select instruction.
1797 /// This doesn't swap prof metadata.
1798 void swapValues() { Op<1>().swap(Op<2>()); }
1799
1800 /// Return a string if the specified operands are invalid
1801 /// for a select operation, otherwise return null.
1802 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1803
1804 /// Transparently provide more efficient getOperand methods.
1805 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1806
1807 OtherOps getOpcode() const {
1808 return static_cast<OtherOps>(Instruction::getOpcode());
1809 }
1810
1811 // Methods for support type inquiry through isa, cast, and dyn_cast:
1812 static bool classof(const Instruction *I) {
1813 return I->getOpcode() == Instruction::Select;
1814 }
1815 static bool classof(const Value *V) {
1816 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1817 }
1818};
1819
1820template <>
1821struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1822};
1823
1824DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SelectInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1824, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SelectInst>::op_begin(const_cast
<SelectInst*>(this))[i_nocapture].get()); } void SelectInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SelectInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1824, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SelectInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SelectInst::getNumOperands() const { return OperandTraits
<SelectInst>::operands(this); } template <int Idx_nocapture
> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1825
1826//===----------------------------------------------------------------------===//
1827// VAArgInst Class
1828//===----------------------------------------------------------------------===//
1829
1830/// This class represents the va_arg llvm instruction, which returns
1831/// an argument of the specified type given a va_list and increments that list
1832///
1833class VAArgInst : public UnaryInstruction {
1834protected:
1835 // Note: Instruction needs to be a friend here to call cloneImpl.
1836 friend class Instruction;
1837
1838 VAArgInst *cloneImpl() const;
1839
1840public:
1841 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1842 Instruction *InsertBefore = nullptr)
1843 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1844 setName(NameStr);
1845 }
1846
1847 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1848 BasicBlock *InsertAtEnd)
1849 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1850 setName(NameStr);
1851 }
1852
1853 Value *getPointerOperand() { return getOperand(0); }
1854 const Value *getPointerOperand() const { return getOperand(0); }
1855 static unsigned getPointerOperandIndex() { return 0U; }
1856
1857 // Methods for support type inquiry through isa, cast, and dyn_cast:
1858 static bool classof(const Instruction *I) {
1859 return I->getOpcode() == VAArg;
1860 }
1861 static bool classof(const Value *V) {
1862 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1863 }
1864};
1865
1866//===----------------------------------------------------------------------===//
1867// ExtractElementInst Class
1868//===----------------------------------------------------------------------===//
1869
1870/// This instruction extracts a single (scalar)
1871/// element from a VectorType value
1872///
1873class ExtractElementInst : public Instruction {
1874 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1875 Instruction *InsertBefore = nullptr);
1876 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1877 BasicBlock *InsertAtEnd);
1878
1879protected:
1880 // Note: Instruction needs to be a friend here to call cloneImpl.
1881 friend class Instruction;
1882
1883 ExtractElementInst *cloneImpl() const;
1884
1885public:
1886 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1887 const Twine &NameStr = "",
1888 Instruction *InsertBefore = nullptr) {
1889 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1890 }
1891
1892 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1893 const Twine &NameStr,
1894 BasicBlock *InsertAtEnd) {
1895 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1896 }
1897
1898 /// Return true if an extractelement instruction can be
1899 /// formed with the specified operands.
1900 static bool isValidOperands(const Value *Vec, const Value *Idx);
1901
1902 Value *getVectorOperand() { return Op<0>(); }
1903 Value *getIndexOperand() { return Op<1>(); }
1904 const Value *getVectorOperand() const { return Op<0>(); }
1905 const Value *getIndexOperand() const { return Op<1>(); }
1906
1907 VectorType *getVectorOperandType() const {
1908 return cast<VectorType>(getVectorOperand()->getType());
1909 }
1910
1911 /// Transparently provide more efficient getOperand methods.
1912 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1913
1914 // Methods for support type inquiry through isa, cast, and dyn_cast:
1915 static bool classof(const Instruction *I) {
1916 return I->getOpcode() == Instruction::ExtractElement;
1917 }
1918 static bool classof(const Value *V) {
1919 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1920 }
1921};
1922
1923template <>
1924struct OperandTraits<ExtractElementInst> :
1925 public FixedNumOperandTraits<ExtractElementInst, 2> {
1926};
1927
1928DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast <bool> (i_nocapture < OperandTraits<
ExtractElementInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1928, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ExtractElementInst>::op_begin
(const_cast<ExtractElementInst*>(this))[i_nocapture].get
()); } void ExtractElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ExtractElementInst>::operands(this)
&& "setOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1928, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ExtractElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ExtractElementInst::getNumOperands() const { return
OperandTraits<ExtractElementInst>::operands(this); } template
<int Idx_nocapture> Use &ExtractElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ExtractElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1929
1930//===----------------------------------------------------------------------===//
1931// InsertElementInst Class
1932//===----------------------------------------------------------------------===//
1933
1934/// This instruction inserts a single (scalar)
1935/// element into a VectorType value
1936///
1937class InsertElementInst : public Instruction {
1938 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1939 const Twine &NameStr = "",
1940 Instruction *InsertBefore = nullptr);
1941 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1942 BasicBlock *InsertAtEnd);
1943
1944protected:
1945 // Note: Instruction needs to be a friend here to call cloneImpl.
1946 friend class Instruction;
1947
1948 InsertElementInst *cloneImpl() const;
1949
1950public:
1951 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1952 const Twine &NameStr = "",
1953 Instruction *InsertBefore = nullptr) {
1954 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1955 }
1956
1957 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1958 const Twine &NameStr,
1959 BasicBlock *InsertAtEnd) {
1960 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1961 }
1962
1963 /// Return true if an insertelement instruction can be
1964 /// formed with the specified operands.
1965 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1966 const Value *Idx);
1967
1968 /// Overload to return most specific vector type.
1969 ///
1970 VectorType *getType() const {
1971 return cast<VectorType>(Instruction::getType());
1972 }
1973
1974 /// Transparently provide more efficient getOperand methods.
1975 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1976
1977 // Methods for support type inquiry through isa, cast, and dyn_cast:
1978 static bool classof(const Instruction *I) {
1979 return I->getOpcode() == Instruction::InsertElement;
1980 }
1981 static bool classof(const Value *V) {
1982 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1983 }
1984};
1985
1986template <>
1987struct OperandTraits<InsertElementInst> :
1988 public FixedNumOperandTraits<InsertElementInst, 3> {
1989};
1990
1991DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1991, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertElementInst>::op_begin
(const_cast<InsertElementInst*>(this))[i_nocapture].get
()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 1991, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertElementInst::getNumOperands() const { return
OperandTraits<InsertElementInst>::operands(this); } template
<int Idx_nocapture> Use &InsertElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &InsertElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1992
1993//===----------------------------------------------------------------------===//
1994// ShuffleVectorInst Class
1995//===----------------------------------------------------------------------===//
1996
/// Shuffle-mask element value denoting an undefined result lane.
constexpr int UndefMaskElem = -1;
1998
1999/// This instruction constructs a fixed permutation of two
2000/// input vectors.
2001///
2002/// For each element of the result vector, the shuffle mask selects an element
2003/// from one of the input vectors to copy to the result. Non-negative elements
2004/// in the mask represent an index into the concatenated pair of input vectors.
2005/// UndefMaskElem (-1) specifies that the result element is undefined.
2006///
2007/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2008/// requirement may be relaxed in the future.
2009class ShuffleVectorInst : public Instruction {
2010 SmallVector<int, 4> ShuffleMask;
2011 Constant *ShuffleMaskForBitcode;
2012
2013protected:
2014 // Note: Instruction needs to be a friend here to call cloneImpl.
2015 friend class Instruction;
2016
2017 ShuffleVectorInst *cloneImpl() const;
2018
2019public:
2020 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2021 Instruction *InsertBefore = nullptr);
2022 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2023 BasicBlock *InsertAtEnd);
2024 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2025 Instruction *InsertBefore = nullptr);
2026 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2027 BasicBlock *InsertAtEnd);
2028 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2029 const Twine &NameStr = "",
2030 Instruction *InsertBefor = nullptr);
2031 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2032 const Twine &NameStr, BasicBlock *InsertAtEnd);
2033 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2034 const Twine &NameStr = "",
2035 Instruction *InsertBefor = nullptr);
2036 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2037 const Twine &NameStr, BasicBlock *InsertAtEnd);
2038
2039 void *operator new(size_t S) { return User::operator new(S, 2); }
2040 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2041
2042 /// Swap the operands and adjust the mask to preserve the semantics
2043 /// of the instruction.
2044 void commute();
2045
2046 /// Return true if a shufflevector instruction can be
2047 /// formed with the specified operands.
2048 static bool isValidOperands(const Value *V1, const Value *V2,
2049 const Value *Mask);
2050 static bool isValidOperands(const Value *V1, const Value *V2,
2051 ArrayRef<int> Mask);
2052
2053 /// Overload to return most specific vector type.
2054 ///
2055 VectorType *getType() const {
2056 return cast<VectorType>(Instruction::getType());
2057 }
2058
2059 /// Transparently provide more efficient getOperand methods.
2060 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2061
2062 /// Return the shuffle mask value of this instruction for the given element
2063 /// index. Return UndefMaskElem if the element is undef.
2064 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2065
2066 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2067 /// elements of the mask are returned as UndefMaskElem.
2068 static void getShuffleMask(const Constant *Mask,
2069 SmallVectorImpl<int> &Result);
2070
2071 /// Return the mask for this instruction as a vector of integers. Undefined
2072 /// elements of the mask are returned as UndefMaskElem.
2073 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2074 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2075 }
2076
2077 /// Return the mask for this instruction, for use in bitcode.
2078 ///
2079 /// TODO: This is temporary until we decide a new bitcode encoding for
2080 /// shufflevector.
2081 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2082
2083 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2084 Type *ResultTy);
2085
2086 void setShuffleMask(ArrayRef<int> Mask);
2087
2088 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2089
2090 /// Return true if this shuffle returns a vector with a different number of
2091 /// elements than its source vectors.
2092 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2093 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2094 bool changesLength() const {
2095 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2096 ->getElementCount()
2097 .getKnownMinValue();
2098 unsigned NumMaskElts = ShuffleMask.size();
2099 return NumSourceElts != NumMaskElts;
2100 }
2101
2102 /// Return true if this shuffle returns a vector with a greater number of
2103 /// elements than its source vectors.
2104 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2105 bool increasesLength() const {
2106 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2107 ->getElementCount()
2108 .getKnownMinValue();
2109 unsigned NumMaskElts = ShuffleMask.size();
2110 return NumSourceElts < NumMaskElts;
2111 }
2112
2113 /// Return true if this shuffle mask chooses elements from exactly one source
2114 /// vector.
2115 /// Example: <7,5,undef,7>
2116 /// This assumes that vector operands are the same length as the mask.
2117 static bool isSingleSourceMask(ArrayRef<int> Mask);
2118 static bool isSingleSourceMask(const Constant *Mask) {
2119 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2119, __extension__ __PRETTY_FUNCTION__))
;
2120 SmallVector<int, 16> MaskAsInts;
2121 getShuffleMask(Mask, MaskAsInts);
2122 return isSingleSourceMask(MaskAsInts);
2123 }
2124
2125 /// Return true if this shuffle chooses elements from exactly one source
2126 /// vector without changing the length of that vector.
2127 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2128 /// TODO: Optionally allow length-changing shuffles.
2129 bool isSingleSource() const {
2130 return !changesLength() && isSingleSourceMask(ShuffleMask);
2131 }
2132
2133 /// Return true if this shuffle mask chooses elements from exactly one source
2134 /// vector without lane crossings. A shuffle using this mask is not
2135 /// necessarily a no-op because it may change the number of elements from its
2136 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2137 /// Example: <undef,undef,2,3>
2138 static bool isIdentityMask(ArrayRef<int> Mask);
2139 static bool isIdentityMask(const Constant *Mask) {
2140 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2140, __extension__ __PRETTY_FUNCTION__))
;
2141 SmallVector<int, 16> MaskAsInts;
2142 getShuffleMask(Mask, MaskAsInts);
2143 return isIdentityMask(MaskAsInts);
2144 }
2145
2146 /// Return true if this shuffle chooses elements from exactly one source
2147 /// vector without lane crossings and does not change the number of elements
2148 /// from its input vectors.
2149 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2150 bool isIdentity() const {
2151 return !changesLength() && isIdentityMask(ShuffleMask);
2152 }
2153
2154 /// Return true if this shuffle lengthens exactly one source vector with
2155 /// undefs in the high elements.
2156 bool isIdentityWithPadding() const;
2157
2158 /// Return true if this shuffle extracts the first N elements of exactly one
2159 /// source vector.
2160 bool isIdentityWithExtract() const;
2161
2162 /// Return true if this shuffle concatenates its 2 source vectors. This
2163 /// returns false if either input is undefined. In that case, the shuffle is
2164 /// is better classified as an identity with padding operation.
2165 bool isConcat() const;
2166
2167 /// Return true if this shuffle mask chooses elements from its source vectors
2168 /// without lane crossings. A shuffle using this mask would be
2169 /// equivalent to a vector select with a constant condition operand.
2170 /// Example: <4,1,6,undef>
2171 /// This returns false if the mask does not choose from both input vectors.
2172 /// In that case, the shuffle is better classified as an identity shuffle.
2173 /// This assumes that vector operands are the same length as the mask
2174 /// (a length-changing shuffle can never be equivalent to a vector select).
2175 static bool isSelectMask(ArrayRef<int> Mask);
2176 static bool isSelectMask(const Constant *Mask) {
2177 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2177, __extension__ __PRETTY_FUNCTION__))
;
2178 SmallVector<int, 16> MaskAsInts;
2179 getShuffleMask(Mask, MaskAsInts);
2180 return isSelectMask(MaskAsInts);
2181 }
2182
2183 /// Return true if this shuffle chooses elements from its source vectors
2184 /// without lane crossings and all operands have the same number of elements.
2185 /// In other words, this shuffle is equivalent to a vector select with a
2186 /// constant condition operand.
2187 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2188 /// This returns false if the mask does not choose from both input vectors.
2189 /// In that case, the shuffle is better classified as an identity shuffle.
2190 /// TODO: Optionally allow length-changing shuffles.
2191 bool isSelect() const {
2192 return !changesLength() && isSelectMask(ShuffleMask);
2193 }
2194
2195 /// Return true if this shuffle mask swaps the order of elements from exactly
2196 /// one source vector.
2197 /// Example: <7,6,undef,4>
2198 /// This assumes that vector operands are the same length as the mask.
2199 static bool isReverseMask(ArrayRef<int> Mask);
2200 static bool isReverseMask(const Constant *Mask) {
2201 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2201, __extension__ __PRETTY_FUNCTION__))
;
2202 SmallVector<int, 16> MaskAsInts;
2203 getShuffleMask(Mask, MaskAsInts);
2204 return isReverseMask(MaskAsInts);
2205 }
2206
2207 /// Return true if this shuffle swaps the order of elements from exactly
2208 /// one source vector.
2209 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2210 /// TODO: Optionally allow length-changing shuffles.
2211 bool isReverse() const {
2212 return !changesLength() && isReverseMask(ShuffleMask);
2213 }
2214
2215 /// Return true if this shuffle mask chooses all elements with the same value
2216 /// as the first element of exactly one source vector.
2217 /// Example: <4,undef,undef,4>
2218 /// This assumes that vector operands are the same length as the mask.
2219 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2220 static bool isZeroEltSplatMask(const Constant *Mask) {
2221 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2221, __extension__ __PRETTY_FUNCTION__))
;
2222 SmallVector<int, 16> MaskAsInts;
2223 getShuffleMask(Mask, MaskAsInts);
2224 return isZeroEltSplatMask(MaskAsInts);
2225 }
2226
2227 /// Return true if all elements of this shuffle are the same value as the
2228 /// first element of exactly one source vector without changing the length
2229 /// of that vector.
2230 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2231 /// TODO: Optionally allow length-changing shuffles.
2232 /// TODO: Optionally allow splats from other elements.
2233 bool isZeroEltSplat() const {
2234 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2235 }
2236
2237 /// Return true if this shuffle mask is a transpose mask.
2238 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2239 /// even- or odd-numbered vector elements from two n-dimensional source
2240 /// vectors and write each result into consecutive elements of an
2241 /// n-dimensional destination vector. Two shuffles are necessary to complete
2242 /// the transpose, one for the even elements and another for the odd elements.
2243 /// This description closely follows how the TRN1 and TRN2 AArch64
2244 /// instructions operate.
2245 ///
2246 /// For example, a simple 2x2 matrix can be transposed with:
2247 ///
2248 /// ; Original matrix
2249 /// m0 = < a, b >
2250 /// m1 = < c, d >
2251 ///
2252 /// ; Transposed matrix
2253 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2254 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2255 ///
2256 /// For matrices having greater than n columns, the resulting nx2 transposed
2257 /// matrix is stored in two result vectors such that one vector contains
2258 /// interleaved elements from all the even-numbered rows and the other vector
2259 /// contains interleaved elements from all the odd-numbered rows. For example,
2260 /// a 2x4 matrix can be transposed with:
2261 ///
2262 /// ; Original matrix
2263 /// m0 = < a, b, c, d >
2264 /// m1 = < e, f, g, h >
2265 ///
2266 /// ; Transposed matrix
2267 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2268 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2269 static bool isTransposeMask(ArrayRef<int> Mask);
2270 static bool isTransposeMask(const Constant *Mask) {
2271 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2271, __extension__ __PRETTY_FUNCTION__))
;
2272 SmallVector<int, 16> MaskAsInts;
2273 getShuffleMask(Mask, MaskAsInts);
2274 return isTransposeMask(MaskAsInts);
2275 }
2276
2277 /// Return true if this shuffle transposes the elements of its inputs without
2278 /// changing the length of the vectors. This operation may also be known as a
2279 /// merge or interleave. See the description for isTransposeMask() for the
2280 /// exact specification.
2281 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2282 bool isTranspose() const {
2283 return !changesLength() && isTransposeMask(ShuffleMask);
2284 }
2285
2286 /// Return true if this shuffle mask is an extract subvector mask.
2287 /// A valid extract subvector mask returns a smaller vector from a single
2288 /// source operand. The base extraction index is returned as well.
2289 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2290 int &Index);
2291 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2292 int &Index) {
2293 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2293, __extension__ __PRETTY_FUNCTION__))
;
2294 // Not possible to express a shuffle mask for a scalable vector for this
2295 // case.
2296 if (isa<ScalableVectorType>(Mask->getType()))
2297 return false;
2298 SmallVector<int, 16> MaskAsInts;
2299 getShuffleMask(Mask, MaskAsInts);
2300 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2301 }
2302
2303 /// Return true if this shuffle mask is an extract subvector mask.
2304 bool isExtractSubvectorMask(int &Index) const {
2305 // Not possible to express a shuffle mask for a scalable vector for this
2306 // case.
2307 if (isa<ScalableVectorType>(getType()))
2308 return false;
2309
2310 int NumSrcElts =
2311 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2312 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2313 }
2314
2315 /// Return true if this shuffle mask is an insert subvector mask.
2316 /// A valid insert subvector mask inserts the lowest elements of a second
2317 /// source operand into an in-place first source operand operand.
2318 /// Both the sub vector width and the insertion index is returned.
2319 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2320 int &NumSubElts, int &Index);
2321 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2322 int &NumSubElts, int &Index) {
2323 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2323, __extension__ __PRETTY_FUNCTION__))
;
2324 // Not possible to express a shuffle mask for a scalable vector for this
2325 // case.
2326 if (isa<ScalableVectorType>(Mask->getType()))
2327 return false;
2328 SmallVector<int, 16> MaskAsInts;
2329 getShuffleMask(Mask, MaskAsInts);
2330 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2331 }
2332
2333 /// Return true if this shuffle mask is an insert subvector mask.
2334 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2335 // Not possible to express a shuffle mask for a scalable vector for this
2336 // case.
2337 if (isa<ScalableVectorType>(getType()))
2338 return false;
2339
2340 int NumSrcElts =
2341 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2342 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2343 }
2344
2345 /// Change values in a shuffle permute mask assuming the two vector operands
2346 /// of length InVecNumElts have swapped position.
2347 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2348 unsigned InVecNumElts) {
2349 for (int &Idx : Mask) {
2350 if (Idx == -1)
2351 continue;
2352 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2353 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2354, __extension__ __PRETTY_FUNCTION__))
2354 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2354, __extension__ __PRETTY_FUNCTION__))
;
2355 }
2356 }
2357
2358 // Methods for support type inquiry through isa, cast, and dyn_cast:
2359 static bool classof(const Instruction *I) {
2360 return I->getOpcode() == Instruction::ShuffleVector;
2361 }
2362 static bool classof(const Value *V) {
2363 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2364 }
2365};
2366
2367template <>
2368struct OperandTraits<ShuffleVectorInst>
2369 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2370
2371DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2371, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ShuffleVectorInst>::op_begin
(const_cast<ShuffleVectorInst*>(this))[i_nocapture].get
()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2371, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ShuffleVectorInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ShuffleVectorInst::getNumOperands() const { return
OperandTraits<ShuffleVectorInst>::operands(this); } template
<int Idx_nocapture> Use &ShuffleVectorInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ShuffleVectorInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2372
2373//===----------------------------------------------------------------------===//
2374// ExtractValueInst Class
2375//===----------------------------------------------------------------------===//
2376
2377/// This instruction extracts a struct member or array
2378/// element value from an aggregate value.
2379///
2380class ExtractValueInst : public UnaryInstruction {
2381 SmallVector<unsigned, 4> Indices;
2382
2383 ExtractValueInst(const ExtractValueInst &EVI);
2384
2385 /// Constructors - Create a extractvalue instruction with a base aggregate
2386 /// value and a list of indices. The first ctor can optionally insert before
2387 /// an existing instruction, the second appends the new instruction to the
2388 /// specified BasicBlock.
2389 inline ExtractValueInst(Value *Agg,
2390 ArrayRef<unsigned> Idxs,
2391 const Twine &NameStr,
2392 Instruction *InsertBefore);
2393 inline ExtractValueInst(Value *Agg,
2394 ArrayRef<unsigned> Idxs,
2395 const Twine &NameStr, BasicBlock *InsertAtEnd);
2396
2397 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2398
2399protected:
2400 // Note: Instruction needs to be a friend here to call cloneImpl.
2401 friend class Instruction;
2402
2403 ExtractValueInst *cloneImpl() const;
2404
2405public:
2406 static ExtractValueInst *Create(Value *Agg,
2407 ArrayRef<unsigned> Idxs,
2408 const Twine &NameStr = "",
2409 Instruction *InsertBefore = nullptr) {
2410 return new
2411 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2412 }
2413
2414 static ExtractValueInst *Create(Value *Agg,
2415 ArrayRef<unsigned> Idxs,
2416 const Twine &NameStr,
2417 BasicBlock *InsertAtEnd) {
2418 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2419 }
2420
2421 /// Returns the type of the element that would be extracted
2422 /// with an extractvalue instruction with the specified parameters.
2423 ///
2424 /// Null is returned if the indices are invalid for the specified type.
2425 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2426
2427 using idx_iterator = const unsigned*;
2428
2429 inline idx_iterator idx_begin() const { return Indices.begin(); }
2430 inline idx_iterator idx_end() const { return Indices.end(); }
2431 inline iterator_range<idx_iterator> indices() const {
2432 return make_range(idx_begin(), idx_end());
2433 }
2434
2435 Value *getAggregateOperand() {
2436 return getOperand(0);
2437 }
2438 const Value *getAggregateOperand() const {
2439 return getOperand(0);
2440 }
2441 static unsigned getAggregateOperandIndex() {
2442 return 0U; // get index for modifying correct operand
2443 }
2444
2445 ArrayRef<unsigned> getIndices() const {
2446 return Indices;
2447 }
2448
2449 unsigned getNumIndices() const {
2450 return (unsigned)Indices.size();
2451 }
2452
2453 bool hasIndices() const {
2454 return true;
2455 }
2456
2457 // Methods for support type inquiry through isa, cast, and dyn_cast:
2458 static bool classof(const Instruction *I) {
2459 return I->getOpcode() == Instruction::ExtractValue;
2460 }
2461 static bool classof(const Value *V) {
2462 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2463 }
2464};
2465
2466ExtractValueInst::ExtractValueInst(Value *Agg,
2467 ArrayRef<unsigned> Idxs,
2468 const Twine &NameStr,
2469 Instruction *InsertBefore)
2470 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2471 ExtractValue, Agg, InsertBefore) {
2472 init(Idxs, NameStr);
2473}
2474
2475ExtractValueInst::ExtractValueInst(Value *Agg,
2476 ArrayRef<unsigned> Idxs,
2477 const Twine &NameStr,
2478 BasicBlock *InsertAtEnd)
2479 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2480 ExtractValue, Agg, InsertAtEnd) {
2481 init(Idxs, NameStr);
2482}
2483
2484//===----------------------------------------------------------------------===//
2485// InsertValueInst Class
2486//===----------------------------------------------------------------------===//
2487
2488/// This instruction inserts a struct field of array element
2489/// value into an aggregate value.
2490///
2491class InsertValueInst : public Instruction {
2492 SmallVector<unsigned, 4> Indices;
2493
2494 InsertValueInst(const InsertValueInst &IVI);
2495
2496 /// Constructors - Create a insertvalue instruction with a base aggregate
2497 /// value, a value to insert, and a list of indices. The first ctor can
2498 /// optionally insert before an existing instruction, the second appends
2499 /// the new instruction to the specified BasicBlock.
2500 inline InsertValueInst(Value *Agg, Value *Val,
2501 ArrayRef<unsigned> Idxs,
2502 const Twine &NameStr,
2503 Instruction *InsertBefore);
2504 inline InsertValueInst(Value *Agg, Value *Val,
2505 ArrayRef<unsigned> Idxs,
2506 const Twine &NameStr, BasicBlock *InsertAtEnd);
2507
2508 /// Constructors - These two constructors are convenience methods because one
2509 /// and two index insertvalue instructions are so common.
2510 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2511 const Twine &NameStr = "",
2512 Instruction *InsertBefore = nullptr);
2513 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2514 BasicBlock *InsertAtEnd);
2515
2516 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2517 const Twine &NameStr);
2518
2519protected:
2520 // Note: Instruction needs to be a friend here to call cloneImpl.
2521 friend class Instruction;
2522
2523 InsertValueInst *cloneImpl() const;
2524
2525public:
2526 // allocate space for exactly two operands
2527 void *operator new(size_t S) { return User::operator new(S, 2); }
2528 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2529
2530 static InsertValueInst *Create(Value *Agg, Value *Val,
2531 ArrayRef<unsigned> Idxs,
2532 const Twine &NameStr = "",
2533 Instruction *InsertBefore = nullptr) {
2534 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2535 }
2536
2537 static InsertValueInst *Create(Value *Agg, Value *Val,
2538 ArrayRef<unsigned> Idxs,
2539 const Twine &NameStr,
2540 BasicBlock *InsertAtEnd) {
2541 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2542 }
2543
2544 /// Transparently provide more efficient getOperand methods.
2545 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2546
2547 using idx_iterator = const unsigned*;
2548
2549 inline idx_iterator idx_begin() const { return Indices.begin(); }
2550 inline idx_iterator idx_end() const { return Indices.end(); }
2551 inline iterator_range<idx_iterator> indices() const {
2552 return make_range(idx_begin(), idx_end());
2553 }
2554
2555 Value *getAggregateOperand() {
2556 return getOperand(0);
2557 }
2558 const Value *getAggregateOperand() const {
2559 return getOperand(0);
2560 }
2561 static unsigned getAggregateOperandIndex() {
2562 return 0U; // get index for modifying correct operand
2563 }
2564
2565 Value *getInsertedValueOperand() {
2566 return getOperand(1);
2567 }
2568 const Value *getInsertedValueOperand() const {
2569 return getOperand(1);
2570 }
2571 static unsigned getInsertedValueOperandIndex() {
2572 return 1U; // get index for modifying correct operand
2573 }
2574
2575 ArrayRef<unsigned> getIndices() const {
2576 return Indices;
2577 }
2578
2579 unsigned getNumIndices() const {
2580 return (unsigned)Indices.size();
2581 }
2582
2583 bool hasIndices() const {
2584 return true;
2585 }
2586
2587 // Methods for support type inquiry through isa, cast, and dyn_cast:
2588 static bool classof(const Instruction *I) {
2589 return I->getOpcode() == Instruction::InsertValue;
2590 }
2591 static bool classof(const Value *V) {
2592 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2593 }
2594};
2595
2596template <>
2597struct OperandTraits<InsertValueInst> :
2598 public FixedNumOperandTraits<InsertValueInst, 2> {
2599};
2600
2601InsertValueInst::InsertValueInst(Value *Agg,
2602 Value *Val,
2603 ArrayRef<unsigned> Idxs,
2604 const Twine &NameStr,
2605 Instruction *InsertBefore)
2606 : Instruction(Agg->getType(), InsertValue,
2607 OperandTraits<InsertValueInst>::op_begin(this),
2608 2, InsertBefore) {
2609 init(Agg, Val, Idxs, NameStr);
2610}
2611
2612InsertValueInst::InsertValueInst(Value *Agg,
2613 Value *Val,
2614 ArrayRef<unsigned> Idxs,
2615 const Twine &NameStr,
2616 BasicBlock *InsertAtEnd)
2617 : Instruction(Agg->getType(), InsertValue,
2618 OperandTraits<InsertValueInst>::op_begin(this),
2619 2, InsertAtEnd) {
2620 init(Agg, Val, Idxs, NameStr);
2621}
2622
2623DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InsertValueInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2623, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertValueInst>::op_begin
(const_cast<InsertValueInst*>(this))[i_nocapture].get()
); } void InsertValueInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<InsertValueInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2623, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertValueInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertValueInst::getNumOperands() const { return
OperandTraits<InsertValueInst>::operands(this); } template
<int Idx_nocapture> Use &InsertValueInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &InsertValueInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
2624
2625//===----------------------------------------------------------------------===//
2626// PHINode Class
2627//===----------------------------------------------------------------------===//
2628
2629// PHINode - The PHINode class is used to represent the magical mystical PHI
2630// node, that can not exist in nature, but can be synthesized in a computer
2631// scientist's overactive imagination.
2632//
2633class PHINode : public Instruction {
2634 /// The number of operands actually allocated. NumOperands is
2635 /// the number actually in use.
2636 unsigned ReservedSpace;
2637
2638 PHINode(const PHINode &PN);
2639
2640 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2641 const Twine &NameStr = "",
2642 Instruction *InsertBefore = nullptr)
2643 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2644 ReservedSpace(NumReservedValues) {
2645 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2645, __extension__ __PRETTY_FUNCTION__))
;
2646 setName(NameStr);
2647 allocHungoffUses(ReservedSpace);
2648 }
2649
2650 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2651 BasicBlock *InsertAtEnd)
2652 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2653 ReservedSpace(NumReservedValues) {
2654 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2654, __extension__ __PRETTY_FUNCTION__))
;
2655 setName(NameStr);
2656 allocHungoffUses(ReservedSpace);
2657 }
2658
2659protected:
2660 // Note: Instruction needs to be a friend here to call cloneImpl.
2661 friend class Instruction;
2662
2663 PHINode *cloneImpl() const;
2664
2665 // allocHungoffUses - this is more complicated than the generic
2666 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2667 // values and pointers to the incoming blocks, all in one allocation.
2668 void allocHungoffUses(unsigned N) {
2669 User::allocHungoffUses(N, /* IsPhi */ true);
2670 }
2671
2672public:
2673 /// Constructors - NumReservedValues is a hint for the number of incoming
2674 /// edges that this phi node will have (use 0 if you really have no idea).
2675 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2676 const Twine &NameStr = "",
2677 Instruction *InsertBefore = nullptr) {
2678 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2679 }
2680
2681 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2682 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2683 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2684 }
2685
2686 /// Provide fast operand accessors
2687 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2688
2689 // Block iterator interface. This provides access to the list of incoming
2690 // basic blocks, which parallels the list of incoming values.
2691
2692 using block_iterator = BasicBlock **;
2693 using const_block_iterator = BasicBlock * const *;
2694
2695 block_iterator block_begin() {
2696 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2697 }
2698
2699 const_block_iterator block_begin() const {
2700 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2701 }
2702
2703 block_iterator block_end() {
2704 return block_begin() + getNumOperands();
2705 }
2706
2707 const_block_iterator block_end() const {
2708 return block_begin() + getNumOperands();
2709 }
2710
2711 iterator_range<block_iterator> blocks() {
2712 return make_range(block_begin(), block_end());
2713 }
2714
2715 iterator_range<const_block_iterator> blocks() const {
2716 return make_range(block_begin(), block_end());
2717 }
2718
2719 op_range incoming_values() { return operands(); }
2720
2721 const_op_range incoming_values() const { return operands(); }
2722
2723 /// Return the number of incoming edges
2724 ///
2725 unsigned getNumIncomingValues() const { return getNumOperands(); }
2726
2727 /// Return incoming value number x
2728 ///
2729 Value *getIncomingValue(unsigned i) const {
2730 return getOperand(i);
2731 }
2732 void setIncomingValue(unsigned i, Value *V) {
2733 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2733, __extension__ __PRETTY_FUNCTION__))
;
2734 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2735, __extension__ __PRETTY_FUNCTION__))
2735 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2735, __extension__ __PRETTY_FUNCTION__))
;
2736 setOperand(i, V);
2737 }
2738
2739 static unsigned getOperandNumForIncomingValue(unsigned i) {
2740 return i;
2741 }
2742
2743 static unsigned getIncomingValueNumForOperand(unsigned i) {
2744 return i;
2745 }
2746
2747 /// Return incoming basic block number @p i.
2748 ///
2749 BasicBlock *getIncomingBlock(unsigned i) const {
2750 return block_begin()[i];
2751 }
2752
2753 /// Return incoming basic block corresponding
2754 /// to an operand of the PHI.
2755 ///
2756 BasicBlock *getIncomingBlock(const Use &U) const {
2757 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2757, __extension__ __PRETTY_FUNCTION__))
;
2758 return getIncomingBlock(unsigned(&U - op_begin()));
2759 }
2760
2761 /// Return incoming basic block corresponding
2762 /// to value use iterator.
2763 ///
2764 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2765 return getIncomingBlock(I.getUse());
2766 }
2767
/// Set incoming basic block number \p i to \p BB (must be non-null).
void setIncomingBlock(unsigned i, BasicBlock *BB) {
  assert(BB && "PHI node got a null basic block!");
  block_begin()[i] = BB;
}
2772
2773 /// Replace every incoming basic block \p Old to basic block \p New.
2774 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2775 assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2775, __extension__ __PRETTY_FUNCTION__))
;
2776 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2777 if (getIncomingBlock(Op) == Old)
2778 setIncomingBlock(Op, New);
2779 }
2780
2781 /// Add an incoming value to the end of the PHI list
2782 ///
2783 void addIncoming(Value *V, BasicBlock *BB) {
2784 if (getNumOperands() == ReservedSpace)
2785 growOperands(); // Get more space!
2786 // Initialize some new operands.
2787 setNumHungOffUseOperands(getNumOperands() + 1);
2788 setIncomingValue(getNumOperands() - 1, V);
2789 setIncomingBlock(getNumOperands() - 1, BB);
2790 }
2791
/// Remove an incoming value. This is useful if a
/// predecessor basic block is deleted. The value removed is returned.
///
/// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
/// is true), the PHI node is destroyed and any uses of it are replaced with
/// dummy values. The only time there should be zero incoming values to a PHI
/// node is when the block is dead, so this strategy is sound.
Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);

/// Convenience overload: remove the first incoming entry whose block is
/// \p BB. Asserts that \p BB is actually an incoming block of this PHI.
Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
  int Idx = getBasicBlockIndex(BB);
  assert(Idx >= 0 && "Invalid basic block argument to remove!");
  return removeIncomingValue(Idx, DeletePHIIfEmpty);
}
2807
2808 /// Return the first index of the specified basic
2809 /// block in the value list for this PHI. Returns -1 if no instance.
2810 ///
2811 int getBasicBlockIndex(const BasicBlock *BB) const {
2812 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2813 if (block_begin()[i] == BB)
2814 return i;
2815 return -1;
2816 }
2817
/// Return the incoming value associated with predecessor \p BB.
/// Asserts that \p BB is an incoming block of this PHI.
Value *getIncomingValueForBlock(const BasicBlock *BB) const {
  int Idx = getBasicBlockIndex(BB);
  assert(Idx >= 0 && "Invalid basic block argument!");
  return getIncomingValue(Idx);
}
2823
2824 /// Set every incoming value(s) for block \p BB to \p V.
2825 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2826 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2826, __extension__ __PRETTY_FUNCTION__))
;
2827 bool Found = false;
2828 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2829 if (getIncomingBlock(Op) == BB) {
2830 Found = true;
2831 setIncomingValue(Op, V);
2832 }
2833 (void)Found;
2834 assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!"
) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2834, __extension__ __PRETTY_FUNCTION__))
;
2835 }
2836
/// If the specified PHI node always merges together the
/// same value, return the value, otherwise return null.
Value *hasConstantValue() const;

/// Whether the specified PHI node always merges
/// together the same value, assuming undefs are equal to a unique
/// non-undef value.
bool hasConstantOrUndefValue() const;

/// If the PHI node is complete which means all of its parent's predecessors
/// have incoming value in this PHI, return true, otherwise return false.
bool isComplete() const {
  return llvm::all_of(predecessors(getParent()),
                      [this](const BasicBlock *Pred) {
                        return getBasicBlockIndex(Pred) >= 0;
                      });
}
2854
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
  return I->getOpcode() == Instruction::PHI;
}
static bool classof(const Value *V) {
  return isa<Instruction>(V) && classof(cast<Instruction>(V));
}

private:
  // Grows the hung-off operand list; defined out of line.
  void growOperands();
};
2866
/// PHINode stores its operands in hung-off uses (allocated separately from
/// the User object), with space for 2 operands reserved up front.
template <>
struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
};
2870
2871DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<PHINode>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2871, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<PHINode>::op_begin(const_cast
<PHINode*>(this))[i_nocapture].get()); } void PHINode::
setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<PHINode>::
operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2871, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
PHINode>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
PHINode::getNumOperands() const { return OperandTraits<PHINode
>::operands(this); } template <int Idx_nocapture> Use
&PHINode::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
PHINode::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2872
2873//===----------------------------------------------------------------------===//
2874// LandingPadInst Class
2875//===----------------------------------------------------------------------===//
2876
2877//===---------------------------------------------------------------------------
2878/// The landingpad instruction holds all of the information
2879/// necessary to generate correct exception handling. The landingpad instruction
2880/// cannot be moved from the top of a landing pad block, which itself is
2881/// accessible only from the 'unwind' edge of an invoke. This uses the
2882/// SubclassData field in Value to store whether or not the landingpad is a
2883/// cleanup.
2884///
2885class LandingPadInst : public Instruction {
2886 using CleanupField = BoolBitfieldElementT<0>;
2887
2888 /// The number of operands actually allocated. NumOperands is
2889 /// the number actually in use.
2890 unsigned ReservedSpace;
2891
2892 LandingPadInst(const LandingPadInst &LP);
2893
2894public:
2895 enum ClauseType { Catch, Filter };
2896
2897private:
2898 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2899 const Twine &NameStr, Instruction *InsertBefore);
2900 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2901 const Twine &NameStr, BasicBlock *InsertAtEnd);
2902
2903 // Allocate space for exactly zero operands.
2904 void *operator new(size_t S) { return User::operator new(S); }
2905
2906 void growOperands(unsigned Size);
2907 void init(unsigned NumReservedValues, const Twine &NameStr);
2908
2909protected:
2910 // Note: Instruction needs to be a friend here to call cloneImpl.
2911 friend class Instruction;
2912
2913 LandingPadInst *cloneImpl() const;
2914
2915public:
2916 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2917
2918 /// Constructors - NumReservedClauses is a hint for the number of incoming
2919 /// clauses that this landingpad will have (use 0 if you really have no idea).
2920 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2921 const Twine &NameStr = "",
2922 Instruction *InsertBefore = nullptr);
2923 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2924 const Twine &NameStr, BasicBlock *InsertAtEnd);
2925
2926 /// Provide fast operand accessors
2927 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2928
2929 /// Return 'true' if this landingpad instruction is a
2930 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2931 /// doesn't catch the exception.
2932 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2933
2934 /// Indicate that this landingpad instruction is a cleanup.
2935 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2936
2937 /// Add a catch or filter clause to the landing pad.
2938 void addClause(Constant *ClauseVal);
2939
2940 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2941 /// determine what type of clause this is.
2942 Constant *getClause(unsigned Idx) const {
2943 return cast<Constant>(getOperandList()[Idx]);
2944 }
2945
2946 /// Return 'true' if the clause and index Idx is a catch clause.
2947 bool isCatch(unsigned Idx) const {
2948 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2949 }
2950
2951 /// Return 'true' if the clause and index Idx is a filter clause.
2952 bool isFilter(unsigned Idx) const {
2953 return isa<ArrayType>(getOperandList()[Idx]->getType());
2954 }
2955
2956 /// Get the number of clauses for this landing pad.
2957 unsigned getNumClauses() const { return getNumOperands(); }
2958
2959 /// Grow the size of the operand list to accommodate the new
2960 /// number of clauses.
2961 void reserveClauses(unsigned Size) { growOperands(Size); }
2962
2963 // Methods for support type inquiry through isa, cast, and dyn_cast:
2964 static bool classof(const Instruction *I) {
2965 return I->getOpcode() == Instruction::LandingPad;
2966 }
2967 static bool classof(const Value *V) {
2968 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2969 }
2970};
2971
/// LandingPadInst's clause operands are hung off (allocated separately from
/// the User object), with space for 1 operand reserved up front.
template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};
2975
2976DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2976, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<LandingPadInst>::op_begin(
const_cast<LandingPadInst*>(this))[i_nocapture].get());
} void LandingPadInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<LandingPadInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 2976, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
LandingPadInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned LandingPadInst::getNumOperands() const { return OperandTraits
<LandingPadInst>::operands(this); } template <int Idx_nocapture
> Use &LandingPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &LandingPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2977
2978//===----------------------------------------------------------------------===//
2979// ReturnInst Class
2980//===----------------------------------------------------------------------===//
2981
2982//===---------------------------------------------------------------------------
2983/// Return a value (possibly void), from a function. Execution
2984/// does not continue in this function any longer.
2985///
2986class ReturnInst : public Instruction {
2987 ReturnInst(const ReturnInst &RI);
2988
2989private:
2990 // ReturnInst constructors:
2991 // ReturnInst() - 'ret void' instruction
2992 // ReturnInst( null) - 'ret void' instruction
2993 // ReturnInst(Value* X) - 'ret X' instruction
2994 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2995 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2996 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2997 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2998 //
2999 // NOTE: If the Value* passed is of type void then the constructor behaves as
3000 // if it was passed NULL.
3001 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3002 Instruction *InsertBefore = nullptr);
3003 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3004 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3005
3006protected:
3007 // Note: Instruction needs to be a friend here to call cloneImpl.
3008 friend class Instruction;
3009
3010 ReturnInst *cloneImpl() const;
3011
3012public:
3013 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3014 Instruction *InsertBefore = nullptr) {
3015 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3016 }
3017
3018 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3019 BasicBlock *InsertAtEnd) {
3020 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3021 }
3022
3023 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3024 return new(0) ReturnInst(C, InsertAtEnd);
3025 }
3026
3027 /// Provide fast operand accessors
3028 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3029
3030 /// Convenience accessor. Returns null if there is no return value.
3031 Value *getReturnValue() const {
3032 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3033 }
3034
3035 unsigned getNumSuccessors() const { return 0; }
3036
3037 // Methods for support type inquiry through isa, cast, and dyn_cast:
3038 static bool classof(const Instruction *I) {
3039 return (I->getOpcode() == Instruction::Ret);
3040 }
3041 static bool classof(const Value *V) {
3042 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3043 }
3044
3045private:
3046 BasicBlock *getSuccessor(unsigned idx) const {
3047 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3047)
;
3048 }
3049
3050 void setSuccessor(unsigned idx, BasicBlock *B) {
3051 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3051)
;
3052 }
3053};
3054
3055template <>
3056struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3057};
3058
3059DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ReturnInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3059, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ReturnInst>::op_begin(const_cast
<ReturnInst*>(this))[i_nocapture].get()); } void ReturnInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<ReturnInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3059, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned ReturnInst::getNumOperands() const { return OperandTraits
<ReturnInst>::operands(this); } template <int Idx_nocapture
> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3060
3061//===----------------------------------------------------------------------===//
3062// BranchInst Class
3063//===----------------------------------------------------------------------===//
3064
3065//===---------------------------------------------------------------------------
3066/// Conditional or Unconditional Branch instruction.
3067///
3068class BranchInst : public Instruction {
3069 /// Ops list - Branches are strange. The operands are ordered:
3070 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3071 /// they don't have to check for cond/uncond branchness. These are mostly
3072 /// accessed relative from op_end().
3073 BranchInst(const BranchInst &BI);
3074 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3075 // BranchInst(BB *B) - 'br B'
3076 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3077 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3078 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3079 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3080 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3081 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3082 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3083 Instruction *InsertBefore = nullptr);
3084 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3085 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3086 BasicBlock *InsertAtEnd);
3087
3088 void AssertOK();
3089
3090protected:
3091 // Note: Instruction needs to be a friend here to call cloneImpl.
3092 friend class Instruction;
3093
3094 BranchInst *cloneImpl() const;
3095
3096public:
3097 /// Iterator type that casts an operand to a basic block.
3098 ///
3099 /// This only makes sense because the successors are stored as adjacent
3100 /// operands for branch instructions.
3101 struct succ_op_iterator
3102 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3103 std::random_access_iterator_tag, BasicBlock *,
3104 ptrdiff_t, BasicBlock *, BasicBlock *> {
3105 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3106
3107 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3108 BasicBlock *operator->() const { return operator*(); }
3109 };
3110
3111 /// The const version of `succ_op_iterator`.
3112 struct const_succ_op_iterator
3113 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3114 std::random_access_iterator_tag,
3115 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3116 const BasicBlock *> {
3117 explicit const_succ_op_iterator(const_value_op_iterator I)
3118 : iterator_adaptor_base(I) {}
3119
3120 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3121 const BasicBlock *operator->() const { return operator*(); }
3122 };
3123
3124 static BranchInst *Create(BasicBlock *IfTrue,
3125 Instruction *InsertBefore = nullptr) {
3126 return new(1) BranchInst(IfTrue, InsertBefore);
3127 }
3128
3129 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3130 Value *Cond, Instruction *InsertBefore = nullptr) {
3131 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3132 }
3133
3134 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3135 return new(1) BranchInst(IfTrue, InsertAtEnd);
3136 }
3137
3138 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3139 Value *Cond, BasicBlock *InsertAtEnd) {
3140 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3141 }
3142
3143 /// Transparently provide more efficient getOperand methods.
3144 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3145
3146 bool isUnconditional() const { return getNumOperands() == 1; }
3147 bool isConditional() const { return getNumOperands() == 3; }
3148
3149 Value *getCondition() const {
3150 assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3150, __extension__ __PRETTY_FUNCTION__))
;
3151 return Op<-3>();
3152 }
3153
3154 void setCondition(Value *V) {
3155 assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3155, __extension__ __PRETTY_FUNCTION__))
;
3156 Op<-3>() = V;
3157 }
3158
3159 unsigned getNumSuccessors() const { return 1+isConditional(); }
3160
3161 BasicBlock *getSuccessor(unsigned i) const {
3162 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3162, __extension__ __PRETTY_FUNCTION__))
;
3163 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3164 }
3165
3166 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3167 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3167, __extension__ __PRETTY_FUNCTION__))
;
3168 *(&Op<-1>() - idx) = NewSucc;
3169 }
3170
3171 /// Swap the successors of this branch instruction.
3172 ///
3173 /// Swaps the successors of the branch instruction. This also swaps any
3174 /// branch weight metadata associated with the instruction so that it
3175 /// continues to map correctly to each operand.
3176 void swapSuccessors();
3177
3178 iterator_range<succ_op_iterator> successors() {
3179 return make_range(
3180 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3181 succ_op_iterator(value_op_end()));
3182 }
3183
3184 iterator_range<const_succ_op_iterator> successors() const {
3185 return make_range(const_succ_op_iterator(
3186 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3187 const_succ_op_iterator(value_op_end()));
3188 }
3189
3190 // Methods for support type inquiry through isa, cast, and dyn_cast:
3191 static bool classof(const Instruction *I) {
3192 return (I->getOpcode() == Instruction::Br);
3193 }
3194 static bool classof(const Value *V) {
3195 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3196 }
3197};
3198
3199template <>
3200struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3201};
3202
3203DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<BranchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3203, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<BranchInst>::op_begin(const_cast
<BranchInst*>(this))[i_nocapture].get()); } void BranchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<BranchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3203, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
BranchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned BranchInst::getNumOperands() const { return OperandTraits
<BranchInst>::operands(this); } template <int Idx_nocapture
> Use &BranchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
BranchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3204
3205//===----------------------------------------------------------------------===//
3206// SwitchInst Class
3207//===----------------------------------------------------------------------===//
3208
3209//===---------------------------------------------------------------------------
3210/// Multiway switch
3211///
3212class SwitchInst : public Instruction {
3213 unsigned ReservedSpace;
3214
3215 // Operand[0] = Value to switch on
3216 // Operand[1] = Default basic block destination
3217 // Operand[2n ] = Value to match
3218 // Operand[2n+1] = BasicBlock to go to on match
3219 SwitchInst(const SwitchInst &SI);
3220
3221 /// Create a new switch instruction, specifying a value to switch on and a
3222 /// default destination. The number of additional cases can be specified here
3223 /// to make memory allocation more efficient. This constructor can also
3224 /// auto-insert before another instruction.
3225 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3226 Instruction *InsertBefore);
3227
3228 /// Create a new switch instruction, specifying a value to switch on and a
3229 /// default destination. The number of additional cases can be specified here
3230 /// to make memory allocation more efficient. This constructor also
3231 /// auto-inserts at the end of the specified BasicBlock.
3232 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3233 BasicBlock *InsertAtEnd);
3234
3235 // allocate space for exactly zero operands
3236 void *operator new(size_t S) { return User::operator new(S); }
3237
3238 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3239 void growOperands();
3240
3241protected:
3242 // Note: Instruction needs to be a friend here to call cloneImpl.
3243 friend class Instruction;
3244
3245 SwitchInst *cloneImpl() const;
3246
3247public:
3248 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3249
3250 // -2
3251 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3252
3253 template <typename CaseHandleT> class CaseIteratorImpl;
3254
3255 /// A handle to a particular switch case. It exposes a convenient interface
3256 /// to both the case value and the successor block.
3257 ///
3258 /// We define this as a template and instantiate it to form both a const and
3259 /// non-const handle.
3260 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3261 class CaseHandleImpl {
3262 // Directly befriend both const and non-const iterators.
3263 friend class SwitchInst::CaseIteratorImpl<
3264 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3265
3266 protected:
3267 // Expose the switch type we're parameterized with to the iterator.
3268 using SwitchInstType = SwitchInstT;
3269
3270 SwitchInstT *SI;
3271 ptrdiff_t Index;
3272
3273 CaseHandleImpl() = default;
3274 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3275
3276 public:
3277 /// Resolves case value for current case.
3278 ConstantIntT *getCaseValue() const {
3279 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3280, __extension__ __PRETTY_FUNCTION__))
3280 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3280, __extension__ __PRETTY_FUNCTION__))
;
3281 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3282 }
3283
3284 /// Resolves successor for current case.
3285 BasicBlockT *getCaseSuccessor() const {
3286 assert(((unsigned)Index < SI->getNumCases() ||(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3288, __extension__ __PRETTY_FUNCTION__))
3287 (unsigned)Index == DefaultPseudoIndex) &&(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3288, __extension__ __PRETTY_FUNCTION__))
3288 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3288, __extension__ __PRETTY_FUNCTION__))
;
3289 return SI->getSuccessor(getSuccessorIndex());
3290 }
3291
3292 /// Returns number of current case.
3293 unsigned getCaseIndex() const { return Index; }
3294
3295 /// Returns successor index for current case successor.
3296 unsigned getSuccessorIndex() const {
3297 assert(((unsigned)Index == DefaultPseudoIndex ||(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3299, __extension__ __PRETTY_FUNCTION__))
3298 (unsigned)Index < SI->getNumCases()) &&(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3299, __extension__ __PRETTY_FUNCTION__))
3299 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3299, __extension__ __PRETTY_FUNCTION__))
;
3300 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3301 }
3302
3303 bool operator==(const CaseHandleImpl &RHS) const {
3304 assert(SI == RHS.SI && "Incompatible operators.")(static_cast <bool> (SI == RHS.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3304, __extension__ __PRETTY_FUNCTION__))
;
3305 return Index == RHS.Index;
3306 }
3307 };
3308
3309 using ConstCaseHandle =
3310 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3311
3312 class CaseHandle
3313 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3314 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3315
3316 public:
3317 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3318
3319 /// Sets the new value for current case.
3320 void setValue(ConstantInt *V) {
3321 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3322, __extension__ __PRETTY_FUNCTION__))
3322 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3322, __extension__ __PRETTY_FUNCTION__))
;
3323 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3324 }
3325
3326 /// Sets the new successor for current case.
3327 void setSuccessor(BasicBlock *S) {
3328 SI->setSuccessor(getSuccessorIndex(), S);
3329 }
3330 };
3331
3332 template <typename CaseHandleT>
3333 class CaseIteratorImpl
3334 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3335 std::random_access_iterator_tag,
3336 CaseHandleT> {
3337 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3338
3339 CaseHandleT Case;
3340
3341 public:
3342 /// Default constructed iterator is in an invalid state until assigned to
3343 /// a case for a particular switch.
3344 CaseIteratorImpl() = default;
3345
3346 /// Initializes case iterator for given SwitchInst and for given
3347 /// case number.
3348 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3349
3350 /// Initializes case iterator for given SwitchInst and for given
3351 /// successor index.
3352 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3353 unsigned SuccessorIndex) {
3354 assert(SuccessorIndex < SI->getNumSuccessors() &&(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3355, __extension__ __PRETTY_FUNCTION__))
3355 "Successor index # out of range!")(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3355, __extension__ __PRETTY_FUNCTION__))
;
3356 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3357 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3358 }
3359
3360 /// Support converting to the const variant. This will be a no-op for const
3361 /// variant.
3362 operator CaseIteratorImpl<ConstCaseHandle>() const {
3363 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3364 }
3365
3366 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3367 // Check index correctness after addition.
3368 // Note: Index == getNumCases() means end().
3369 assert(Case.Index + N >= 0 &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3371, __extension__ __PRETTY_FUNCTION__))
3370 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3371, __extension__ __PRETTY_FUNCTION__))
3371 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3371, __extension__ __PRETTY_FUNCTION__))
;
3372 Case.Index += N;
3373 return *this;
3374 }
3375 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3376 // Check index correctness after subtraction.
3377 // Note: Case.Index == getNumCases() means end().
3378 assert(Case.Index - N >= 0 &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3380, __extension__ __PRETTY_FUNCTION__))
3379 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3380, __extension__ __PRETTY_FUNCTION__))
3380 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3380, __extension__ __PRETTY_FUNCTION__))
;
3381 Case.Index -= N;
3382 return *this;
3383 }
3384 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3385 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3385, __extension__ __PRETTY_FUNCTION__))
;
3386 return Case.Index - RHS.Case.Index;
3387 }
3388 bool operator==(const CaseIteratorImpl &RHS) const {
3389 return Case == RHS.Case;
3390 }
3391 bool operator<(const CaseIteratorImpl &RHS) const {
3392 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3392, __extension__ __PRETTY_FUNCTION__))
;
3393 return Case.Index < RHS.Case.Index;
3394 }
3395 CaseHandleT &operator*() { return Case; }
3396 const CaseHandleT &operator*() const { return Case; }
3397 };
3398
3399 using CaseIt = CaseIteratorImpl<CaseHandle>;
3400 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3401
3402 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3403 unsigned NumCases,
3404 Instruction *InsertBefore = nullptr) {
3405 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3406 }
3407
3408 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3409 unsigned NumCases, BasicBlock *InsertAtEnd) {
3410 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3411 }
3412
3413 /// Provide fast operand accessors
3414 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3415
3416 // Accessor Methods for Switch stmt
3417 Value *getCondition() const { return getOperand(0); }
3418 void setCondition(Value *V) { setOperand(0, V); }
3419
3420 BasicBlock *getDefaultDest() const {
3421 return cast<BasicBlock>(getOperand(1));
3422 }
3423
3424 void setDefaultDest(BasicBlock *DefaultCase) {
3425 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3426 }
3427
3428 /// Return the number of 'cases' in this switch instruction, excluding the
3429 /// default case.
3430 unsigned getNumCases() const {
3431 return getNumOperands()/2 - 1;
3432 }
3433
3434 /// Returns a read/write iterator that points to the first case in the
3435 /// SwitchInst.
3436 CaseIt case_begin() {
3437 return CaseIt(this, 0);
3438 }
3439
3440 /// Returns a read-only iterator that points to the first case in the
3441 /// SwitchInst.
3442 ConstCaseIt case_begin() const {
3443 return ConstCaseIt(this, 0);
3444 }
3445
3446 /// Returns a read/write iterator that points one past the last in the
3447 /// SwitchInst.
3448 CaseIt case_end() {
3449 return CaseIt(this, getNumCases());
3450 }
3451
3452 /// Returns a read-only iterator that points one past the last in the
3453 /// SwitchInst.
3454 ConstCaseIt case_end() const {
3455 return ConstCaseIt(this, getNumCases());
3456 }
3457
3458 /// Iteration adapter for range-for loops.
3459 iterator_range<CaseIt> cases() {
3460 return make_range(case_begin(), case_end());
3461 }
3462
3463 /// Constant iteration adapter for range-for loops.
3464 iterator_range<ConstCaseIt> cases() const {
3465 return make_range(case_begin(), case_end());
3466 }
3467
3468 /// Returns an iterator that points to the default case.
3469 /// Note: this iterator allows to resolve successor only. Attempt
3470 /// to resolve case value causes an assertion.
3471 /// Also note, that increment and decrement also causes an assertion and
3472 /// makes iterator invalid.
3473 CaseIt case_default() {
3474 return CaseIt(this, DefaultPseudoIndex);
3475 }
3476 ConstCaseIt case_default() const {
3477 return ConstCaseIt(this, DefaultPseudoIndex);
3478 }
3479
3480 /// Search all of the case values for the specified constant. If it is
3481 /// explicitly handled, return the case iterator of it, otherwise return
3482 /// default case iterator to indicate that it is handled by the default
3483 /// handler.
3484 CaseIt findCaseValue(const ConstantInt *C) {
3485 CaseIt I = llvm::find_if(
3486 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3487 if (I != case_end())
3488 return I;
3489
3490 return case_default();
3491 }
3492 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3493 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3494 return Case.getCaseValue() == C;
3495 });
3496 if (I != case_end())
3497 return I;
3498
3499 return case_default();
3500 }
3501
3502 /// Finds the unique case value for a given successor. Returns null if the
3503 /// successor is not found, not unique, or is the default case.
3504 ConstantInt *findCaseDest(BasicBlock *BB) {
3505 if (BB == getDefaultDest())
3506 return nullptr;
3507
3508 ConstantInt *CI = nullptr;
3509 for (auto Case : cases()) {
3510 if (Case.getCaseSuccessor() != BB)
3511 continue;
3512
3513 if (CI)
3514 return nullptr; // Multiple cases lead to BB.
3515
3516 CI = Case.getCaseValue();
3517 }
3518
3519 return CI;
3520 }
3521
3522 /// Add an entry to the switch instruction.
3523 /// Note:
3524 /// This action invalidates case_end(). Old case_end() iterator will
3525 /// point to the added case.
3526 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3527
3528 /// This method removes the specified case and its successor from the switch
3529 /// instruction. Note that this operation may reorder the remaining cases at
3530 /// index idx and above.
3531 /// Note:
3532 /// This action invalidates iterators for all cases following the one removed,
3533 /// including the case_end() iterator. It returns an iterator for the next
3534 /// case.
3535 CaseIt removeCase(CaseIt I);
3536
3537 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3538 BasicBlock *getSuccessor(unsigned idx) const {
3539 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor idx out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3539, __extension__ __PRETTY_FUNCTION__))
;
3540 return cast<BasicBlock>(getOperand(idx*2+1));
3541 }
3542 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3543 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for switch!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3543, __extension__ __PRETTY_FUNCTION__))
;
3544 setOperand(idx * 2 + 1, NewSucc);
3545 }
3546
3547 // Methods for support type inquiry through isa, cast, and dyn_cast:
3548 static bool classof(const Instruction *I) {
3549 return I->getOpcode() == Instruction::Switch;
3550 }
3551 static bool classof(const Value *V) {
3552 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3553 }
3554};
3555
3556/// A wrapper class to simplify modification of SwitchInst cases along with
3557/// their prof branch_weights metadata.
3558class SwitchInstProfUpdateWrapper {
3559 SwitchInst &SI;
3560 Optional<SmallVector<uint32_t, 8> > Weights = None;
3561 bool Changed = false;
3562
3563protected:
3564 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3565
3566 MDNode *buildProfBranchWeightsMD();
3567
3568 void init();
3569
3570public:
3571 using CaseWeightOpt = Optional<uint32_t>;
3572 SwitchInst *operator->() { return &SI; }
3573 SwitchInst &operator*() { return SI; }
3574 operator SwitchInst *() { return &SI; }
3575
3576 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3577
3578 ~SwitchInstProfUpdateWrapper() {
3579 if (Changed)
3580 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3581 }
3582
3583 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3584 /// correspondent branch weight.
3585 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3586
3587 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3588 /// specified branch weight for the added case.
3589 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3590
3591 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3592 /// this object to not touch the underlying SwitchInst in destructor.
3593 SymbolTableList<Instruction>::iterator eraseFromParent();
3594
3595 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3596 CaseWeightOpt getSuccessorWeight(unsigned idx);
3597
3598 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3599};
3600
3601template <>
3602struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3603};
3604
3605DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits
<SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator
SwitchInst::op_begin() const { return OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst
::op_iterator SwitchInst::op_end() { return OperandTraits<
SwitchInst>::op_end(this); } SwitchInst::const_op_iterator
SwitchInst::op_end() const { return OperandTraits<SwitchInst
>::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SwitchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3605, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SwitchInst>::op_begin(const_cast
<SwitchInst*>(this))[i_nocapture].get()); } void SwitchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SwitchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3605, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SwitchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SwitchInst::getNumOperands() const { return OperandTraits
<SwitchInst>::operands(this); } template <int Idx_nocapture
> Use &SwitchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SwitchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3606
3607//===----------------------------------------------------------------------===//
3608// IndirectBrInst Class
3609//===----------------------------------------------------------------------===//
3610
3611//===---------------------------------------------------------------------------
3612/// Indirect Branch Instruction.
3613///
3614class IndirectBrInst : public Instruction {
3615 unsigned ReservedSpace;
3616
3617 // Operand[0] = Address to jump to
3618 // Operand[n+1] = n-th destination
3619 IndirectBrInst(const IndirectBrInst &IBI);
3620
3621 /// Create a new indirectbr instruction, specifying an
3622 /// Address to jump to. The number of expected destinations can be specified
3623 /// here to make memory allocation more efficient. This constructor can also
3624 /// autoinsert before another instruction.
3625 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3626
3627 /// Create a new indirectbr instruction, specifying an
3628 /// Address to jump to. The number of expected destinations can be specified
3629 /// here to make memory allocation more efficient. This constructor also
3630 /// autoinserts at the end of the specified BasicBlock.
3631 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3632
3633 // allocate space for exactly zero operands
3634 void *operator new(size_t S) { return User::operator new(S); }
3635
3636 void init(Value *Address, unsigned NumDests);
3637 void growOperands();
3638
3639protected:
3640 // Note: Instruction needs to be a friend here to call cloneImpl.
3641 friend class Instruction;
3642
3643 IndirectBrInst *cloneImpl() const;
3644
3645public:
3646 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3647
3648 /// Iterator type that casts an operand to a basic block.
3649 ///
3650 /// This only makes sense because the successors are stored as adjacent
3651 /// operands for indirectbr instructions.
3652 struct succ_op_iterator
3653 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3654 std::random_access_iterator_tag, BasicBlock *,
3655 ptrdiff_t, BasicBlock *, BasicBlock *> {
3656 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3657
3658 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3659 BasicBlock *operator->() const { return operator*(); }
3660 };
3661
3662 /// The const version of `succ_op_iterator`.
3663 struct const_succ_op_iterator
3664 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3665 std::random_access_iterator_tag,
3666 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3667 const BasicBlock *> {
3668 explicit const_succ_op_iterator(const_value_op_iterator I)
3669 : iterator_adaptor_base(I) {}
3670
3671 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3672 const BasicBlock *operator->() const { return operator*(); }
3673 };
3674
3675 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3676 Instruction *InsertBefore = nullptr) {
3677 return new IndirectBrInst(Address, NumDests, InsertBefore);
3678 }
3679
3680 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3681 BasicBlock *InsertAtEnd) {
3682 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3683 }
3684
3685 /// Provide fast operand accessors.
3686 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3687
3688 // Accessor Methods for IndirectBrInst instruction.
3689 Value *getAddress() { return getOperand(0); }
3690 const Value *getAddress() const { return getOperand(0); }
3691 void setAddress(Value *V) { setOperand(0, V); }
3692
3693 /// return the number of possible destinations in this
3694 /// indirectbr instruction.
3695 unsigned getNumDestinations() const { return getNumOperands()-1; }
3696
3697 /// Return the specified destination.
3698 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3699 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3700
3701 /// Add a destination.
3702 ///
3703 void addDestination(BasicBlock *Dest);
3704
3705 /// This method removes the specified successor from the
3706 /// indirectbr instruction.
3707 void removeDestination(unsigned i);
3708
3709 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3710 BasicBlock *getSuccessor(unsigned i) const {
3711 return cast<BasicBlock>(getOperand(i+1));
3712 }
3713 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3714 setOperand(i + 1, NewSucc);
3715 }
3716
3717 iterator_range<succ_op_iterator> successors() {
3718 return make_range(succ_op_iterator(std::next(value_op_begin())),
3719 succ_op_iterator(value_op_end()));
3720 }
3721
3722 iterator_range<const_succ_op_iterator> successors() const {
3723 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3724 const_succ_op_iterator(value_op_end()));
3725 }
3726
3727 // Methods for support type inquiry through isa, cast, and dyn_cast:
3728 static bool classof(const Instruction *I) {
3729 return I->getOpcode() == Instruction::IndirectBr;
3730 }
3731 static bool classof(const Value *V) {
3732 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3733 }
3734};
3735
3736template <>
3737struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3738};
3739
3740DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return
OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst
::const_op_iterator IndirectBrInst::op_begin() const { return
OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst
::op_end() { return OperandTraits<IndirectBrInst>::op_end
(this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end
() const { return OperandTraits<IndirectBrInst>::op_end
(const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<IndirectBrInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3740, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<IndirectBrInst>::op_begin(
const_cast<IndirectBrInst*>(this))[i_nocapture].get());
} void IndirectBrInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<IndirectBrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3740, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
IndirectBrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned IndirectBrInst::getNumOperands() const { return OperandTraits
<IndirectBrInst>::operands(this); } template <int Idx_nocapture
> Use &IndirectBrInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &IndirectBrInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
3741
3742//===----------------------------------------------------------------------===//
3743// InvokeInst Class
3744//===----------------------------------------------------------------------===//
3745
3746/// Invoke instruction. The SubclassData field is used to hold the
3747/// calling convention of the call.
3748///
3749class InvokeInst : public CallBase {
3750 /// The number of operands for this call beyond the called function,
3751 /// arguments, and operand bundles.
3752 static constexpr int NumExtraOperands = 2;
3753
3754 /// The index from the end of the operand array to the normal destination.
3755 static constexpr int NormalDestOpEndIdx = -3;
3756
3757 /// The index from the end of the operand array to the unwind destination.
3758 static constexpr int UnwindDestOpEndIdx = -2;
3759
3760 InvokeInst(const InvokeInst &BI);
3761
3762 /// Construct an InvokeInst given a range of arguments.
3763 ///
3764 /// Construct an InvokeInst from a range of arguments
3765 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3766 BasicBlock *IfException, ArrayRef<Value *> Args,
3767 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3768 const Twine &NameStr, Instruction *InsertBefore);
3769
3770 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3771 BasicBlock *IfException, ArrayRef<Value *> Args,
3772 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3773 const Twine &NameStr, BasicBlock *InsertAtEnd);
3774
3775 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3776 BasicBlock *IfException, ArrayRef<Value *> Args,
3777 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3778
3779 /// Compute the number of operands to allocate.
3780 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3781 // We need one operand for the called function, plus our extra operands and
3782 // the input operand counts provided.
3783 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3784 }
3785
3786protected:
3787 // Note: Instruction needs to be a friend here to call cloneImpl.
3788 friend class Instruction;
3789
3790 InvokeInst *cloneImpl() const;
3791
3792public:
3793 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3794 BasicBlock *IfException, ArrayRef<Value *> Args,
3795 const Twine &NameStr,
3796 Instruction *InsertBefore = nullptr) {
3797 int NumOperands = ComputeNumOperands(Args.size());
3798 return new (NumOperands)
3799 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3800 NameStr, InsertBefore);
3801 }
3802
3803 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3804 BasicBlock *IfException, ArrayRef<Value *> Args,
3805 ArrayRef<OperandBundleDef> Bundles = None,
3806 const Twine &NameStr = "",
3807 Instruction *InsertBefore = nullptr) {
3808 int NumOperands =
3809 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3810 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3811
3812 return new (NumOperands, DescriptorBytes)
3813 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3814 NameStr, InsertBefore);
3815 }
3816
3817 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3818 BasicBlock *IfException, ArrayRef<Value *> Args,
3819 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3820 int NumOperands = ComputeNumOperands(Args.size());
3821 return new (NumOperands)
3822 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3823 NameStr, InsertAtEnd);
3824 }
3825
3826 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3827 BasicBlock *IfException, ArrayRef<Value *> Args,
3828 ArrayRef<OperandBundleDef> Bundles,
3829 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3830 int NumOperands =
3831 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3832 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3833
3834 return new (NumOperands, DescriptorBytes)
3835 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3836 NameStr, InsertAtEnd);
3837 }
3838
3839 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3840 BasicBlock *IfException, ArrayRef<Value *> Args,
3841 const Twine &NameStr,
3842 Instruction *InsertBefore = nullptr) {
3843 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3844 IfException, Args, None, NameStr, InsertBefore);
3845 }
3846
3847 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3848 BasicBlock *IfException, ArrayRef<Value *> Args,
3849 ArrayRef<OperandBundleDef> Bundles = None,
3850 const Twine &NameStr = "",
3851 Instruction *InsertBefore = nullptr) {
3852 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3853 IfException, Args, Bundles, NameStr, InsertBefore);
3854 }
3855
3856 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3857 BasicBlock *IfException, ArrayRef<Value *> Args,
3858 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3859 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3860 IfException, Args, NameStr, InsertAtEnd);
3861 }
3862
3863 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3864 BasicBlock *IfException, ArrayRef<Value *> Args,
3865 ArrayRef<OperandBundleDef> Bundles,
3866 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3867 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3868 IfException, Args, Bundles, NameStr, InsertAtEnd);
3869 }
3870
3871 /// Create a clone of \p II with a different set of operand bundles and
3872 /// insert it before \p InsertPt.
3873 ///
3874 /// The returned invoke instruction is identical to \p II in every way except
3875 /// that the operand bundles for the new instruction are set to the operand
3876 /// bundles in \p Bundles.
3877 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3878 Instruction *InsertPt = nullptr);
3879
3880 // get*Dest - Return the destination basic blocks...
3881 BasicBlock *getNormalDest() const {
3882 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3883 }
3884 BasicBlock *getUnwindDest() const {
3885 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
15
Calling 'cast<llvm::BasicBlock, llvm::Use>'
28
Returning from 'cast<llvm::BasicBlock, llvm::Use>'
29
Returning pointer
3886 }
3887 void setNormalDest(BasicBlock *B) {
3888 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3889 }
3890 void setUnwindDest(BasicBlock *B) {
3891 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3892 }
3893
3894 /// Get the landingpad instruction from the landing pad
3895 /// block (the unwind destination).
3896 LandingPadInst *getLandingPadInst() const;
3897
3898 BasicBlock *getSuccessor(unsigned i) const {
3899 assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3899, __extension__ __PRETTY_FUNCTION__))
;
3900 return i == 0 ? getNormalDest() : getUnwindDest();
3901 }
3902
3903 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3904 assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/Instructions.h"
, 3904, __extension__ __PRETTY_FUNCTION__))
;
3905 if (i == 0)
3906 setNormalDest(NewSucc);
3907 else
3908 setUnwindDest(NewSucc);
3909 }
3910
3911 unsigned getNumSuccessors() const { return 2; }
3912
3913 // Methods for support type inquiry through isa, cast, and dyn_cast:
3914 static bool classof(const Instruction *I) {
3915 return (I->getOpcode() == Instruction::Invoke);
3916 }
3917 static bool classof(const Value *V) {
3918 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3919 }
3920
3921private:
3922 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3923 // method so that subclasses cannot accidentally use it.
3924 template <typename Bitfield>
3925 void setSubclassData(typename Bitfield::Type Value) {
3926 Instruction::setSubclassData<Bitfield>(Value);
3927 }
3928};
3929
3930InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3931 BasicBlock *IfException, ArrayRef<Value *> Args,
3932 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3933 const Twine &NameStr, Instruction *InsertBefore)
3934 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3935 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3936 InsertBefore) {
3937 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3938}
3939
3940InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3941 BasicBlock *IfException, ArrayRef<Value *> Args,
3942 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3943 const Twine &NameStr, BasicBlock *InsertAtEnd)
3944 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3945 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3946 InsertAtEnd) {
3947 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3948}
3949
3950//===----------------------------------------------------------------------===//
3951// CallBrInst Class
3952//===----------------------------------------------------------------------===//
3953
3954/// CallBr instruction, tracking function calls that may not return control but
3955/// instead transfer it to a third location. The SubclassData field is used to
3956/// hold the calling convention of the call.
3957///
3958class CallBrInst : public CallBase {
3959
3960 unsigned NumIndirectDests;
3961
3962 CallBrInst(const CallBrInst &BI);
3963
3964 /// Construct a CallBrInst given a range of arguments.
3965 ///
3966 /// Construct a CallBrInst from a range of arguments
3967 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3968 ArrayRef<BasicBlock *> IndirectDests,
3969 ArrayRef<Value *> Args,
3970 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3971 const Twine &NameStr, Instruction *InsertBefore);
3972
3973 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3974 ArrayRef<BasicBlock *> IndirectDests,
3975 ArrayRef<Value *> Args,
3976 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3977 const Twine &NameStr, BasicBlock *InsertAtEnd);
3978
3979 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3980 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3981 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3982
3983 /// Should the Indirect Destinations change, scan + update the Arg list.
3984 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3985
3986 /// Compute the number of operands to allocate.
3987 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3988 int NumBundleInputs = 0) {
3989 // We need one operand for the called function, plus our extra operands and
3990 // the input operand counts provided.
3991 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3992 }
3993